/*
 * Copyright (c) 2000-2007 Niels Provos <provos (at) citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	ev_.ev_signal.ev_signal_next
#define ev_io_next	ev_.ev_io.ev_io_next
#define ev_io_timeout	ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	ev_.ev_signal.ev_ncalls
#define ev_pncalls	ev_.ev_signal.ev_pncalls

#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg

/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
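
/* Illustrative sketch (not part of this header): the event loop picks which
 * member of evcb_cb_union to invoke based on evcb_closure.  A very rough
 * outline follows, with locking, the signal/persist cases, and finalization
 * omitted; "evcb" is assumed to be an active struct event_callback, and the
 * cast assumes ev_evcallback is the first member of struct event, as declared
 * in event2/event_struct.h.
 */
#if 0
	switch (evcb->evcb_closure) {
	case EV_CLOSURE_CB_SELF:
		evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
		break;
	case EV_CLOSURE_EVENT: {
		struct event *ev = (struct event *)evcb;
		evcb->evcb_cb_union.evcb_callback(ev->ev_fd, ev->ev_res,
		    evcb->evcb_arg);
		break;
	}
	default:
		/* signal, persist, and finalize closures handled here */
		break;
	}
#endif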

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the fd
	 * is added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via evmap_io_active_() or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
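
/* Illustrative sketch (not part of this header): a hypothetical do-nothing
 * backend would provide an eventop instance along these lines.  Every
 * "nullop_" name below is made up for the example; the real backends
 * (select, poll, epoll, kqueue, ...) live in their own source files.
 */
#if 0
static void *
nullop_init(struct event_base *base)
{
	/* Per-base backend state; the returned pointer becomes base->evbase. */
	return mm_calloc(1, 1);
}

static int
nullop_add(struct event_base *base, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	/* Start watching 'events' on fd; 0 on success, -1 on error. */
	return 0;
}

static int
nullop_del(struct event_base *base, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	/* Stop watching 'events' on fd. */
	return 0;
}

static int
nullop_dispatch(struct event_base *base, struct timeval *tv)
{
	/* Wait up to *tv, then mark ready events active (e.g. via
	 * evmap_io_active_()); 0 on success, -1 on error. */
	return 0;
}

static void
nullop_dealloc(struct event_base *base)
{
	mm_free(base->evbase);
}

static const struct eventop nullop_ops = {
	"null",
	nullop_init,
	nullop_add,
	nullop_del,
	nullop_dispatch,
	nullop_dealloc,
	1,	/* need_reinit */
	0,	/* features */
	0	/* fdinfo_len */
};
#endif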

#ifdef _WIN32
/* If we're on win32, then file descriptors are not small, densely packed
   integers.  Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
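
/* Illustrative sketch (not part of this header): looking up the entry for a
 * signal is an array index plus bounds/NULL checks ("base" and "sig" are
 * assumed to be in scope):
 */
#if 0
	void *entry = NULL;
	if (sig >= 0 && sig < base->sigmap.nentries)
		entry = base->sigmap.entries[sig];	/* NULL if unused */
#endif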

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
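
/* Illustrative usage sketch (not part of this header): common timeouts are
 * requested through the public event_base_init_common_timeout() API.  The
 * timeval it returns carries a magic tag in the tv_usec bits above
 * COMMON_TIMEOUT_MICROSECONDS_MASK; adding an event with that timeval queues
 * it on the matching common_timeout_list instead of the minheap ("base" and
 * "ev" are assumed to exist).
 */
#if 0
	struct timeval ten_seconds = { 10, 0 };
	const struct timeval *tv_common =
	    event_base_init_common_timeout(base, &ten_seconds);
	if (tv_common)
		event_add(ev, tv_common);
#endif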

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	LIST_ENTRY(event_once) next_once;
	struct event ev;

	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;

	/* Notify main thread to wake up, break the loop, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);

	/** Saved seed for weak random number generator. Some backends use
	 * this to produce fairness among sockets. Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;

};

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};
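
/* Illustrative usage sketch (not part of this header): an event_config is
 * normally filled in through the public API and then consumed by
 * event_base_new_with_config().
 */
#if 0
	struct event_config *cfg = event_config_new();
	struct event_base *base = NULL;
	if (cfg) {
		event_config_avoid_method(cfg, "select");
		event_config_require_features(cfg, EV_FEATURE_O1);
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
#endif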

/* Internal use only: Macros that might be missing from <sys/queue.h> */
#ifndef TAILQ_FIRST
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define	TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
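
/* Illustrative sketch (not part of this header): with the compat macros in
 * place, walking an evcallback_list looks the same on every platform
 * ("base" is assumed to be an event_base whose lock is held):
 */
#if 0
	struct event_callback *evcb;
	TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
		/* inspect evcb */
	}
#endif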

#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active)

int evsig_set_handler_(struct event_base *base, int evsignal,
			  void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);

int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
/** Argument for event_del_nolock_. Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_. Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use. */
#define EVENT_DEL_EVEN_IF_FINALIZING 3
int event_del_nolock_(struct event *ev, int blocking);
int event_remove_timer_nolock_(struct event *ev);
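
/* Illustrative sketch (not part of this header): internal callers choose the
 * blocking mode to match the situation ("ev" is assumed to be a valid event
 * whose base lock is held):
 */
#if 0
	/* Common case: block only if the event is running in another thread
	 * and does not have EV_FINALIZE set. */
	event_del_nolock_(ev, EVENT_DEL_AUTOBLOCK);

	/* During finalization: proceed even though the event has been set up
	 * for finalizing rather than for regular use. */
	event_del_nolock_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
#endif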

void event_active_nolock_(struct event *ev, int res, short count);
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));


void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
int event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document. */
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);


/* Helper function: Call 'fn' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If fn returns 0, continue on to the next event. Otherwise, return the same
 * value that fn returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);
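
/* Illustrative sketch (not part of this header): a matching callback for
 * event_base_foreach_event_nolock_().  The "count_events_cb" name is made
 * up for the example; returning 0 continues the iteration, anything else
 * stops it and is returned to the caller.
 */
#if 0
static int
count_events_cb(const struct event_base *base, const struct event *ev,
    void *arg)
{
	int *counter = arg;
	++*counter;
	return 0;
}
#endif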

/* Cleanup function to reset debug mode during shutdown.
 *
 * Calling this function doesn't mean it'll be possible to re-enable
 * debug mode if any events were added.
 */
void event_disable_debug_mode(void);

#ifdef __cplusplus
}
#endif

#endif /* EVENT_INTERNAL_H_INCLUDED_ */