/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

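/* Check whether the system provides a usable CLOCK_MONOTONIC clock. */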
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}

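/* Return the current time, preferring the base's cached time (updated once
 * per loop iteration) and the monotonic clock when it is available. */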
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

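/* Allocate and initialize a new event_base, selecting the first backend
 * from eventops[] that initializes successfully. */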
struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
			   base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}

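/* Tear down an event_base: delete any remaining non-internal events,
 * release the backend and the priority queues, and free the base. */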
void
event_base_free(struct event_base *base)
{
	int i, n_deleted=0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
  return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
  return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

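/* Run the event loop: compute the next timeout, dispatch on the backend,
 * then process expired timeouts and active events until asked to stop. */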
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* remember the current time so timeout_correct() can
		 * detect a clock that jumped backwards */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; schedules an event to run once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
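/* Example (illustrative sketch; my_cb, my_arg and base are placeholder
 * names): run a callback a single time, five seconds from now:
 *
 *	struct timeval tv = { 5, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, my_cb, my_arg, &tv);
 */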
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

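/* Example (illustrative sketch; fd, read_cb and base are placeholder
 * names): watch a descriptor for readability and re-arm it automatically:
 *
 *	struct event ev;
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, read_cb, NULL);
 *	event_base_set(base, &ev);
 *	event_add(&ev, NULL);
 */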
void
event_set(struct event *ev, int fd, short events,
	  void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

/*
 * Sets the priority of an event - changing the priority fails if the
 * event is already active.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}

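/* Register an event with its base; if tv is non-NULL, also (re)schedule a
 * timeout of tv relative to the current time.  Returns 0 on success or -1
 * if the backend or the timeout heap rejects the event. */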
int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below; if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

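/* Remove an event from every queue it is on and from the backend.
 * Returns 0 on success, or -1 on failure (e.g. the event was never
 * assigned a base). */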
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

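/* Mark an event as active with result flags `res'; its callback will be
 * invoked `ncalls' times from the event loop. */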
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

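/* Work out how long the backend may block: the time until the earliest
 * pending timeout, or NULL (block indefinitely) if none are pending. */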
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of each node without destroying
	 * the heap order, because we apply the same offset to all of them
	 * in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

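/* Activate every event whose timeout has expired. */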
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

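/* Queue bookkeeping: event_queue_remove() and event_queue_insert() move an
 * event off or onto the inserted, active, or timeout queues and keep the
 * per-base event counts in sync. */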
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}