xref: /freebsd/contrib/pf/libevent/event.c (revision 67ecd4f3a477a0ca5b76a1694f89755df27a8679)
167ecd4f3SMax Laier /*
267ecd4f3SMax Laier  * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
367ecd4f3SMax Laier  * All rights reserved.
467ecd4f3SMax Laier  *
567ecd4f3SMax Laier  * Redistribution and use in source and binary forms, with or without
667ecd4f3SMax Laier  * modification, are permitted provided that the following conditions
767ecd4f3SMax Laier  * are met:
867ecd4f3SMax Laier  * 1. Redistributions of source code must retain the above copyright
967ecd4f3SMax Laier  *    notice, this list of conditions and the following disclaimer.
1067ecd4f3SMax Laier  * 2. Redistributions in binary form must reproduce the above copyright
1167ecd4f3SMax Laier  *    notice, this list of conditions and the following disclaimer in the
1267ecd4f3SMax Laier  *    documentation and/or other materials provided with the distribution.
1367ecd4f3SMax Laier  * 3. The name of the author may not be used to endorse or promote products
1467ecd4f3SMax Laier  *    derived from this software without specific prior written permission.
1567ecd4f3SMax Laier  *
1667ecd4f3SMax Laier  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1767ecd4f3SMax Laier  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1867ecd4f3SMax Laier  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1967ecd4f3SMax Laier  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2067ecd4f3SMax Laier  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2167ecd4f3SMax Laier  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2267ecd4f3SMax Laier  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2367ecd4f3SMax Laier  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2467ecd4f3SMax Laier  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2567ecd4f3SMax Laier  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2667ecd4f3SMax Laier  */
2767ecd4f3SMax Laier #ifdef HAVE_CONFIG_H
2867ecd4f3SMax Laier #include "config.h"
2967ecd4f3SMax Laier #endif
3067ecd4f3SMax Laier 
3167ecd4f3SMax Laier #ifdef WIN32
3267ecd4f3SMax Laier #define WIN32_LEAN_AND_MEAN
3367ecd4f3SMax Laier #include <windows.h>
3467ecd4f3SMax Laier #undef WIN32_LEAN_AND_MEAN
3567ecd4f3SMax Laier #include "misc.h"
3667ecd4f3SMax Laier #endif
3767ecd4f3SMax Laier #include <sys/types.h>
3867ecd4f3SMax Laier #include <sys/tree.h>
3967ecd4f3SMax Laier #ifdef HAVE_SYS_TIME_H
4067ecd4f3SMax Laier #include <sys/time.h>
4167ecd4f3SMax Laier #else
4267ecd4f3SMax Laier #include <sys/_time.h>
4367ecd4f3SMax Laier #endif
4467ecd4f3SMax Laier #include <sys/queue.h>
4567ecd4f3SMax Laier #include <stdio.h>
4667ecd4f3SMax Laier #include <stdlib.h>
4767ecd4f3SMax Laier #ifndef WIN32
4867ecd4f3SMax Laier #include <unistd.h>
4967ecd4f3SMax Laier #endif
5067ecd4f3SMax Laier #include <errno.h>
5167ecd4f3SMax Laier #include <signal.h>
5267ecd4f3SMax Laier #include <string.h>
5367ecd4f3SMax Laier #include <assert.h>
5467ecd4f3SMax Laier 
5567ecd4f3SMax Laier #include "event.h"
5667ecd4f3SMax Laier #include "event-internal.h"
5767ecd4f3SMax Laier #include "log.h"
5867ecd4f3SMax Laier 
5967ecd4f3SMax Laier #ifdef HAVE_EVENT_PORTS
6067ecd4f3SMax Laier extern const struct eventop evportops;
6167ecd4f3SMax Laier #endif
6267ecd4f3SMax Laier #ifdef HAVE_SELECT
6367ecd4f3SMax Laier extern const struct eventop selectops;
6467ecd4f3SMax Laier #endif
6567ecd4f3SMax Laier #ifdef HAVE_POLL
6667ecd4f3SMax Laier extern const struct eventop pollops;
6767ecd4f3SMax Laier #endif
6867ecd4f3SMax Laier #ifdef HAVE_RTSIG
6967ecd4f3SMax Laier extern const struct eventop rtsigops;
7067ecd4f3SMax Laier #endif
7167ecd4f3SMax Laier #ifdef HAVE_EPOLL
7267ecd4f3SMax Laier extern const struct eventop epollops;
7367ecd4f3SMax Laier #endif
7467ecd4f3SMax Laier #ifdef HAVE_WORKING_KQUEUE
7567ecd4f3SMax Laier extern const struct eventop kqops;
7667ecd4f3SMax Laier #endif
7767ecd4f3SMax Laier #ifdef HAVE_DEVPOLL
7867ecd4f3SMax Laier extern const struct eventop devpollops;
7967ecd4f3SMax Laier #endif
8067ecd4f3SMax Laier #ifdef WIN32
8167ecd4f3SMax Laier extern const struct eventop win32ops;
8267ecd4f3SMax Laier #endif
8367ecd4f3SMax Laier 
8467ecd4f3SMax Laier /* In order of preference */
8567ecd4f3SMax Laier const struct eventop *eventops[] = {
8667ecd4f3SMax Laier #ifdef HAVE_EVENT_PORTS
8767ecd4f3SMax Laier 	&evportops,
8867ecd4f3SMax Laier #endif
8967ecd4f3SMax Laier #ifdef HAVE_WORKING_KQUEUE
9067ecd4f3SMax Laier 	&kqops,
9167ecd4f3SMax Laier #endif
9267ecd4f3SMax Laier #ifdef HAVE_EPOLL
9367ecd4f3SMax Laier 	&epollops,
9467ecd4f3SMax Laier #endif
9567ecd4f3SMax Laier #ifdef HAVE_DEVPOLL
9667ecd4f3SMax Laier 	&devpollops,
9767ecd4f3SMax Laier #endif
9867ecd4f3SMax Laier #ifdef HAVE_RTSIG
9967ecd4f3SMax Laier 	&rtsigops,
10067ecd4f3SMax Laier #endif
10167ecd4f3SMax Laier #ifdef HAVE_POLL
10267ecd4f3SMax Laier 	&pollops,
10367ecd4f3SMax Laier #endif
10467ecd4f3SMax Laier #ifdef HAVE_SELECT
10567ecd4f3SMax Laier 	&selectops,
10667ecd4f3SMax Laier #endif
10767ecd4f3SMax Laier #ifdef WIN32
10867ecd4f3SMax Laier 	&win32ops,
10967ecd4f3SMax Laier #endif
11067ecd4f3SMax Laier 	NULL
11167ecd4f3SMax Laier };
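/*
 * event_init() below walks this array in order and keeps the first backend
 * whose init() succeeds; setting the EVENT_SHOW_METHOD environment variable
 * logs which one was selected.
 */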
11267ecd4f3SMax Laier 
11367ecd4f3SMax Laier /* Global state */
11467ecd4f3SMax Laier struct event_list signalqueue;
11567ecd4f3SMax Laier 
11667ecd4f3SMax Laier struct event_base *current_base = NULL;
11767ecd4f3SMax Laier 
11867ecd4f3SMax Laier /* Handle signals - This is a deprecated interface */
11967ecd4f3SMax Laier int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
12067ecd4f3SMax Laier volatile sig_atomic_t event_gotsig;	/* Set in signal handler */
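/*
 * With this legacy scheme a signal handler sets event_gotsig; the event loop
 * later invokes event_sigcb() outside signal context, and
 * event_process_active() returns early so the loop can get to it promptly.
 */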
12167ecd4f3SMax Laier 
12267ecd4f3SMax Laier /* Prototypes */
12367ecd4f3SMax Laier static void	event_queue_insert(struct event_base *, struct event *, int);
12467ecd4f3SMax Laier static void	event_queue_remove(struct event_base *, struct event *, int);
12567ecd4f3SMax Laier static int	event_haveevents(struct event_base *);
12667ecd4f3SMax Laier 
12767ecd4f3SMax Laier static void	event_process_active(struct event_base *);
12867ecd4f3SMax Laier 
12967ecd4f3SMax Laier static int	timeout_next(struct event_base *, struct timeval *);
13067ecd4f3SMax Laier static void	timeout_process(struct event_base *);
13167ecd4f3SMax Laier static void	timeout_correct(struct event_base *, struct timeval *);
13267ecd4f3SMax Laier 
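/*
 * Comparator for the timeout red-black tree: order by expiry time, breaking
 * ties by pointer identity so that events with identical timeouts can
 * coexist in the tree.
 */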
13367ecd4f3SMax Laier static int
13467ecd4f3SMax Laier compare(struct event *a, struct event *b)
13567ecd4f3SMax Laier {
13667ecd4f3SMax Laier 	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
13767ecd4f3SMax Laier 		return (-1);
13867ecd4f3SMax Laier 	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
13967ecd4f3SMax Laier 		return (1);
14067ecd4f3SMax Laier 	if (a < b)
14167ecd4f3SMax Laier 		return (-1);
14267ecd4f3SMax Laier 	else if (a > b)
14367ecd4f3SMax Laier 		return (1);
14467ecd4f3SMax Laier 	return (0);
14567ecd4f3SMax Laier }
14667ecd4f3SMax Laier 
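/*
 * Prefer a monotonic clock when available; with only CLOCK_REALTIME (or
 * gettimeofday) the event loop has to detect and correct wall-clock time
 * running backwards.
 */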
14767ecd4f3SMax Laier static int
14867ecd4f3SMax Laier gettime(struct timeval *tp)
14967ecd4f3SMax Laier {
15067ecd4f3SMax Laier #ifdef HAVE_CLOCK_GETTIME
15167ecd4f3SMax Laier 	struct timespec	ts;
15267ecd4f3SMax Laier 
15367ecd4f3SMax Laier #ifdef HAVE_CLOCK_MONOTONIC
15467ecd4f3SMax Laier 	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
15567ecd4f3SMax Laier #else
15667ecd4f3SMax Laier 	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
15767ecd4f3SMax Laier #endif
15867ecd4f3SMax Laier 		return (-1);
15967ecd4f3SMax Laier 	tp->tv_sec = ts.tv_sec;
16067ecd4f3SMax Laier 	tp->tv_usec = ts.tv_nsec / 1000;
16167ecd4f3SMax Laier #else
16267ecd4f3SMax Laier 	gettimeofday(tp, NULL);
16367ecd4f3SMax Laier #endif
16467ecd4f3SMax Laier 
16567ecd4f3SMax Laier 	return (0);
16667ecd4f3SMax Laier }
16767ecd4f3SMax Laier 
16867ecd4f3SMax Laier RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);
16967ecd4f3SMax Laier 
17067ecd4f3SMax Laier RB_GENERATE(event_tree, event, ev_timeout_node, compare);
17167ecd4f3SMax Laier 
17267ecd4f3SMax Laier 
17367ecd4f3SMax Laier void *
17467ecd4f3SMax Laier event_init(void)
17567ecd4f3SMax Laier {
17667ecd4f3SMax Laier 	int i;
17767ecd4f3SMax Laier 
17867ecd4f3SMax Laier 	if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
17967ecd4f3SMax Laier 		event_err(1, "%s: calloc", __func__);
18067ecd4f3SMax Laier 
18167ecd4f3SMax Laier 	event_sigcb = NULL;
18267ecd4f3SMax Laier 	event_gotsig = 0;
18367ecd4f3SMax Laier 	gettime(&current_base->event_tv);
18467ecd4f3SMax Laier 
18567ecd4f3SMax Laier 	RB_INIT(&current_base->timetree);
18667ecd4f3SMax Laier 	TAILQ_INIT(&current_base->eventqueue);
18767ecd4f3SMax Laier 	TAILQ_INIT(&signalqueue);
18867ecd4f3SMax Laier 
18967ecd4f3SMax Laier 	current_base->evbase = NULL;
19067ecd4f3SMax Laier 	for (i = 0; eventops[i] && !current_base->evbase; i++) {
19167ecd4f3SMax Laier 		current_base->evsel = eventops[i];
19267ecd4f3SMax Laier 
19367ecd4f3SMax Laier 		current_base->evbase = current_base->evsel->init();
19467ecd4f3SMax Laier 	}
19567ecd4f3SMax Laier 
19667ecd4f3SMax Laier 	if (current_base->evbase == NULL)
19767ecd4f3SMax Laier 		event_errx(1, "%s: no event mechanism available", __func__);
19867ecd4f3SMax Laier 
19967ecd4f3SMax Laier 	if (getenv("EVENT_SHOW_METHOD"))
20067ecd4f3SMax Laier 		event_msgx("libevent using: %s\n",
20167ecd4f3SMax Laier 			   current_base->evsel->name);
20267ecd4f3SMax Laier 
20367ecd4f3SMax Laier 	/* allocate a single active event queue */
20467ecd4f3SMax Laier 	event_base_priority_init(current_base, 1);
20567ecd4f3SMax Laier 
20667ecd4f3SMax Laier 	return (current_base);
20767ecd4f3SMax Laier }
20867ecd4f3SMax Laier 
20967ecd4f3SMax Laier void
21067ecd4f3SMax Laier event_base_free(struct event_base *base)
21167ecd4f3SMax Laier {
21267ecd4f3SMax Laier 	int i;
21367ecd4f3SMax Laier 
21467ecd4f3SMax Laier 	if (base == NULL && current_base)
21567ecd4f3SMax Laier 		base = current_base;
21667ecd4f3SMax Laier         if (base == current_base)
21767ecd4f3SMax Laier 		current_base = NULL;
21867ecd4f3SMax Laier 
21967ecd4f3SMax Laier 	assert(base);
22067ecd4f3SMax Laier 	assert(TAILQ_EMPTY(&base->eventqueue));
22167ecd4f3SMax Laier 	for (i=0; i < base->nactivequeues; ++i)
22267ecd4f3SMax Laier 		assert(TAILQ_EMPTY(base->activequeues[i]));
22367ecd4f3SMax Laier 
22467ecd4f3SMax Laier 	assert(RB_EMPTY(&base->timetree));
22567ecd4f3SMax Laier 
22667ecd4f3SMax Laier 	for (i = 0; i < base->nactivequeues; ++i)
22767ecd4f3SMax Laier 		free(base->activequeues[i]);
22867ecd4f3SMax Laier 	free(base->activequeues);
22967ecd4f3SMax Laier 
23067ecd4f3SMax Laier 	if (base->evsel->dealloc != NULL)
23167ecd4f3SMax Laier 		base->evsel->dealloc(base->evbase);
23267ecd4f3SMax Laier 
23367ecd4f3SMax Laier 	free(base);
23467ecd4f3SMax Laier }
23567ecd4f3SMax Laier 
23667ecd4f3SMax Laier int
23767ecd4f3SMax Laier event_priority_init(int npriorities)
23867ecd4f3SMax Laier {
23967ecd4f3SMax Laier   return event_base_priority_init(current_base, npriorities);
24067ecd4f3SMax Laier }
24167ecd4f3SMax Laier 
24267ecd4f3SMax Laier int
24367ecd4f3SMax Laier event_base_priority_init(struct event_base *base, int npriorities)
24467ecd4f3SMax Laier {
24567ecd4f3SMax Laier 	int i;
24667ecd4f3SMax Laier 
24767ecd4f3SMax Laier 	if (base->event_count_active)
24867ecd4f3SMax Laier 		return (-1);
24967ecd4f3SMax Laier 
25067ecd4f3SMax Laier 	if (base->nactivequeues && npriorities != base->nactivequeues) {
25167ecd4f3SMax Laier 		for (i = 0; i < base->nactivequeues; ++i) {
25267ecd4f3SMax Laier 			free(base->activequeues[i]);
25367ecd4f3SMax Laier 		}
25467ecd4f3SMax Laier 		free(base->activequeues);
25567ecd4f3SMax Laier 	}
25667ecd4f3SMax Laier 
25767ecd4f3SMax Laier 	/* Allocate our priority queues */
25867ecd4f3SMax Laier 	base->nactivequeues = npriorities;
25967ecd4f3SMax Laier 	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
26067ecd4f3SMax Laier 	    sizeof(struct event_list *));
26167ecd4f3SMax Laier 	if (base->activequeues == NULL)
26267ecd4f3SMax Laier 		event_err(1, "%s: calloc", __func__);
26367ecd4f3SMax Laier 
26467ecd4f3SMax Laier 	for (i = 0; i < base->nactivequeues; ++i) {
26567ecd4f3SMax Laier 		base->activequeues[i] = malloc(sizeof(struct event_list));
26667ecd4f3SMax Laier 		if (base->activequeues[i] == NULL)
26767ecd4f3SMax Laier 			event_err(1, "%s: malloc", __func__);
26867ecd4f3SMax Laier 		TAILQ_INIT(base->activequeues[i]);
26967ecd4f3SMax Laier 	}
27067ecd4f3SMax Laier 
27167ecd4f3SMax Laier 	return (0);
27267ecd4f3SMax Laier }
27367ecd4f3SMax Laier 
27467ecd4f3SMax Laier int
27567ecd4f3SMax Laier event_haveevents(struct event_base *base)
27667ecd4f3SMax Laier {
27767ecd4f3SMax Laier 	return (base->event_count > 0);
27867ecd4f3SMax Laier }
27967ecd4f3SMax Laier 
28067ecd4f3SMax Laier /*
28167ecd4f3SMax Laier  * Active events are stored in priority queues.  Lower priorities are always
28267ecd4f3SMax Laier  * processed before higher priorities.  Low priority events can starve high
28367ecd4f3SMax Laier  * priority ones.
28467ecd4f3SMax Laier  */
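/*
 * Illustrative sketch (not part of this file; ev, fd and cb are
 * placeholders): a caller could create three priority levels and mark one
 * event as most urgent:
 *
 *	event_priority_init(3);
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_priority_set(&ev, 0);
 *	event_add(&ev, NULL);
 *
 * Priority 0 is serviced before priorities 1 and 2.
 */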
28567ecd4f3SMax Laier 
28667ecd4f3SMax Laier static void
28767ecd4f3SMax Laier event_process_active(struct event_base *base)
28867ecd4f3SMax Laier {
28967ecd4f3SMax Laier 	struct event *ev;
29067ecd4f3SMax Laier 	struct event_list *activeq = NULL;
29167ecd4f3SMax Laier 	int i;
29267ecd4f3SMax Laier 	short ncalls;
29367ecd4f3SMax Laier 
29467ecd4f3SMax Laier 	if (!base->event_count_active)
29567ecd4f3SMax Laier 		return;
29667ecd4f3SMax Laier 
29767ecd4f3SMax Laier 	for (i = 0; i < base->nactivequeues; ++i) {
29867ecd4f3SMax Laier 		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
29967ecd4f3SMax Laier 			activeq = base->activequeues[i];
30067ecd4f3SMax Laier 			break;
30167ecd4f3SMax Laier 		}
30267ecd4f3SMax Laier 	}
30367ecd4f3SMax Laier 
30467ecd4f3SMax Laier 	assert(activeq != NULL);
30567ecd4f3SMax Laier 
30667ecd4f3SMax Laier 	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
30767ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_ACTIVE);
30867ecd4f3SMax Laier 
30967ecd4f3SMax Laier 		/* Allows deletes to work */
31067ecd4f3SMax Laier 		ncalls = ev->ev_ncalls;
31167ecd4f3SMax Laier 		ev->ev_pncalls = &ncalls;
31267ecd4f3SMax Laier 		while (ncalls) {
31367ecd4f3SMax Laier 			ncalls--;
31467ecd4f3SMax Laier 			ev->ev_ncalls = ncalls;
31567ecd4f3SMax Laier 			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
31667ecd4f3SMax Laier 			if (event_gotsig)
31767ecd4f3SMax Laier 				return;
31867ecd4f3SMax Laier 		}
31967ecd4f3SMax Laier 	}
32067ecd4f3SMax Laier }
32167ecd4f3SMax Laier 
32267ecd4f3SMax Laier /*
32367ecd4f3SMax Laier  * Wait continuously for events.  We exit only if no events are left.
32467ecd4f3SMax Laier  */
32567ecd4f3SMax Laier 
32667ecd4f3SMax Laier int
32767ecd4f3SMax Laier event_dispatch(void)
32867ecd4f3SMax Laier {
32967ecd4f3SMax Laier 	return (event_loop(0));
33067ecd4f3SMax Laier }
33167ecd4f3SMax Laier 
33267ecd4f3SMax Laier int
33367ecd4f3SMax Laier event_base_dispatch(struct event_base *event_base)
33467ecd4f3SMax Laier {
33567ecd4f3SMax Laier   return (event_base_loop(event_base, 0));
33667ecd4f3SMax Laier }
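/*
 * Minimal usage sketch (illustrative only; fd and read_cb are placeholders):
 *
 *	struct event ev;
 *
 *	event_init();
 *	event_set(&ev, fd, EV_READ, read_cb, NULL);
 *	event_add(&ev, NULL);
 *	event_dispatch();
 *
 * event_dispatch() returns when no events remain or a loopexit was requested.
 */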
33767ecd4f3SMax Laier 
33867ecd4f3SMax Laier static void
33967ecd4f3SMax Laier event_loopexit_cb(int fd, short what, void *arg)
34067ecd4f3SMax Laier {
34167ecd4f3SMax Laier 	struct event_base *base = arg;
34267ecd4f3SMax Laier 	base->event_gotterm = 1;
34367ecd4f3SMax Laier }
34467ecd4f3SMax Laier 
34567ecd4f3SMax Laier /* not thread safe */
34667ecd4f3SMax Laier 
34767ecd4f3SMax Laier int
34867ecd4f3SMax Laier event_loopexit(struct timeval *tv)
34967ecd4f3SMax Laier {
35067ecd4f3SMax Laier 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
35167ecd4f3SMax Laier 		    current_base, tv));
35267ecd4f3SMax Laier }
35367ecd4f3SMax Laier 
35467ecd4f3SMax Laier int
35567ecd4f3SMax Laier event_base_loopexit(struct event_base *event_base, struct timeval *tv)
35667ecd4f3SMax Laier {
35767ecd4f3SMax Laier 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
35867ecd4f3SMax Laier 		    event_base, tv));
35967ecd4f3SMax Laier }
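/*
 * Example (illustrative): ask the running loop to terminate two seconds from
 * now by scheduling a one-shot timeout:
 *
 *	struct timeval tv = { 2, 0 };
 *	event_loopexit(&tv);
 */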
36067ecd4f3SMax Laier 
36167ecd4f3SMax Laier /* not thread safe */
36267ecd4f3SMax Laier 
36367ecd4f3SMax Laier int
36467ecd4f3SMax Laier event_loop(int flags)
36567ecd4f3SMax Laier {
36667ecd4f3SMax Laier 	return event_base_loop(current_base, flags);
36767ecd4f3SMax Laier }
36867ecd4f3SMax Laier 
36967ecd4f3SMax Laier int
37067ecd4f3SMax Laier event_base_loop(struct event_base *base, int flags)
37167ecd4f3SMax Laier {
37267ecd4f3SMax Laier 	const struct eventop *evsel = base->evsel;
37367ecd4f3SMax Laier 	void *evbase = base->evbase;
37467ecd4f3SMax Laier 	struct timeval tv;
37567ecd4f3SMax Laier 	int res, done;
37667ecd4f3SMax Laier 
37767ecd4f3SMax Laier 	done = 0;
37867ecd4f3SMax Laier 	while (!done) {
37967ecd4f3SMax Laier 		/* Calculate the initial events that we are waiting for */
38067ecd4f3SMax Laier 		if (evsel->recalc(base, evbase, 0) == -1)
38167ecd4f3SMax Laier 			return (-1);
38267ecd4f3SMax Laier 
38367ecd4f3SMax Laier 		/* Terminate the loop if we have been asked to */
38467ecd4f3SMax Laier 		if (base->event_gotterm) {
38567ecd4f3SMax Laier 			base->event_gotterm = 0;
38667ecd4f3SMax Laier 			break;
38767ecd4f3SMax Laier 		}
38867ecd4f3SMax Laier 
38967ecd4f3SMax Laier 		/* You cannot use this interface for multi-threaded apps */
39067ecd4f3SMax Laier 		while (event_gotsig) {
39167ecd4f3SMax Laier 			event_gotsig = 0;
39267ecd4f3SMax Laier 			if (event_sigcb) {
39367ecd4f3SMax Laier 				res = (*event_sigcb)();
39467ecd4f3SMax Laier 				if (res == -1) {
39567ecd4f3SMax Laier 					errno = EINTR;
39667ecd4f3SMax Laier 					return (-1);
39767ecd4f3SMax Laier 				}
39867ecd4f3SMax Laier 			}
39967ecd4f3SMax Laier 		}
40067ecd4f3SMax Laier 
40167ecd4f3SMax Laier 		/* Check if time is running backwards */
40267ecd4f3SMax Laier 		gettime(&tv);
40367ecd4f3SMax Laier 		if (timercmp(&tv, &base->event_tv, <)) {
40467ecd4f3SMax Laier 			struct timeval off;
40567ecd4f3SMax Laier 			event_debug(("%s: time is running backwards, corrected",
40667ecd4f3SMax Laier 				    __func__));
40767ecd4f3SMax Laier 			timersub(&base->event_tv, &tv, &off);
40867ecd4f3SMax Laier 			timeout_correct(base, &off);
40967ecd4f3SMax Laier 		}
41067ecd4f3SMax Laier 		base->event_tv = tv;
41167ecd4f3SMax Laier 
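		/*
		 * Sleep until the nearest timeout unless events are already
		 * active or the caller asked for a non-blocking pass, in
		 * which case dispatch with a zeroed (polling) timeout.
		 */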
41267ecd4f3SMax Laier 		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
41367ecd4f3SMax Laier 			timeout_next(base, &tv);
41467ecd4f3SMax Laier 		else
41567ecd4f3SMax Laier 			timerclear(&tv);
41667ecd4f3SMax Laier 
41767ecd4f3SMax Laier 		/* If we have no events, we just exit */
41867ecd4f3SMax Laier 		if (!event_haveevents(base)) {
41967ecd4f3SMax Laier 			event_debug(("%s: no events registered.", __func__));
42067ecd4f3SMax Laier 			return (1);
42167ecd4f3SMax Laier 		}
42267ecd4f3SMax Laier 
42367ecd4f3SMax Laier 		res = evsel->dispatch(base, evbase, &tv);
42467ecd4f3SMax Laier 
42567ecd4f3SMax Laier 		if (res == -1)
42667ecd4f3SMax Laier 			return (-1);
42767ecd4f3SMax Laier 
42867ecd4f3SMax Laier 		timeout_process(base);
42967ecd4f3SMax Laier 
43067ecd4f3SMax Laier 		if (base->event_count_active) {
43167ecd4f3SMax Laier 			event_process_active(base);
43267ecd4f3SMax Laier 			if (!base->event_count_active && (flags & EVLOOP_ONCE))
43367ecd4f3SMax Laier 				done = 1;
43467ecd4f3SMax Laier 		} else if (flags & EVLOOP_NONBLOCK)
43567ecd4f3SMax Laier 			done = 1;
43667ecd4f3SMax Laier 	}
43767ecd4f3SMax Laier 
43867ecd4f3SMax Laier 	event_debug(("%s: asked to terminate loop.", __func__));
43967ecd4f3SMax Laier 	return (0);
44067ecd4f3SMax Laier }
44167ecd4f3SMax Laier 
44267ecd4f3SMax Laier /* Sets up an event for processing once */
44367ecd4f3SMax Laier 
44467ecd4f3SMax Laier struct event_once {
44567ecd4f3SMax Laier 	struct event ev;
44667ecd4f3SMax Laier 
44767ecd4f3SMax Laier 	void (*cb)(int, short, void *);
44867ecd4f3SMax Laier 	void *arg;
44967ecd4f3SMax Laier };
45067ecd4f3SMax Laier 
45167ecd4f3SMax Laier /* One-time callback, it deletes itself */
45267ecd4f3SMax Laier 
45367ecd4f3SMax Laier static void
45467ecd4f3SMax Laier event_once_cb(int fd, short events, void *arg)
45567ecd4f3SMax Laier {
45667ecd4f3SMax Laier 	struct event_once *eonce = arg;
45767ecd4f3SMax Laier 
45867ecd4f3SMax Laier 	(*eonce->cb)(fd, events, eonce->arg);
45967ecd4f3SMax Laier 	free(eonce);
46067ecd4f3SMax Laier }
46167ecd4f3SMax Laier 
46267ecd4f3SMax Laier /* Schedules an event once */
46367ecd4f3SMax Laier 
46467ecd4f3SMax Laier int
46567ecd4f3SMax Laier event_once(int fd, short events,
46667ecd4f3SMax Laier     void (*callback)(int, short, void *), void *arg, struct timeval *tv)
46767ecd4f3SMax Laier {
46867ecd4f3SMax Laier 	struct event_once *eonce;
46967ecd4f3SMax Laier 	struct timeval etv;
47067ecd4f3SMax Laier 	int res;
47167ecd4f3SMax Laier 
47267ecd4f3SMax Laier 	/* We cannot support signals that just fire once */
47367ecd4f3SMax Laier 	if (events & EV_SIGNAL)
47467ecd4f3SMax Laier 		return (-1);
47567ecd4f3SMax Laier 
47667ecd4f3SMax Laier 	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
47767ecd4f3SMax Laier 		return (-1);
47867ecd4f3SMax Laier 
47967ecd4f3SMax Laier 	eonce->cb = callback;
48067ecd4f3SMax Laier 	eonce->arg = arg;
48167ecd4f3SMax Laier 
48267ecd4f3SMax Laier 	if (events == EV_TIMEOUT) {
48367ecd4f3SMax Laier 		if (tv == NULL) {
48467ecd4f3SMax Laier 			timerclear(&etv);
48567ecd4f3SMax Laier 			tv = &etv;
48667ecd4f3SMax Laier 		}
48767ecd4f3SMax Laier 
48867ecd4f3SMax Laier 		evtimer_set(&eonce->ev, event_once_cb, eonce);
48967ecd4f3SMax Laier 	} else if (events & (EV_READ|EV_WRITE)) {
49067ecd4f3SMax Laier 		events &= EV_READ|EV_WRITE;
49167ecd4f3SMax Laier 
49267ecd4f3SMax Laier 		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
49367ecd4f3SMax Laier 	} else {
49467ecd4f3SMax Laier 		/* Bad event combination */
49567ecd4f3SMax Laier 		free(eonce);
49667ecd4f3SMax Laier 		return (-1);
49767ecd4f3SMax Laier 	}
49867ecd4f3SMax Laier 
49967ecd4f3SMax Laier 	res = event_add(&eonce->ev, tv);
50067ecd4f3SMax Laier 	if (res != 0) {
50167ecd4f3SMax Laier 		free(eonce);
50267ecd4f3SMax Laier 		return (res);
50367ecd4f3SMax Laier 	}
50467ecd4f3SMax Laier 
50567ecd4f3SMax Laier 	return (0);
50667ecd4f3SMax Laier }
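/*
 * Example (illustrative; my_cb and my_arg are placeholders): run a callback
 * exactly once, five seconds from now, without managing a struct event:
 *
 *	struct timeval tv = { 5, 0 };
 *	event_once(-1, EV_TIMEOUT, my_cb, my_arg, &tv);
 *
 * Note that EV_SIGNAL is rejected by this interface.
 */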
50767ecd4f3SMax Laier 
50867ecd4f3SMax Laier void
50967ecd4f3SMax Laier event_set(struct event *ev, int fd, short events,
51067ecd4f3SMax Laier 	  void (*callback)(int, short, void *), void *arg)
51167ecd4f3SMax Laier {
51267ecd4f3SMax Laier 	/* Take the current base - caller needs to set the real base later */
51367ecd4f3SMax Laier 	ev->ev_base = current_base;
51467ecd4f3SMax Laier 
51567ecd4f3SMax Laier 	ev->ev_callback = callback;
51667ecd4f3SMax Laier 	ev->ev_arg = arg;
51767ecd4f3SMax Laier 	ev->ev_fd = fd;
51867ecd4f3SMax Laier 	ev->ev_events = events;
51967ecd4f3SMax Laier 	ev->ev_flags = EVLIST_INIT;
52067ecd4f3SMax Laier 	ev->ev_ncalls = 0;
52167ecd4f3SMax Laier 	ev->ev_pncalls = NULL;
52267ecd4f3SMax Laier 
52367ecd4f3SMax Laier 	/* by default, we put new events into the middle priority */
52467ecd4f3SMax Laier 	ev->ev_pri = current_base->nactivequeues/2;
52567ecd4f3SMax Laier }
52667ecd4f3SMax Laier 
52767ecd4f3SMax Laier int
52867ecd4f3SMax Laier event_base_set(struct event_base *base, struct event *ev)
52967ecd4f3SMax Laier {
53067ecd4f3SMax Laier 	/* Only innocent events may be assigned to a different base */
53167ecd4f3SMax Laier 	if (ev->ev_flags != EVLIST_INIT)
53267ecd4f3SMax Laier 		return (-1);
53367ecd4f3SMax Laier 
53467ecd4f3SMax Laier 	ev->ev_base = base;
53567ecd4f3SMax Laier 	ev->ev_pri = base->nactivequeues/2;
53667ecd4f3SMax Laier 
53767ecd4f3SMax Laier 	return (0);
53867ecd4f3SMax Laier }
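/*
 * Illustrative sketch: bind an event to an explicit base rather than the
 * global current_base.  This must happen before event_add(), while the
 * event is still in its EVLIST_INIT state:
 *
 *	struct event_base *base = event_init();
 *
 *	event_set(&ev, fd, EV_WRITE, write_cb, NULL);
 *	event_base_set(base, &ev);
 *	event_add(&ev, NULL);
 */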
53967ecd4f3SMax Laier 
54067ecd4f3SMax Laier /*
54167ecd4f3SMax Laier  * Sets the priority of an event - if the event is already active,
54267ecd4f3SMax Laier  * changing its priority will fail.
54367ecd4f3SMax Laier  */
54467ecd4f3SMax Laier 
54567ecd4f3SMax Laier int
54667ecd4f3SMax Laier event_priority_set(struct event *ev, int pri)
54767ecd4f3SMax Laier {
54867ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_ACTIVE)
54967ecd4f3SMax Laier 		return (-1);
55067ecd4f3SMax Laier 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
55167ecd4f3SMax Laier 		return (-1);
55267ecd4f3SMax Laier 
55367ecd4f3SMax Laier 	ev->ev_pri = pri;
55467ecd4f3SMax Laier 
55567ecd4f3SMax Laier 	return (0);
55667ecd4f3SMax Laier }
55767ecd4f3SMax Laier 
55867ecd4f3SMax Laier /*
55967ecd4f3SMax Laier  * Checks if a specific event is pending or scheduled.
56067ecd4f3SMax Laier  */
56167ecd4f3SMax Laier 
56267ecd4f3SMax Laier int
56367ecd4f3SMax Laier event_pending(struct event *ev, short event, struct timeval *tv)
56467ecd4f3SMax Laier {
56567ecd4f3SMax Laier 	struct timeval	now, res;
56667ecd4f3SMax Laier 	int flags = 0;
56767ecd4f3SMax Laier 
56867ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_INSERTED)
56967ecd4f3SMax Laier 		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
57067ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_ACTIVE)
57167ecd4f3SMax Laier 		flags |= ev->ev_res;
57267ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_TIMEOUT)
57367ecd4f3SMax Laier 		flags |= EV_TIMEOUT;
57467ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_SIGNAL)
57567ecd4f3SMax Laier 		flags |= EV_SIGNAL;
57667ecd4f3SMax Laier 
57767ecd4f3SMax Laier 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
57867ecd4f3SMax Laier 
57967ecd4f3SMax Laier 	/* See if there is a timeout that we should report */
58067ecd4f3SMax Laier 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
58167ecd4f3SMax Laier 		gettime(&now);
58267ecd4f3SMax Laier 		timersub(&ev->ev_timeout, &now, &res);
58367ecd4f3SMax Laier 		/* correctly remap to real time */
58467ecd4f3SMax Laier 		gettimeofday(&now, NULL);
58567ecd4f3SMax Laier 		timeradd(&now, &res, tv);
58667ecd4f3SMax Laier 	}
58767ecd4f3SMax Laier 
58867ecd4f3SMax Laier 	return (flags & event);
58967ecd4f3SMax Laier }
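/*
 * Example (illustrative): check whether an event still has a timeout pending
 * and, if so, learn the wall-clock time at which it is due:
 *
 *	struct timeval when;
 *
 *	if (event_pending(&ev, EV_TIMEOUT, &when))
 *		the timeout has not fired yet and is due at `when'
 */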
59067ecd4f3SMax Laier 
59167ecd4f3SMax Laier int
59267ecd4f3SMax Laier event_add(struct event *ev, struct timeval *tv)
59367ecd4f3SMax Laier {
59467ecd4f3SMax Laier 	struct event_base *base = ev->ev_base;
59567ecd4f3SMax Laier 	const struct eventop *evsel = base->evsel;
59667ecd4f3SMax Laier 	void *evbase = base->evbase;
59767ecd4f3SMax Laier 
59867ecd4f3SMax Laier 	event_debug((
59967ecd4f3SMax Laier 		 "event_add: event: %p, %s%s%scall %p",
60067ecd4f3SMax Laier 		 ev,
60167ecd4f3SMax Laier 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
60267ecd4f3SMax Laier 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
60367ecd4f3SMax Laier 		 tv ? "EV_TIMEOUT " : " ",
60467ecd4f3SMax Laier 		 ev->ev_callback));
60567ecd4f3SMax Laier 
60667ecd4f3SMax Laier 	assert(!(ev->ev_flags & ~EVLIST_ALL));
60767ecd4f3SMax Laier 
60867ecd4f3SMax Laier 	if (tv != NULL) {
60967ecd4f3SMax Laier 		struct timeval now;
61067ecd4f3SMax Laier 
61167ecd4f3SMax Laier 		if (ev->ev_flags & EVLIST_TIMEOUT)
61267ecd4f3SMax Laier 			event_queue_remove(base, ev, EVLIST_TIMEOUT);
61367ecd4f3SMax Laier 
61467ecd4f3SMax Laier 		/* Check if it is active due to a timeout.  Rescheduling
61567ecd4f3SMax Laier 		 * this timeout before the callback can be executed
61667ecd4f3SMax Laier 		 * removes it from the active list. */
61767ecd4f3SMax Laier 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
61867ecd4f3SMax Laier 		    (ev->ev_res & EV_TIMEOUT)) {
61967ecd4f3SMax Laier 			/* See if we are just actively executing this
62067ecd4f3SMax Laier 			 * event in a loop
62167ecd4f3SMax Laier 			 */
62267ecd4f3SMax Laier 			if (ev->ev_ncalls && ev->ev_pncalls) {
62367ecd4f3SMax Laier 				/* Abort loop */
62467ecd4f3SMax Laier 				*ev->ev_pncalls = 0;
62567ecd4f3SMax Laier 			}
62667ecd4f3SMax Laier 
62767ecd4f3SMax Laier 			event_queue_remove(base, ev, EVLIST_ACTIVE);
62867ecd4f3SMax Laier 		}
62967ecd4f3SMax Laier 
63067ecd4f3SMax Laier 		gettime(&now);
63167ecd4f3SMax Laier 		timeradd(&now, tv, &ev->ev_timeout);
63267ecd4f3SMax Laier 
63367ecd4f3SMax Laier 		event_debug((
63467ecd4f3SMax Laier 			 "event_add: timeout in %ld seconds, call %p",
63567ecd4f3SMax Laier 			 (long)tv->tv_sec, ev->ev_callback));
63667ecd4f3SMax Laier 
63767ecd4f3SMax Laier 		event_queue_insert(base, ev, EVLIST_TIMEOUT);
63867ecd4f3SMax Laier 	}
63967ecd4f3SMax Laier 
64067ecd4f3SMax Laier 	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
64167ecd4f3SMax Laier 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
64267ecd4f3SMax Laier 		event_queue_insert(base, ev, EVLIST_INSERTED);
64367ecd4f3SMax Laier 
64467ecd4f3SMax Laier 		return (evsel->add(evbase, ev));
64567ecd4f3SMax Laier 	} else if ((ev->ev_events & EV_SIGNAL) &&
64667ecd4f3SMax Laier 	    !(ev->ev_flags & EVLIST_SIGNAL)) {
64767ecd4f3SMax Laier 		event_queue_insert(base, ev, EVLIST_SIGNAL);
64867ecd4f3SMax Laier 
64967ecd4f3SMax Laier 		return (evsel->add(evbase, ev));
65067ecd4f3SMax Laier 	}
65167ecd4f3SMax Laier 
65267ecd4f3SMax Laier 	return (0);
65367ecd4f3SMax Laier }
65467ecd4f3SMax Laier 
65567ecd4f3SMax Laier int
65667ecd4f3SMax Laier event_del(struct event *ev)
65767ecd4f3SMax Laier {
65867ecd4f3SMax Laier 	struct event_base *base;
65967ecd4f3SMax Laier 	const struct eventop *evsel;
66067ecd4f3SMax Laier 	void *evbase;
66167ecd4f3SMax Laier 
66267ecd4f3SMax Laier 	event_debug(("event_del: %p, callback %p",
66367ecd4f3SMax Laier 		 ev, ev->ev_callback));
66467ecd4f3SMax Laier 
66567ecd4f3SMax Laier 	/* An event without a base has not been added */
66667ecd4f3SMax Laier 	if (ev->ev_base == NULL)
66767ecd4f3SMax Laier 		return (-1);
66867ecd4f3SMax Laier 
66967ecd4f3SMax Laier 	base = ev->ev_base;
67067ecd4f3SMax Laier 	evsel = base->evsel;
67167ecd4f3SMax Laier 	evbase = base->evbase;
67267ecd4f3SMax Laier 
67367ecd4f3SMax Laier 	assert(!(ev->ev_flags & ~EVLIST_ALL));
67467ecd4f3SMax Laier 
67567ecd4f3SMax Laier 	/* See if we are just actively executing this event in a loop */
67667ecd4f3SMax Laier 	if (ev->ev_ncalls && ev->ev_pncalls) {
67767ecd4f3SMax Laier 		/* Abort loop */
67867ecd4f3SMax Laier 		*ev->ev_pncalls = 0;
67967ecd4f3SMax Laier 	}
68067ecd4f3SMax Laier 
68167ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_TIMEOUT)
68267ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_TIMEOUT);
68367ecd4f3SMax Laier 
68467ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_ACTIVE)
68567ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_ACTIVE);
68667ecd4f3SMax Laier 
68767ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_INSERTED) {
68867ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_INSERTED);
68967ecd4f3SMax Laier 		return (evsel->del(evbase, ev));
69067ecd4f3SMax Laier 	} else if (ev->ev_flags & EVLIST_SIGNAL) {
69167ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_SIGNAL);
69267ecd4f3SMax Laier 		return (evsel->del(evbase, ev));
69367ecd4f3SMax Laier 	}
69467ecd4f3SMax Laier 
69567ecd4f3SMax Laier 	return (0);
69667ecd4f3SMax Laier }
69767ecd4f3SMax Laier 
69867ecd4f3SMax Laier void
69967ecd4f3SMax Laier event_active(struct event *ev, int res, short ncalls)
70067ecd4f3SMax Laier {
70167ecd4f3SMax Laier 	/* We get different kinds of events, add them together */
70267ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_ACTIVE) {
70367ecd4f3SMax Laier 		ev->ev_res |= res;
70467ecd4f3SMax Laier 		return;
70567ecd4f3SMax Laier 	}
70667ecd4f3SMax Laier 
70767ecd4f3SMax Laier 	ev->ev_res = res;
70867ecd4f3SMax Laier 	ev->ev_ncalls = ncalls;
70967ecd4f3SMax Laier 	ev->ev_pncalls = NULL;
71067ecd4f3SMax Laier 	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
71167ecd4f3SMax Laier }
71267ecd4f3SMax Laier 
71367ecd4f3SMax Laier int
71467ecd4f3SMax Laier timeout_next(struct event_base *base, struct timeval *tv)
71567ecd4f3SMax Laier {
71667ecd4f3SMax Laier 	struct timeval dflt = TIMEOUT_DEFAULT;
71767ecd4f3SMax Laier 
71867ecd4f3SMax Laier 	struct timeval now;
71967ecd4f3SMax Laier 	struct event *ev;
72067ecd4f3SMax Laier 
72167ecd4f3SMax Laier 	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
72267ecd4f3SMax Laier 		*tv = dflt;
72367ecd4f3SMax Laier 		return (0);
72467ecd4f3SMax Laier 	}
72567ecd4f3SMax Laier 
72667ecd4f3SMax Laier 	if (gettime(&now) == -1)
72767ecd4f3SMax Laier 		return (-1);
72867ecd4f3SMax Laier 
72967ecd4f3SMax Laier 	if (timercmp(&ev->ev_timeout, &now, <=)) {
73067ecd4f3SMax Laier 		timerclear(tv);
73167ecd4f3SMax Laier 		return (0);
73267ecd4f3SMax Laier 	}
73367ecd4f3SMax Laier 
73467ecd4f3SMax Laier 	timersub(&ev->ev_timeout, &now, tv);
73567ecd4f3SMax Laier 
73667ecd4f3SMax Laier 	assert(tv->tv_sec >= 0);
73767ecd4f3SMax Laier 	assert(tv->tv_usec >= 0);
73867ecd4f3SMax Laier 
73967ecd4f3SMax Laier 	event_debug(("timeout_next: in %ld seconds", (long)tv->tv_sec));
74067ecd4f3SMax Laier 	return (0);
74167ecd4f3SMax Laier }
74267ecd4f3SMax Laier 
74367ecd4f3SMax Laier static void
74467ecd4f3SMax Laier timeout_correct(struct event_base *base, struct timeval *off)
74567ecd4f3SMax Laier {
74667ecd4f3SMax Laier 	struct event *ev;
74767ecd4f3SMax Laier 
74867ecd4f3SMax Laier 	/*
74967ecd4f3SMax Laier 	 * We can modify the key element of a node without destroying the
75067ecd4f3SMax Laier 	 * tree's ordering, because we apply the same offset to every node.
75167ecd4f3SMax Laier 	 */
75267ecd4f3SMax Laier 	RB_FOREACH(ev, event_tree, &base->timetree)
75367ecd4f3SMax Laier 		timersub(&ev->ev_timeout, off, &ev->ev_timeout);
75467ecd4f3SMax Laier }
75567ecd4f3SMax Laier 
75667ecd4f3SMax Laier void
75767ecd4f3SMax Laier timeout_process(struct event_base *base)
75867ecd4f3SMax Laier {
75967ecd4f3SMax Laier 	struct timeval now;
76067ecd4f3SMax Laier 	struct event *ev, *next;
76167ecd4f3SMax Laier 
76267ecd4f3SMax Laier 	gettime(&now);
76367ecd4f3SMax Laier 
76467ecd4f3SMax Laier 	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
76567ecd4f3SMax Laier 		if (timercmp(&ev->ev_timeout, &now, >))
76667ecd4f3SMax Laier 			break;
76767ecd4f3SMax Laier 		next = RB_NEXT(event_tree, &base->timetree, ev);
76867ecd4f3SMax Laier 
76967ecd4f3SMax Laier 		event_queue_remove(base, ev, EVLIST_TIMEOUT);
77067ecd4f3SMax Laier 
77167ecd4f3SMax Laier 		/* delete this event from the I/O queues */
77267ecd4f3SMax Laier 		event_del(ev);
77367ecd4f3SMax Laier 
77467ecd4f3SMax Laier 		event_debug(("timeout_process: call %p",
77567ecd4f3SMax Laier 			 ev->ev_callback));
77667ecd4f3SMax Laier 		event_active(ev, EV_TIMEOUT, 1);
77767ecd4f3SMax Laier 	}
77867ecd4f3SMax Laier }
77967ecd4f3SMax Laier 
78067ecd4f3SMax Laier void
78167ecd4f3SMax Laier event_queue_remove(struct event_base *base, struct event *ev, int queue)
78267ecd4f3SMax Laier {
78367ecd4f3SMax Laier 	int docount = 1;
78467ecd4f3SMax Laier 
78567ecd4f3SMax Laier 	if (!(ev->ev_flags & queue))
78667ecd4f3SMax Laier 		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
78767ecd4f3SMax Laier 			   ev, ev->ev_fd, queue);
78867ecd4f3SMax Laier 
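	/* Events flagged EVLIST_INTERNAL are excluded from the public
	 * event counters adjusted below. */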
78967ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_INTERNAL)
79067ecd4f3SMax Laier 		docount = 0;
79167ecd4f3SMax Laier 
79267ecd4f3SMax Laier 	if (docount)
79367ecd4f3SMax Laier 		base->event_count--;
79467ecd4f3SMax Laier 
79567ecd4f3SMax Laier 	ev->ev_flags &= ~queue;
79667ecd4f3SMax Laier 	switch (queue) {
79767ecd4f3SMax Laier 	case EVLIST_ACTIVE:
79867ecd4f3SMax Laier 		if (docount)
79967ecd4f3SMax Laier 			base->event_count_active--;
80067ecd4f3SMax Laier 		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
80167ecd4f3SMax Laier 		    ev, ev_active_next);
80267ecd4f3SMax Laier 		break;
80367ecd4f3SMax Laier 	case EVLIST_SIGNAL:
80467ecd4f3SMax Laier 		TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
80567ecd4f3SMax Laier 		break;
80667ecd4f3SMax Laier 	case EVLIST_TIMEOUT:
80767ecd4f3SMax Laier 		RB_REMOVE(event_tree, &base->timetree, ev);
80867ecd4f3SMax Laier 		break;
80967ecd4f3SMax Laier 	case EVLIST_INSERTED:
81067ecd4f3SMax Laier 		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
81167ecd4f3SMax Laier 		break;
81267ecd4f3SMax Laier 	default:
81367ecd4f3SMax Laier 		event_errx(1, "%s: unknown queue %x", __func__, queue);
81467ecd4f3SMax Laier 	}
81567ecd4f3SMax Laier }
81667ecd4f3SMax Laier 
81767ecd4f3SMax Laier void
81867ecd4f3SMax Laier event_queue_insert(struct event_base *base, struct event *ev, int queue)
81967ecd4f3SMax Laier {
82067ecd4f3SMax Laier 	int docount = 1;
82167ecd4f3SMax Laier 
82267ecd4f3SMax Laier 	if (ev->ev_flags & queue) {
82367ecd4f3SMax Laier 		/* Double insertion is possible for active events */
82467ecd4f3SMax Laier 		if (queue & EVLIST_ACTIVE)
82567ecd4f3SMax Laier 			return;
82667ecd4f3SMax Laier 
82767ecd4f3SMax Laier 		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
82867ecd4f3SMax Laier 			   ev, ev->ev_fd, queue);
82967ecd4f3SMax Laier 	}
83067ecd4f3SMax Laier 
83167ecd4f3SMax Laier 	if (ev->ev_flags & EVLIST_INTERNAL)
83267ecd4f3SMax Laier 		docount = 0;
83367ecd4f3SMax Laier 
83467ecd4f3SMax Laier 	if (docount)
83567ecd4f3SMax Laier 		base->event_count++;
83667ecd4f3SMax Laier 
83767ecd4f3SMax Laier 	ev->ev_flags |= queue;
83867ecd4f3SMax Laier 	switch (queue) {
83967ecd4f3SMax Laier 	case EVLIST_ACTIVE:
84067ecd4f3SMax Laier 		if (docount)
84167ecd4f3SMax Laier 			base->event_count_active++;
84267ecd4f3SMax Laier 		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
84367ecd4f3SMax Laier 		    ev,ev_active_next);
84467ecd4f3SMax Laier 		break;
84567ecd4f3SMax Laier 	case EVLIST_SIGNAL:
84667ecd4f3SMax Laier 		TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
84767ecd4f3SMax Laier 		break;
84867ecd4f3SMax Laier 	case EVLIST_TIMEOUT: {
84967ecd4f3SMax Laier 		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
85067ecd4f3SMax Laier 		assert(tmp == NULL);
85167ecd4f3SMax Laier 		break;
85267ecd4f3SMax Laier 	}
85367ecd4f3SMax Laier 	case EVLIST_INSERTED:
85467ecd4f3SMax Laier 		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
85567ecd4f3SMax Laier 		break;
85667ecd4f3SMax Laier 	default:
85767ecd4f3SMax Laier 		event_errx(1, "%s: unknown queue %x", __func__, queue);
85867ecd4f3SMax Laier 	}
85967ecd4f3SMax Laier }
86067ecd4f3SMax Laier 
86167ecd4f3SMax Laier /* Functions for debugging */
86267ecd4f3SMax Laier 
86367ecd4f3SMax Laier const char *
86467ecd4f3SMax Laier event_get_version(void)
86567ecd4f3SMax Laier {
86667ecd4f3SMax Laier 	return (VERSION);
86767ecd4f3SMax Laier }
86867ecd4f3SMax Laier 
86967ecd4f3SMax Laier /*
87067ecd4f3SMax Laier  * No thread-safe interface needed - the information should be the same
87167ecd4f3SMax Laier  * for all threads.
87267ecd4f3SMax Laier  */
87367ecd4f3SMax Laier 
87467ecd4f3SMax Laier const char *
87567ecd4f3SMax Laier event_get_method(void)
87667ecd4f3SMax Laier {
87767ecd4f3SMax Laier 	return (current_base->evsel->name);
87867ecd4f3SMax Laier }
879