xref: /freebsd/contrib/ntp/sntp/libevent/event.c (revision 952364486a4b9d135e4b28f7f88a8703a74eae6f)
1 /*
2  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
29 
30 #ifdef _WIN32
31 #include <winsock2.h>
32 #define WIN32_LEAN_AND_MEAN
33 #include <windows.h>
34 #undef WIN32_LEAN_AND_MEAN
35 #endif
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38 #include <sys/time.h>
39 #endif
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
43 #endif
44 #include <stdio.h>
45 #include <stdlib.h>
46 #ifdef EVENT__HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49 #include <ctype.h>
50 #include <errno.h>
51 #include <signal.h>
52 #include <string.h>
53 #include <time.h>
54 #include <limits.h>
55 
56 #include "event2/event.h"
57 #include "event2/event_struct.h"
58 #include "event2/event_compat.h"
59 #include "event-internal.h"
60 #include "defer-internal.h"
61 #include "evthread-internal.h"
62 #include "event2/thread.h"
63 #include "event2/util.h"
64 #include "log-internal.h"
65 #include "evmap-internal.h"
66 #include "iocp-internal.h"
67 #include "changelist-internal.h"
68 #define HT_NO_CACHE_HASH_VALUES
69 #include "ht-internal.h"
70 #include "util-internal.h"
71 
72 
73 #ifdef EVENT__HAVE_WORKING_KQUEUE
74 #include "kqueue-internal.h"
75 #endif
76 
77 #ifdef EVENT__HAVE_EVENT_PORTS
78 extern const struct eventop evportops;
79 #endif
80 #ifdef EVENT__HAVE_SELECT
81 extern const struct eventop selectops;
82 #endif
83 #ifdef EVENT__HAVE_POLL
84 extern const struct eventop pollops;
85 #endif
86 #ifdef EVENT__HAVE_EPOLL
87 extern const struct eventop epollops;
88 #endif
89 #ifdef EVENT__HAVE_WORKING_KQUEUE
90 extern const struct eventop kqops;
91 #endif
92 #ifdef EVENT__HAVE_DEVPOLL
93 extern const struct eventop devpollops;
94 #endif
95 #ifdef _WIN32
96 extern const struct eventop win32ops;
97 #endif
98 
99 /* Array of backends in order of preference. */
100 static const struct eventop *eventops[] = {
101 #ifdef EVENT__HAVE_EVENT_PORTS
102 	&evportops,
103 #endif
104 #ifdef EVENT__HAVE_WORKING_KQUEUE
105 	&kqops,
106 #endif
107 #ifdef EVENT__HAVE_EPOLL
108 	&epollops,
109 #endif
110 #ifdef EVENT__HAVE_DEVPOLL
111 	&devpollops,
112 #endif
113 #ifdef EVENT__HAVE_POLL
114 	&pollops,
115 #endif
116 #ifdef EVENT__HAVE_SELECT
117 	&selectops,
118 #endif
119 #ifdef _WIN32
120 	&win32ops,
121 #endif
122 	NULL
123 };
124 
125 /* Global state; deprecated */
126 struct event_base *event_global_current_base_ = NULL;
127 #define current_base event_global_current_base_
128 
129 /* Global state */
130 
131 static void *event_self_cbarg_ptr_ = NULL;
132 
133 /* Prototypes */
134 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
135 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
136 static void	event_queue_insert_timeout(struct event_base *, struct event *);
137 static void	event_queue_insert_inserted(struct event_base *, struct event *);
138 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
139 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
140 static void	event_queue_remove_timeout(struct event_base *, struct event *);
141 static void	event_queue_remove_inserted(struct event_base *, struct event *);
142 static void event_queue_make_later_events_active(struct event_base *base);
143 
144 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
145 static int event_del_(struct event *ev, int blocking);
146 
147 #ifdef USE_REINSERT_TIMEOUT
148 /* This code seems buggy; only turn it on if we find out what the trouble is. */
149 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
150 #endif
151 
152 static int	event_haveevents(struct event_base *);
153 
154 static int	event_process_active(struct event_base *);
155 
156 static int	timeout_next(struct event_base *, struct timeval **);
157 static void	timeout_process(struct event_base *);
158 
159 static inline void	event_signal_closure(struct event_base *, struct event *ev);
160 static inline void	event_persist_closure(struct event_base *, struct event *ev);
161 
162 static int	evthread_notify_base(struct event_base *base);
163 
164 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
165     struct event *ev);
166 
167 #ifndef EVENT__DISABLE_DEBUG_MODE
168 /* These functions implement a hashtable recording which 'struct event *' structures
169  * have been set up or added.  We don't want to trust the contents of the struct
170  * event itself, since we're trying to work through cases where an event gets
171  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
172  */
173 
174 struct event_debug_entry {
175 	HT_ENTRY(event_debug_entry) node;
176 	const struct event *ptr;
177 	unsigned added : 1;
178 };
179 
180 static inline unsigned
181 hash_debug_entry(const struct event_debug_entry *e)
182 {
183 	/* We need to do this silliness to convince compilers that we
184 	 * honestly mean to cast e->ptr to an integer, and discard any
185 	 * part of it that doesn't fit in an unsigned.
186 	 */
187 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
188 	/* Our hashtable implementation is pretty sensitive to low bits,
189 	 * and every struct event is over 64 bytes in size, so we can
190 	 * just say >>6. */
191 	return (u >> 6);
192 }
193 
194 static inline int
195 eq_debug_entry(const struct event_debug_entry *a,
196     const struct event_debug_entry *b)
197 {
198 	return a->ptr == b->ptr;
199 }
200 
201 int event_debug_mode_on_ = 0;
202 /* Set if it's too late to enable event_debug_mode. */
203 static int event_debug_mode_too_late = 0;
204 #ifndef EVENT__DISABLE_THREAD_SUPPORT
205 static void *event_debug_map_lock_ = NULL;
206 #endif
207 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
208 	HT_INITIALIZER();
209 
210 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
211     eq_debug_entry)
212 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
214 
215 /* Macro: record that ev is now setup (that is, ready for an add) */
216 #define event_debug_note_setup_(ev) do {				\
217 	if (event_debug_mode_on_) {					\
218 		struct event_debug_entry *dent,find;			\
219 		find.ptr = (ev);					\
220 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
221 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
222 		if (dent) {						\
223 			dent->added = 0;				\
224 		} else {						\
225 			dent = mm_malloc(sizeof(*dent));		\
226 			if (!dent)					\
227 				event_err(1,				\
228 				    "Out of memory in debugging code");	\
229 			dent->ptr = (ev);				\
230 			dent->added = 0;				\
231 			HT_INSERT(event_debug_map, &global_debug_map, dent); \
232 		}							\
233 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
234 	}								\
235 	event_debug_mode_too_late = 1;					\
236 	} while (0)
237 /* Macro: record that ev is no longer setup */
238 #define event_debug_note_teardown_(ev) do {				\
239 	if (event_debug_mode_on_) {					\
240 		struct event_debug_entry *dent,find;			\
241 		find.ptr = (ev);					\
242 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
243 		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
244 		if (dent)						\
245 			mm_free(dent);					\
246 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
247 	}								\
248 	event_debug_mode_too_late = 1;					\
249 	} while (0)
250 /* Macro: record that ev is now added */
251 #define event_debug_note_add_(ev)	do {				\
252 	if (event_debug_mode_on_) {					\
253 		struct event_debug_entry *dent,find;			\
254 		find.ptr = (ev);					\
255 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
256 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
257 		if (dent) {						\
258 			dent->added = 1;				\
259 		} else {						\
260 			event_errx(EVENT_ERR_ABORT_,			\
261 			    "%s: noting an add on a non-setup event %p" \
262 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
263 			    ", flags: 0x%x)",				\
264 			    __func__, (ev), (ev)->ev_events,		\
265 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
266 		}							\
267 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
268 	}								\
269 	event_debug_mode_too_late = 1;					\
270 	} while (0)
271 /* Macro: record that ev is no longer added */
272 #define event_debug_note_del_(ev) do {					\
273 	if (event_debug_mode_on_) {					\
274 		struct event_debug_entry *dent,find;			\
275 		find.ptr = (ev);					\
276 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
277 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
278 		if (dent) {						\
279 			dent->added = 0;				\
280 		} else {						\
281 			event_errx(EVENT_ERR_ABORT_,			\
282 			    "%s: noting a del on a non-setup event %p"	\
283 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
284 			    ", flags: 0x%x)",				\
285 			    __func__, (ev), (ev)->ev_events,		\
286 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
287 		}							\
288 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
289 	}								\
290 	event_debug_mode_too_late = 1;					\
291 	} while (0)
292 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
293 #define event_debug_assert_is_setup_(ev) do {				\
294 	if (event_debug_mode_on_) {					\
295 		struct event_debug_entry *dent,find;			\
296 		find.ptr = (ev);					\
297 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
298 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
299 		if (!dent) {						\
300 			event_errx(EVENT_ERR_ABORT_,			\
301 			    "%s called on a non-initialized event %p"	\
302 			    " (events: 0x%x, fd: "EV_SOCK_FMT\
303 			    ", flags: 0x%x)",				\
304 			    __func__, (ev), (ev)->ev_events,		\
305 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
306 		}							\
307 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
308 	}								\
309 	} while (0)
310 /* Macro: assert that ev is not added (i.e., okay to tear down or set
311  * up again) */
312 #define event_debug_assert_not_added_(ev) do {				\
313 	if (event_debug_mode_on_) {					\
314 		struct event_debug_entry *dent,find;			\
315 		find.ptr = (ev);					\
316 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
317 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
318 		if (dent && dent->added) {				\
319 			event_errx(EVENT_ERR_ABORT_,			\
320 			    "%s called on an already added event %p"	\
321 			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
322 			    "flags: 0x%x)",				\
323 			    __func__, (ev), (ev)->ev_events,		\
324 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
325 		}							\
326 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
327 	}								\
328 	} while (0)
329 #else
330 #define event_debug_note_setup_(ev) \
331 	((void)0)
332 #define event_debug_note_teardown_(ev) \
333 	((void)0)
334 #define event_debug_note_add_(ev) \
335 	((void)0)
336 #define event_debug_note_del_(ev) \
337 	((void)0)
338 #define event_debug_assert_is_setup_(ev) \
339 	((void)0)
340 #define event_debug_assert_not_added_(ev) \
341 	((void)0)
342 #endif
343 
344 #define EVENT_BASE_ASSERT_LOCKED(base)		\
345 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
346 
347 /* How often (in seconds) do we check for changes in wall clock time relative
348  * to monotonic time?  Set this to -1 for 'never.' */
349 #define CLOCK_SYNC_INTERVAL 5
350 
351 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
352  * on 'base'.  If there is a cached time, return it.  Otherwise, use
353  * clock_gettime or gettimeofday as appropriate to find out the right time.
354  * Return 0 on success, -1 on failure.
355  */
356 static int
357 gettime(struct event_base *base, struct timeval *tp)
358 {
359 	EVENT_BASE_ASSERT_LOCKED(base);
360 
361 	if (base->tv_cache.tv_sec) {
362 		*tp = base->tv_cache;
363 		return (0);
364 	}
365 
366 	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
367 		return -1;
368 	}
369 
370 	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
371 	    < tp->tv_sec) {
372 		struct timeval tv;
373 		evutil_gettimeofday(&tv,NULL);
374 		evutil_timersub(&tv, tp, &base->tv_clock_diff);
375 		base->last_updated_clock_diff = tp->tv_sec;
376 	}
377 
378 	return 0;
379 }
380 
381 int
382 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
383 {
384 	int r;
385 	if (!base) {
386 		base = current_base;
387 		if (!current_base)
388 			return evutil_gettimeofday(tv, NULL);
389 	}
390 
391 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
392 	if (base->tv_cache.tv_sec == 0) {
393 		r = evutil_gettimeofday(tv, NULL);
394 	} else {
395 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
396 		r = 0;
397 	}
398 	EVBASE_RELEASE_LOCK(base, th_base_lock);
399 	return r;
400 }
401 
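/*
 * Usage sketch (disabled; not part of the original file): how a callback
 * might use event_base_gettimeofday_cached() to get a cheap wall-clock
 * timestamp that is only as fresh as the current dispatch pass.  The
 * callback name and its argument layout below are illustrative.
 */
#if 0
static void
example_read_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* Uses the cached time while the loop is running; falls back to a
	 * real evutil_gettimeofday() call otherwise. */
	event_base_gettimeofday_cached(base, &now);
	/* ... timestamp whatever was read from fd with 'now' ... */
}
#endif
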
402 /** Make 'base' have no current cached time. */
403 static inline void
404 clear_time_cache(struct event_base *base)
405 {
406 	base->tv_cache.tv_sec = 0;
407 }
408 
409 /** Replace the cached time in 'base' with the current time. */
410 static inline void
411 update_time_cache(struct event_base *base)
412 {
413 	base->tv_cache.tv_sec = 0;
414 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
415 	    gettime(base, &base->tv_cache);
416 }
417 
418 int
419 event_base_update_cache_time(struct event_base *base)
420 {
421 
422 	if (!base) {
423 		base = current_base;
424 		if (!current_base)
425 			return -1;
426 	}
427 
428 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
429 	if (base->running_loop)
430 		update_time_cache(base);
431 	EVBASE_RELEASE_LOCK(base, th_base_lock);
432 	return 0;
433 }
434 
435 static inline struct event *
436 event_callback_to_event(struct event_callback *evcb)
437 {
438 	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
439 	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
440 }
441 
442 static inline struct event_callback *
443 event_to_event_callback(struct event *ev)
444 {
445 	return &ev->ev_evcallback;
446 }
447 
448 struct event_base *
449 event_init(void)
450 {
451 	struct event_base *base = event_base_new_with_config(NULL);
452 
453 	if (base == NULL) {
454 		event_errx(1, "%s: Unable to construct event_base", __func__);
455 		return NULL;
456 	}
457 
458 	current_base = base;
459 
460 	return (base);
461 }
462 
463 struct event_base *
464 event_base_new(void)
465 {
466 	struct event_base *base = NULL;
467 	struct event_config *cfg = event_config_new();
468 	if (cfg) {
469 		base = event_base_new_with_config(cfg);
470 		event_config_free(cfg);
471 	}
472 	return base;
473 }
474 
475 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
476  * avoid. */
477 static int
478 event_config_is_avoided_method(const struct event_config *cfg,
479     const char *method)
480 {
481 	struct event_config_entry *entry;
482 
483 	TAILQ_FOREACH(entry, &cfg->entries, next) {
484 		if (entry->avoid_method != NULL &&
485 		    strcmp(entry->avoid_method, method) == 0)
486 			return (1);
487 	}
488 
489 	return (0);
490 }
491 
492 /** Return true iff 'method' is disabled according to the environment. */
493 static int
494 event_is_method_disabled(const char *name)
495 {
496 	char environment[64];
497 	int i;
498 
499 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
500 	for (i = 8; environment[i] != '\0'; ++i)
501 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
502 	/* Note that evutil_getenv_() ignores the environment entirely if
503 	 * we're setuid */
504 	return (evutil_getenv_(environment) != NULL);
505 }
506 
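/*
 * Usage sketch (disabled): disabling a backend through the environment.
 * The variable name is "EVENT_NO" plus the uppercased method name, e.g.
 * EVENT_NOEPOLL or EVENT_NOSELECT.  The helper below is illustrative only;
 * the variable is ignored for setuid processes and for bases created with
 * EVENT_BASE_FLAG_IGNORE_ENV.
 */
#if 0
static struct event_base *
example_base_without_epoll(void)
{
	/* Must be set before the event_base is created. */
	setenv("EVENT_NOEPOLL", "1", 1);
	return event_base_new();
}
#endif
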
507 int
508 event_base_get_features(const struct event_base *base)
509 {
510 	return base->evsel->features;
511 }
512 
513 void
514 event_enable_debug_mode(void)
515 {
516 #ifndef EVENT__DISABLE_DEBUG_MODE
517 	if (event_debug_mode_on_)
518 		event_errx(1, "%s was called twice!", __func__);
519 	if (event_debug_mode_too_late)
520 		event_errx(1, "%s must be called *before* creating any events "
521 		    "or event_bases",__func__);
522 
523 	event_debug_mode_on_ = 1;
524 
525 	HT_INIT(event_debug_map, &global_debug_map);
526 #endif
527 }
528 
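/*
 * Usage sketch (disabled): debug mode has to be switched on before any
 * event or event_base is created, otherwise the checks above make it fail
 * with an error.  main() here just stands in for program initialization.
 */
#if 0
int
main(int argc, char **argv)
{
	event_enable_debug_mode();	/* before any event_base_new() */

	struct event_base *base = event_base_new();
	/* ... set up events and run the loop ... */
	event_base_free(base);
	return 0;
}
#endif
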
529 #if 0
530 void
531 event_disable_debug_mode(void)
532 {
533 	struct event_debug_entry **ent, *victim;
534 
535 	EVLOCK_LOCK(event_debug_map_lock_, 0);
536 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
537 		victim = *ent;
538 		ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
539 		mm_free(victim);
540 	}
541 	HT_CLEAR(event_debug_map, &global_debug_map);
542 	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
543 }
544 #endif
545 
546 struct event_base *
547 event_base_new_with_config(const struct event_config *cfg)
548 {
549 	int i;
550 	struct event_base *base;
551 	int should_check_environment;
552 
553 #ifndef EVENT__DISABLE_DEBUG_MODE
554 	event_debug_mode_too_late = 1;
555 #endif
556 
557 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
558 		event_warn("%s: calloc", __func__);
559 		return NULL;
560 	}
561 
562 	if (cfg)
563 		base->flags = cfg->flags;
564 
565 	should_check_environment =
566 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
567 
568 	{
569 		struct timeval tmp;
570 		int precise_time =
571 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
572 		int flags;
573 		if (should_check_environment && !precise_time) {
574 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
575 			base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
576 		}
577 		flags = precise_time ? EV_MONOT_PRECISE : 0;
578 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
579 
580 		gettime(base, &tmp);
581 	}
582 
583 	min_heap_ctor_(&base->timeheap);
584 
585 	base->sig.ev_signal_pair[0] = -1;
586 	base->sig.ev_signal_pair[1] = -1;
587 	base->th_notify_fd[0] = -1;
588 	base->th_notify_fd[1] = -1;
589 
590 	TAILQ_INIT(&base->active_later_queue);
591 
592 	evmap_io_initmap_(&base->io);
593 	evmap_signal_initmap_(&base->sigmap);
594 	event_changelist_init_(&base->changelist);
595 
596 	base->evbase = NULL;
597 
598 	if (cfg) {
599 		memcpy(&base->max_dispatch_time,
600 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
601 		base->limit_callbacks_after_prio =
602 		    cfg->limit_callbacks_after_prio;
603 	} else {
604 		base->max_dispatch_time.tv_sec = -1;
605 		base->limit_callbacks_after_prio = 1;
606 	}
607 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
608 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
609 	} else {
610 		base->max_dispatch_callbacks = INT_MAX;
611 	}
612 	if (base->max_dispatch_callbacks == INT_MAX &&
613 	    base->max_dispatch_time.tv_sec == -1)
614 		base->limit_callbacks_after_prio = INT_MAX;
615 
616 	for (i = 0; eventops[i] && !base->evbase; i++) {
617 		if (cfg != NULL) {
618 			/* determine if this backend should be avoided */
619 			if (event_config_is_avoided_method(cfg,
620 				eventops[i]->name))
621 				continue;
622 			if ((eventops[i]->features & cfg->require_features)
623 			    != cfg->require_features)
624 				continue;
625 		}
626 
627 		/* also obey the environment variables */
628 		if (should_check_environment &&
629 		    event_is_method_disabled(eventops[i]->name))
630 			continue;
631 
632 		base->evsel = eventops[i];
633 
634 		base->evbase = base->evsel->init(base);
635 	}
636 
637 	if (base->evbase == NULL) {
638 		event_warnx("%s: no event mechanism available",
639 		    __func__);
640 		base->evsel = NULL;
641 		event_base_free(base);
642 		return NULL;
643 	}
644 
645 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
646 		event_msgx("libevent using: %s", base->evsel->name);
647 
648 	/* allocate a single active event queue */
649 	if (event_base_priority_init(base, 1) < 0) {
650 		event_base_free(base);
651 		return NULL;
652 	}
653 
654 	/* prepare for threading */
655 
656 #ifndef EVENT__DISABLE_THREAD_SUPPORT
657 	if (EVTHREAD_LOCKING_ENABLED() &&
658 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
659 		int r;
660 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
661 		EVTHREAD_ALLOC_COND(base->current_event_cond);
662 		r = evthread_make_base_notifiable(base);
663 		if (r<0) {
664 			event_warnx("%s: Unable to make base notifiable.", __func__);
665 			event_base_free(base);
666 			return NULL;
667 		}
668 	}
669 #endif
670 
671 #ifdef _WIN32
672 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
673 		event_base_start_iocp_(base, cfg->n_cpus_hint);
674 #endif
675 
676 	return (base);
677 }
678 
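/*
 * Usage sketch (disabled): steering the backend-selection loop above with an
 * event_config.  Avoiding "select" and requiring O(1) dispatch are only
 * examples of the available knobs.
 */
#if 0
static struct event_base *
example_configured_base(void)
{
	struct event_base *base;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	base = event_base_new_with_config(cfg);	/* NULL if nothing matches */
	event_config_free(cfg);
	return base;
}
#endif
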
679 int
680 event_base_start_iocp_(struct event_base *base, int n_cpus)
681 {
682 #ifdef _WIN32
683 	if (base->iocp)
684 		return 0;
685 	base->iocp = event_iocp_port_launch_(n_cpus);
686 	if (!base->iocp) {
687 		event_warnx("%s: Couldn't launch IOCP", __func__);
688 		return -1;
689 	}
690 	return 0;
691 #else
692 	return -1;
693 #endif
694 }
695 
696 void
697 event_base_stop_iocp_(struct event_base *base)
698 {
699 #ifdef _WIN32
700 	int rv;
701 
702 	if (!base->iocp)
703 		return;
704 	rv = event_iocp_shutdown_(base->iocp, -1);
705 	EVUTIL_ASSERT(rv >= 0);
706 	base->iocp = NULL;
707 #endif
708 }
709 
710 static int
711 event_base_cancel_single_callback_(struct event_base *base,
712     struct event_callback *evcb,
713     int run_finalizers)
714 {
715 	int result = 0;
716 
717 	if (evcb->evcb_flags & EVLIST_INIT) {
718 		struct event *ev = event_callback_to_event(evcb);
719 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
720 			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
721 			result = 1;
722 		}
723 	} else {
724 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
725 		event_callback_cancel_nolock_(base, evcb, 1);
726 		EVBASE_RELEASE_LOCK(base, th_base_lock);
727 		result = 1;
728 	}
729 
730 	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
731 		switch (evcb->evcb_closure) {
732 		case EV_CLOSURE_EVENT_FINALIZE:
733 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
734 			struct event *ev = event_callback_to_event(evcb);
735 			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
736 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
737 				mm_free(ev);
738 			break;
739 		}
740 		case EV_CLOSURE_CB_FINALIZE:
741 			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
742 			break;
743 		default:
744 			break;
745 		}
746 	}
747 	return result;
748 }
749 
750 static void
751 event_base_free_(struct event_base *base, int run_finalizers)
752 {
753 	int i, n_deleted=0;
754 	struct event *ev;
755 	/* XXXX grab the lock? If there is contention when one thread frees
756 	 * the base, then the contending thread will be very sad soon. */
757 
758 	/* event_base_free(NULL) is how to free the current_base if we
759 	 * made it with event_init and forgot to hold a reference to it. */
760 	if (base == NULL && current_base)
761 		base = current_base;
762 	/* Don't actually free NULL. */
763 	if (base == NULL) {
764 		event_warnx("%s: no base to free", __func__);
765 		return;
766 	}
767 	/* XXX(niels) - check for internal events first */
768 
769 #ifdef _WIN32
770 	event_base_stop_iocp_(base);
771 #endif
772 
773 	/* Close the threading fds if we have them */
774 	if (base->th_notify_fd[0] != -1) {
775 		event_del(&base->th_notify);
776 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
777 		if (base->th_notify_fd[1] != -1)
778 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
779 		base->th_notify_fd[0] = -1;
780 		base->th_notify_fd[1] = -1;
781 		event_debug_unassign(&base->th_notify);
782 	}
783 
784 	/* Delete all non-internal events. */
785 	evmap_delete_all_(base);
786 
787 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
788 		event_del(ev);
789 		++n_deleted;
790 	}
791 	for (i = 0; i < base->n_common_timeouts; ++i) {
792 		struct common_timeout_list *ctl =
793 		    base->common_timeout_queues[i];
794 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
795 		event_debug_unassign(&ctl->timeout_event);
796 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
797 			struct event *next = TAILQ_NEXT(ev,
798 			    ev_timeout_pos.ev_next_with_common_timeout);
799 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
800 				event_del(ev);
801 				++n_deleted;
802 			}
803 			ev = next;
804 		}
805 		mm_free(ctl);
806 	}
807 	if (base->common_timeout_queues)
808 		mm_free(base->common_timeout_queues);
809 
810 	for (i = 0; i < base->nactivequeues; ++i) {
811 		struct event_callback *evcb, *next;
812 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
813 			next = TAILQ_NEXT(evcb, evcb_active_next);
814 			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
815 			evcb = next;
816 		}
817 	}
818 	{
819 		struct event_callback *evcb;
820 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
821 			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
822 		}
823 	}
824 
825 
826 	if (n_deleted)
827 		event_debug(("%s: %d events were still set in base",
828 			__func__, n_deleted));
829 
830 	while (LIST_FIRST(&base->once_events)) {
831 		struct event_once *eonce = LIST_FIRST(&base->once_events);
832 		LIST_REMOVE(eonce, next_once);
833 		mm_free(eonce);
834 	}
835 
836 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
837 		base->evsel->dealloc(base);
838 
839 	for (i = 0; i < base->nactivequeues; ++i)
840 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
841 
842 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
843 	min_heap_dtor_(&base->timeheap);
844 
845 	mm_free(base->activequeues);
846 
847 	evmap_io_clear_(&base->io);
848 	evmap_signal_clear_(&base->sigmap);
849 	event_changelist_freemem_(&base->changelist);
850 
851 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
852 	EVTHREAD_FREE_COND(base->current_event_cond);
853 
854 	/* If we're freeing current_base, there won't be a current_base. */
855 	if (base == current_base)
856 		current_base = NULL;
857 	mm_free(base);
858 }
859 
860 void
861 event_base_free_nofinalize(struct event_base *base)
862 {
863 	event_base_free_(base, 0);
864 }
865 
866 void
867 event_base_free(struct event_base *base)
868 {
869 	event_base_free_(base, 1);
870 }
871 
872 /* Fake eventop; used to disable the backend temporarily inside event_reinit
873  * so that we can call event_del() on an event without telling the backend.
874  */
875 static int
876 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
877     short events, void *fdinfo)
878 {
879 	return 0;
880 }
881 const struct eventop nil_eventop = {
882 	"nil",
883 	NULL, /* init: unused. */
884 	NULL, /* add: unused. */
885 	nil_backend_del, /* del: used, so needs to be killed. */
886 	NULL, /* dispatch: unused. */
887 	NULL, /* dealloc: unused. */
888 	0, 0, 0
889 };
890 
891 /* reinitialize the event base after a fork */
892 int
893 event_reinit(struct event_base *base)
894 {
895 	const struct eventop *evsel;
896 	int res = 0;
897 	int was_notifiable = 0;
898 	int had_signal_added = 0;
899 
900 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
901 
902 	evsel = base->evsel;
903 
904 	/* check if this event mechanism requires reinit on the backend */
905 	if (evsel->need_reinit) {
906 		/* We're going to call event_del() on our notify events (the
907 		 * ones that tell about signals and wakeup events).  But we
908 		 * don't actually want to tell the backend to change its
909 		 * state, since it might still share some resource (a kqueue,
910 		 * an epoll fd) with the parent process, and we don't want to
911 		 * delete the fds from _that_ backend, so we temporarily stub out
912 		 * the evsel with a replacement.
913 		 */
914 		base->evsel = &nil_eventop;
915 	}
916 
917 	/* We need to create a new signal-notification fd and a new
918 	 * thread-notification fd.  Otherwise, we'll still share those with
919 	 * the parent process, which would make any notification sent to them
920 	 * get received by one or both of the event loops, more or less at
921 	 * random.
922 	 */
923 	if (base->sig.ev_signal_added) {
924 		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
925 		event_debug_unassign(&base->sig.ev_signal);
926 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
927 		if (base->sig.ev_signal_pair[0] != -1)
928 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
929 		if (base->sig.ev_signal_pair[1] != -1)
930 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
931 		had_signal_added = 1;
932 		base->sig.ev_signal_added = 0;
933 	}
934 	if (base->th_notify_fn != NULL) {
935 		was_notifiable = 1;
936 		base->th_notify_fn = NULL;
937 	}
938 	if (base->th_notify_fd[0] != -1) {
939 		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
940 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
941 		if (base->th_notify_fd[1] != -1)
942 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
943 		base->th_notify_fd[0] = -1;
944 		base->th_notify_fd[1] = -1;
945 		event_debug_unassign(&base->th_notify);
946 	}
947 
948 	/* Replace the original evsel. */
949         base->evsel = evsel;
950 
951 	if (evsel->need_reinit) {
952 		/* Reconstruct the backend through brute-force, so that we do
953 		 * not share any structures with the parent process. For some
954 		 * backends, this is necessary: epoll and kqueue, for
955 		 * instance, have events associated with a kernel
956 		 * structure. If we didn't reinitialize, we'd share that
957 		 * structure with the parent process, and any changes made by
958 		 * the parent would affect our backend's behavior (and vice
959 		 * versa).
960 		 */
961 		if (base->evsel->dealloc != NULL)
962 			base->evsel->dealloc(base);
963 		base->evbase = evsel->init(base);
964 		if (base->evbase == NULL) {
965 			event_errx(1,
966 			   "%s: could not reinitialize event mechanism",
967 			   __func__);
968 			res = -1;
969 			goto done;
970 		}
971 
972 		/* Empty out the changelist (if any): we are starting from a
973 		 * blank slate. */
974 		event_changelist_freemem_(&base->changelist);
975 
976 		/* Tell the event maps to re-inform the backend about all
977 		 * pending events. This will make the signal notification
978 		 * event get re-created if necessary. */
979 		if (evmap_reinit_(base) < 0)
980 			res = -1;
981 	} else {
982 		if (had_signal_added)
983 			res = evsig_init_(base);
984 	}
985 
986 	/* If we were notifiable before, and nothing just exploded, become
987 	 * notifiable again. */
988 	if (was_notifiable && res == 0)
989 		res = evthread_make_base_notifiable_nolock_(base);
990 
991 done:
992 	EVBASE_RELEASE_LOCK(base, th_base_lock);
993 	return (res);
994 }
995 
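/*
 * Usage sketch (disabled): event_reinit() exists for one case -- keeping an
 * event_base usable in the child after fork().  Error handling is trimmed;
 * the surrounding names are illustrative.
 */
#if 0
static void
example_fork_and_reinit(struct event_base *base)
{
	pid_t pid = fork();	/* <unistd.h>, included above on POSIX builds */
	if (pid == 0) {
		/* Child: rebuild backend state (epoll fd, kqueue, signal and
		 * thread-notification fds) so nothing is shared with the
		 * parent. */
		if (event_reinit(base) < 0)
			exit(1);
		event_base_dispatch(base);
		exit(0);
	}
	/* Parent keeps using 'base' unchanged. */
}
#endif
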
996 const char **
997 event_get_supported_methods(void)
998 {
999 	static const char **methods = NULL;
1000 	const struct eventop **method;
1001 	const char **tmp;
1002 	int i = 0, k;
1003 
1004 	/* count all methods */
1005 	for (method = &eventops[0]; *method != NULL; ++method) {
1006 		++i;
1007 	}
1008 
1009 	/* allocate one more than we need for the NULL pointer */
1010 	tmp = mm_calloc((i + 1), sizeof(char *));
1011 	if (tmp == NULL)
1012 		return (NULL);
1013 
1014 	/* populate the array with the supported methods */
1015 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1016 		tmp[i++] = eventops[k]->name;
1017 	}
1018 	tmp[i] = NULL;
1019 
1020 	if (methods != NULL)
1021 		mm_free((char**)methods);
1022 
1023 	methods = tmp;
1024 
1025 	return (methods);
1026 }
1027 
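/*
 * Usage sketch (disabled): listing the compiled-in backends.  The returned
 * array is NULL-terminated and owned by the library, so it must not be
 * freed by the caller.
 */
#if 0
static void
example_print_methods(void)
{
	int i;
	const char **methods = event_get_supported_methods();
	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("available method: %s\n", methods[i]);
}
#endif
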
1028 struct event_config *
1029 event_config_new(void)
1030 {
1031 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1032 
1033 	if (cfg == NULL)
1034 		return (NULL);
1035 
1036 	TAILQ_INIT(&cfg->entries);
1037 	cfg->max_dispatch_interval.tv_sec = -1;
1038 	cfg->max_dispatch_callbacks = INT_MAX;
1039 	cfg->limit_callbacks_after_prio = 1;
1040 
1041 	return (cfg);
1042 }
1043 
1044 static void
1045 event_config_entry_free(struct event_config_entry *entry)
1046 {
1047 	if (entry->avoid_method != NULL)
1048 		mm_free((char *)entry->avoid_method);
1049 	mm_free(entry);
1050 }
1051 
1052 void
1053 event_config_free(struct event_config *cfg)
1054 {
1055 	struct event_config_entry *entry;
1056 
1057 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1058 		TAILQ_REMOVE(&cfg->entries, entry, next);
1059 		event_config_entry_free(entry);
1060 	}
1061 	mm_free(cfg);
1062 }
1063 
1064 int
1065 event_config_set_flag(struct event_config *cfg, int flag)
1066 {
1067 	if (!cfg)
1068 		return -1;
1069 	cfg->flags |= flag;
1070 	return 0;
1071 }
1072 
1073 int
1074 event_config_avoid_method(struct event_config *cfg, const char *method)
1075 {
1076 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1077 	if (entry == NULL)
1078 		return (-1);
1079 
1080 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1081 		mm_free(entry);
1082 		return (-1);
1083 	}
1084 
1085 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1086 
1087 	return (0);
1088 }
1089 
1090 int
1091 event_config_require_features(struct event_config *cfg,
1092     int features)
1093 {
1094 	if (!cfg)
1095 		return (-1);
1096 	cfg->require_features = features;
1097 	return (0);
1098 }
1099 
1100 int
1101 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1102 {
1103 	if (!cfg)
1104 		return (-1);
1105 	cfg->n_cpus_hint = cpus;
1106 	return (0);
1107 }
1108 
1109 int
1110 event_config_set_max_dispatch_interval(struct event_config *cfg,
1111     const struct timeval *max_interval, int max_callbacks, int min_priority)
1112 {
1113 	if (max_interval)
1114 		memcpy(&cfg->max_dispatch_interval, max_interval,
1115 		    sizeof(struct timeval));
1116 	else
1117 		cfg->max_dispatch_interval.tv_sec = -1;
1118 	cfg->max_dispatch_callbacks =
1119 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1120 	if (min_priority < 0)
1121 		min_priority = 0;
1122 	cfg->limit_callbacks_after_prio = min_priority;
1123 	return (0);
1124 }
1125 
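/*
 * Usage sketch (disabled): capping how long one dispatch pass may spend in
 * lower-priority callbacks before the loop checks for new events again.
 * The numbers are arbitrary examples.
 */
#if 0
static struct event_base *
example_responsive_base(void)
{
	struct event_base *base;
	struct event_config *cfg = event_config_new();
	struct timeval fifty_msec = { 0, 50*1000 };
	if (!cfg)
		return NULL;
	/* Priorities numbered 1 and above get at most 16 callbacks or
	 * 50 msec per pass; priority 0 stays unlimited. */
	event_config_set_max_dispatch_interval(cfg, &fifty_msec, 16, 1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;
}
#endif
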
1126 int
1127 event_priority_init(int npriorities)
1128 {
1129 	return event_base_priority_init(current_base, npriorities);
1130 }
1131 
1132 int
1133 event_base_priority_init(struct event_base *base, int npriorities)
1134 {
1135 	int i, r;
1136 	r = -1;
1137 
1138 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1139 
1140 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1141 	    || npriorities >= EVENT_MAX_PRIORITIES)
1142 		goto err;
1143 
1144 	if (npriorities == base->nactivequeues)
1145 		goto ok;
1146 
1147 	if (base->nactivequeues) {
1148 		mm_free(base->activequeues);
1149 		base->nactivequeues = 0;
1150 	}
1151 
1152 	/* Allocate our priority queues */
1153 	base->activequeues = (struct evcallback_list *)
1154 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1155 	if (base->activequeues == NULL) {
1156 		event_warn("%s: calloc", __func__);
1157 		goto err;
1158 	}
1159 	base->nactivequeues = npriorities;
1160 
1161 	for (i = 0; i < base->nactivequeues; ++i) {
1162 		TAILQ_INIT(&base->activequeues[i]);
1163 	}
1164 
1165 ok:
1166 	r = 0;
1167 err:
1168 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1169 	return (r);
1170 }
1171 
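/*
 * Usage sketch (disabled): using more than one priority queue.  An event's
 * priority defaults to nactivequeues/2, and event_priority_set() may only
 * be called while the event is not active.  The event names are
 * illustrative.
 */
#if 0
static void
example_use_priorities(struct event_base *base,
    struct event *urgent_ev, struct event *bulk_ev)
{
	event_base_priority_init(base, 2);	/* 0 = highest, 1 = lowest */
	event_priority_set(urgent_ev, 0);
	event_priority_set(bulk_ev, 1);
}
#endif
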
1172 int
1173 event_base_get_npriorities(struct event_base *base)
1174 {
1175 
1176 	int n;
1177 	if (base == NULL)
1178 		base = current_base;
1179 
1180 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1181 	n = base->nactivequeues;
1182 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1183 	return (n);
1184 }
1185 
1186 int
1187 event_base_get_num_events(struct event_base *base, unsigned int type)
1188 {
1189 	int r = 0;
1190 
1191 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1192 
1193 	if (type & EVENT_BASE_COUNT_ACTIVE)
1194 		r += base->event_count_active;
1195 
1196 	if (type & EVENT_BASE_COUNT_VIRTUAL)
1197 		r += base->virtual_event_count;
1198 
1199 	if (type & EVENT_BASE_COUNT_ADDED)
1200 		r += base->event_count;
1201 
1202 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1203 
1204 	return r;
1205 }
1206 
1207 int
1208 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1209 {
1210 	int r = 0;
1211 
1212 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1213 
1214 	if (type & EVENT_BASE_COUNT_ACTIVE) {
1215 		r += base->event_count_active_max;
1216 		if (clear)
1217 			base->event_count_active_max = 0;
1218 	}
1219 
1220 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
1221 		r += base->virtual_event_count_max;
1222 		if (clear)
1223 			base->virtual_event_count_max = 0;
1224 	}
1225 
1226 	if (type & EVENT_BASE_COUNT_ADDED) {
1227 		r += base->event_count_max;
1228 		if (clear)
1229 			base->event_count_max = 0;
1230 	}
1231 
1232 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1233 
1234 	return r;
1235 }
1236 
1237 /* Returns true iff we're currently watching any events. */
1238 static int
1239 event_haveevents(struct event_base *base)
1240 {
1241 	/* Caller must hold th_base_lock */
1242 	return (base->virtual_event_count > 0 || base->event_count > 0);
1243 }
1244 
1245 /* "closure" function called when processing active signal events */
1246 static inline void
1247 event_signal_closure(struct event_base *base, struct event *ev)
1248 {
1249 	short ncalls;
1250 	int should_break;
1251 
1252 	/* Allows deletes to work */
1253 	ncalls = ev->ev_ncalls;
1254 	if (ncalls != 0)
1255 		ev->ev_pncalls = &ncalls;
1256 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1257 	while (ncalls) {
1258 		ncalls--;
1259 		ev->ev_ncalls = ncalls;
1260 		if (ncalls == 0)
1261 			ev->ev_pncalls = NULL;
1262 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1263 
1264 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1265 		should_break = base->event_break;
1266 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1267 
1268 		if (should_break) {
1269 			if (ncalls != 0)
1270 				ev->ev_pncalls = NULL;
1271 			return;
1272 		}
1273 	}
1274 }
1275 
1276 /* Common timeouts are special timeouts that are handled as queues rather than
1277  * in the minheap.  This is more efficient than the minheap if we happen to
1278  * know that we're going to get several thousand timeout events all with
1279  * the same timeout value.
1280  *
1281  * Since all our timeout handling code assumes timevals can be copied,
1282  * assigned, etc, we can't use a "magic pointer" to encode these common
1283  * timeouts.  Searching through a list to see if every timeout is common could
1284  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1285  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1286  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1287  * of index into the event_base's array of common timeouts.
1288  */
1289 
1290 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1291 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1292 #define COMMON_TIMEOUT_IDX_SHIFT 20
1293 #define COMMON_TIMEOUT_MASK     0xf0000000
1294 #define COMMON_TIMEOUT_MAGIC    0x50000000
1295 
1296 #define COMMON_TIMEOUT_IDX(tv) \
1297 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1298 
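/*
 * Worked example (added for clarity; values follow from the masks above):
 * with queue index 3 and a 250000-usec duration, the encoded tv_usec is
 *
 *     COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 250000
 *         == 0x50000000    |        0x00300000               | 0x0003d090
 *         == 0x5033d090
 *
 * Bits 31..28 hold the magic 0x5, bits 27..20 hold the queue index, and
 * bits 19..0 hold the real microseconds (always < 1000000).
 */
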
1299 /** Return true iff 'tv' is a common timeout in 'base' */
1300 static inline int
1301 is_common_timeout(const struct timeval *tv,
1302     const struct event_base *base)
1303 {
1304 	int idx;
1305 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1306 		return 0;
1307 	idx = COMMON_TIMEOUT_IDX(tv);
1308 	return idx < base->n_common_timeouts;
1309 }
1310 
1311 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1312  * one is a common timeout. */
1313 static inline int
1314 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1315 {
1316 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1317 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1318 }
1319 
1320 /** Requires that 'tv' is a common timeout.  Return the corresponding
1321  * common_timeout_list. */
1322 static inline struct common_timeout_list *
1323 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1324 {
1325 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1326 }
1327 
1328 #if 0
1329 static inline int
1330 common_timeout_ok(const struct timeval *tv,
1331     struct event_base *base)
1332 {
1333 	const struct timeval *expect =
1334 	    &get_common_timeout_list(base, tv)->duration;
1335 	return tv->tv_sec == expect->tv_sec &&
1336 	    tv->tv_usec == expect->tv_usec;
1337 }
1338 #endif
1339 
1340 /* Add the timeout for the first event in the given common timeout list to the
1341  * event_base's minheap. */
1342 static void
1343 common_timeout_schedule(struct common_timeout_list *ctl,
1344     const struct timeval *now, struct event *head)
1345 {
1346 	struct timeval timeout = head->ev_timeout;
1347 	timeout.tv_usec &= MICROSECONDS_MASK;
1348 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1349 }
1350 
1351 /* Callback: invoked when the timeout for a common timeout queue triggers.
1352  * This means that (at least) the first event in that queue should be run,
1353  * and the timeout should be rescheduled if there are more events. */
1354 static void
1355 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1356 {
1357 	struct timeval now;
1358 	struct common_timeout_list *ctl = arg;
1359 	struct event_base *base = ctl->base;
1360 	struct event *ev = NULL;
1361 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1362 	gettime(base, &now);
1363 	while (1) {
1364 		ev = TAILQ_FIRST(&ctl->events);
1365 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1366 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1367 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1368 			break;
1369 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1370 		event_active_nolock_(ev, EV_TIMEOUT, 1);
1371 	}
1372 	if (ev)
1373 		common_timeout_schedule(ctl, &now, ev);
1374 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1375 }
1376 
1377 #define MAX_COMMON_TIMEOUTS 256
1378 
1379 const struct timeval *
1380 event_base_init_common_timeout(struct event_base *base,
1381     const struct timeval *duration)
1382 {
1383 	int i;
1384 	struct timeval tv;
1385 	const struct timeval *result=NULL;
1386 	struct common_timeout_list *new_ctl;
1387 
1388 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1389 	if (duration->tv_usec > 1000000) {
1390 		memcpy(&tv, duration, sizeof(struct timeval));
1391 		if (is_common_timeout(duration, base))
1392 			tv.tv_usec &= MICROSECONDS_MASK;
1393 		tv.tv_sec += tv.tv_usec / 1000000;
1394 		tv.tv_usec %= 1000000;
1395 		duration = &tv;
1396 	}
1397 	for (i = 0; i < base->n_common_timeouts; ++i) {
1398 		const struct common_timeout_list *ctl =
1399 		    base->common_timeout_queues[i];
1400 		if (duration->tv_sec == ctl->duration.tv_sec &&
1401 		    duration->tv_usec ==
1402 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1403 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1404 			result = &ctl->duration;
1405 			goto done;
1406 		}
1407 	}
1408 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1409 		event_warnx("%s: Too many common timeouts already in use; "
1410 		    "we only support %d per event_base", __func__,
1411 		    MAX_COMMON_TIMEOUTS);
1412 		goto done;
1413 	}
1414 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1415 		int n = base->n_common_timeouts < 16 ? 16 :
1416 		    base->n_common_timeouts*2;
1417 		struct common_timeout_list **newqueues =
1418 		    mm_realloc(base->common_timeout_queues,
1419 			n*sizeof(struct common_timeout_queue *));
1420 		if (!newqueues) {
1421 			event_warn("%s: realloc",__func__);
1422 			goto done;
1423 		}
1424 		base->n_common_timeouts_allocated = n;
1425 		base->common_timeout_queues = newqueues;
1426 	}
1427 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1428 	if (!new_ctl) {
1429 		event_warn("%s: calloc",__func__);
1430 		goto done;
1431 	}
1432 	TAILQ_INIT(&new_ctl->events);
1433 	new_ctl->duration.tv_sec = duration->tv_sec;
1434 	new_ctl->duration.tv_usec =
1435 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1436 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1437 	evtimer_assign(&new_ctl->timeout_event, base,
1438 	    common_timeout_callback, new_ctl);
1439 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1440 	event_priority_set(&new_ctl->timeout_event, 0);
1441 	new_ctl->base = base;
1442 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1443 	result = &new_ctl->duration;
1444 
1445 done:
1446 	if (result)
1447 		EVUTIL_ASSERT(is_common_timeout(result, base));
1448 
1449 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1450 	return result;
1451 }
1452 
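/*
 * Usage sketch (disabled): when many events share one timeout duration,
 * registering it as a common timeout replaces per-event minheap churn with
 * cheap queue operations.  'cb' and 'arg' are placeholders supplied by the
 * caller.
 */
#if 0
static void
example_many_timers(struct event_base *base, event_callback_fn cb, void *arg)
{
	int i;
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *tv_common =
	    event_base_init_common_timeout(base, &ten_sec);
	if (!tv_common)
		tv_common = &ten_sec;	/* fall back to an ordinary timeout */
	for (i = 0; i < 10000; ++i) {
		struct event *ev = evtimer_new(base, cb, arg);
		/* Pass the magic timeval back; it encodes the queue index. */
		event_add(ev, tv_common);
	}
}
#endif
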
1453 /* Closure function invoked when we're activating a persistent event. */
1454 static inline void
1455 event_persist_closure(struct event_base *base, struct event *ev)
1456 {
1457 
1458 	// Declare our callback pointer; we use it to save the callback before it's executed
1459 	void (*evcb_callback)(evutil_socket_t, short, void *);
1460 
1461 	/* reschedule the persistent event if we have a timeout. */
1462 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1463 		/* If there was a timeout, we want it to run at an interval of
1464 		 * ev_io_timeout after the last time it was _scheduled_ for,
1465 		 * not ev_io_timeout after _now_.  If it fired for another
1466 		 * reason, though, the timeout ought to start ticking _now_. */
1467 		struct timeval run_at, relative_to, delay, now;
1468 		ev_uint32_t usec_mask = 0;
1469 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1470 			&ev->ev_io_timeout));
1471 		gettime(base, &now);
1472 		if (is_common_timeout(&ev->ev_timeout, base)) {
1473 			delay = ev->ev_io_timeout;
1474 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1475 			delay.tv_usec &= MICROSECONDS_MASK;
1476 			if (ev->ev_res & EV_TIMEOUT) {
1477 				relative_to = ev->ev_timeout;
1478 				relative_to.tv_usec &= MICROSECONDS_MASK;
1479 			} else {
1480 				relative_to = now;
1481 			}
1482 		} else {
1483 			delay = ev->ev_io_timeout;
1484 			if (ev->ev_res & EV_TIMEOUT) {
1485 				relative_to = ev->ev_timeout;
1486 			} else {
1487 				relative_to = now;
1488 			}
1489 		}
1490 		evutil_timeradd(&relative_to, &delay, &run_at);
1491 		if (evutil_timercmp(&run_at, &now, <)) {
1492 			/* Looks like we missed at least one invocation due to
1493 			 * a clock jump, not running the event loop for a
1494 			 * while, really slow callbacks, or
1495 			 * something. Reschedule relative to now.
1496 			 */
1497 			evutil_timeradd(&now, &delay, &run_at);
1498 		}
1499 		run_at.tv_usec |= usec_mask;
1500 		event_add_nolock_(ev, &run_at, 1);
1501 	}
1502 
1503 	// Save our callback before we release the lock
1504 	evcb_callback = *ev->ev_callback;
1505 
1506 	// Release the lock
1507  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1508 
1509 	// Execute the callback
1510 	(evcb_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1511 }
1512 
1513 /*
1514   Helper for event_process_active to process all the events in a single queue,
1515   releasing the lock as we go.  This function requires that the lock be held
1516   when it's invoked.  Returns -1 if we get a signal or an event_break that
1517   means we should stop processing any active events now.  Otherwise returns
1518   the number of non-internal event_callbacks that we processed.
1519 */
1520 static int
1521 event_process_active_single_queue(struct event_base *base,
1522     struct evcallback_list *activeq,
1523     int max_to_process, const struct timeval *endtime)
1524 {
1525 	struct event_callback *evcb;
1526 	int count = 0;
1527 
1528 	EVUTIL_ASSERT(activeq != NULL);
1529 
1530 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1531 		struct event *ev=NULL;
1532 		if (evcb->evcb_flags & EVLIST_INIT) {
1533 			ev = event_callback_to_event(evcb);
1534 
1535 			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1536 				event_queue_remove_active(base, evcb);
1537 			else
1538 				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1539 			event_debug((
1540 			    "event_process_active: event: %p, %s%s%scall %p",
1541 			    ev,
1542 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1543 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1544 			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1545 			    ev->ev_callback));
1546 		} else {
1547 			event_queue_remove_active(base, evcb);
1548 			event_debug(("event_process_active: event_callback %p, "
1549 				"closure %d, call %p",
1550 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1551 		}
1552 
1553 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1554 			++count;
1555 
1556 
1557 		base->current_event = evcb;
1558 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1559 		base->current_event_waiters = 0;
1560 #endif
1561 
1562 		switch (evcb->evcb_closure) {
1563 		case EV_CLOSURE_EVENT_SIGNAL:
1564 			EVUTIL_ASSERT(ev != NULL);
1565 			event_signal_closure(base, ev);
1566 			break;
1567 		case EV_CLOSURE_EVENT_PERSIST:
1568 			EVUTIL_ASSERT(ev != NULL);
1569 			event_persist_closure(base, ev);
1570 			break;
1571 		case EV_CLOSURE_EVENT: {
1572 			EVUTIL_ASSERT(ev != NULL);
1573 			void (*evcb_callback)(evutil_socket_t, short, void *) = *ev->ev_callback;
1574 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1575 			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1576 		}
1577 		break;
1578 		case EV_CLOSURE_CB_SELF: {
1579 			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1580 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1581 			evcb_selfcb(evcb, evcb->evcb_arg);
1582 		}
1583 		break;
1584 		case EV_CLOSURE_EVENT_FINALIZE:
1585 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1586 			EVUTIL_ASSERT(ev != NULL);
1587 			void (*evcb_evfinalize)(struct event *, void *) = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1588 			base->current_event = NULL;
1589 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1590 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1591 			evcb_evfinalize(ev, ev->ev_arg);
1592 			event_debug_note_teardown_(ev);
1593 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1594 				mm_free(ev);
1595 		}
1596 		break;
1597 		case EV_CLOSURE_CB_FINALIZE: {
1598 			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1599 			base->current_event = NULL;
1600 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1601 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1602 			evcb_cbfinalize(evcb, evcb->evcb_arg);
1603 		}
1604 		break;
1605 		default:
1606 			EVUTIL_ASSERT(0);
1607 		}
1608 
1609 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1610 		base->current_event = NULL;
1611 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1612 		if (base->current_event_waiters) {
1613 			base->current_event_waiters = 0;
1614 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1615 		}
1616 #endif
1617 
1618 		if (base->event_break)
1619 			return -1;
1620 		if (count >= max_to_process)
1621 			return count;
1622 		if (count && endtime) {
1623 			struct timeval now;
1624 			update_time_cache(base);
1625 			gettime(base, &now);
1626 			if (evutil_timercmp(&now, endtime, >=))
1627 				return count;
1628 		}
1629 		if (base->event_continue)
1630 			break;
1631 	}
1632 	return count;
1633 }
1634 
1635 /*
1636  * Active events are stored in priority queues.  Lower-numbered priorities
1637  * are always processed before higher-numbered ones, so low-numbered
1638  * (higher-priority) events can starve high-numbered ones.
1639  */
1640 
1641 static int
1642 event_process_active(struct event_base *base)
1643 {
1644 	/* Caller must hold th_base_lock */
1645 	struct evcallback_list *activeq = NULL;
1646 	int i, c = 0;
1647 	const struct timeval *endtime;
1648 	struct timeval tv;
1649 	const int maxcb = base->max_dispatch_callbacks;
1650 	const int limit_after_prio = base->limit_callbacks_after_prio;
1651 	if (base->max_dispatch_time.tv_sec >= 0) {
1652 		update_time_cache(base);
1653 		gettime(base, &tv);
1654 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1655 		endtime = &tv;
1656 	} else {
1657 		endtime = NULL;
1658 	}
1659 
1660 	for (i = 0; i < base->nactivequeues; ++i) {
1661 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1662 			base->event_running_priority = i;
1663 			activeq = &base->activequeues[i];
1664 			if (i < limit_after_prio)
1665 				c = event_process_active_single_queue(base, activeq,
1666 				    INT_MAX, NULL);
1667 			else
1668 				c = event_process_active_single_queue(base, activeq,
1669 				    maxcb, endtime);
1670 			if (c < 0) {
1671 				goto done;
1672 			} else if (c > 0)
1673 				break; /* Processed a real event; do not
1674 					* consider lower-priority events */
1675 			/* If we get here, all of the events we processed
1676 			 * were internal.  Continue. */
1677 		}
1678 	}
1679 
1680 done:
1681 	base->event_running_priority = -1;
1682 
1683 	return c;
1684 }
1685 
1686 /*
1687  * Wait continuously for events.  We exit only if no events are left.
1688  */
1689 
1690 int
1691 event_dispatch(void)
1692 {
1693 	return (event_loop(0));
1694 }
1695 
1696 int
1697 event_base_dispatch(struct event_base *event_base)
1698 {
1699 	return (event_base_loop(event_base, 0));
1700 }
1701 
1702 const char *
1703 event_base_get_method(const struct event_base *base)
1704 {
1705 	EVUTIL_ASSERT(base);
1706 	return (base->evsel->name);
1707 }
1708 
1709 /** Callback: used to implement event_base_loopexit by telling the event_base
1710  * that it's time to exit its loop. */
1711 static void
1712 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1713 {
1714 	struct event_base *base = arg;
1715 	base->event_gotterm = 1;
1716 }
1717 
1718 int
1719 event_loopexit(const struct timeval *tv)
1720 {
1721 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1722 		    current_base, tv));
1723 }
1724 
1725 int
1726 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1727 {
1728 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1729 		    event_base, tv));
1730 }
1731 
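/*
 * Usage sketch (disabled): event_base_loopexit() lets callbacks that are
 * already active finish (after 'tv', or right away if tv is NULL), while
 * event_base_loopbreak() (just below) stops as soon as the current callback
 * returns.
 */
#if 0
static void
example_stop_after_ten_seconds(struct event_base *base)
{
	struct timeval ten_sec = { 10, 0 };
	event_base_loopexit(base, &ten_sec);
	/* or, to stop immediately after the running callback:
	 *	event_base_loopbreak(base);
	 */
}
#endif
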
1732 int
1733 event_loopbreak(void)
1734 {
1735 	return (event_base_loopbreak(current_base));
1736 }
1737 
1738 int
1739 event_base_loopbreak(struct event_base *event_base)
1740 {
1741 	int r = 0;
1742 	if (event_base == NULL)
1743 		return (-1);
1744 
1745 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1746 	event_base->event_break = 1;
1747 
1748 	if (EVBASE_NEED_NOTIFY(event_base)) {
1749 		r = evthread_notify_base(event_base);
1750 	} else {
1751 		r = (0);
1752 	}
1753 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1754 	return r;
1755 }
1756 
1757 int
1758 event_base_loopcontinue(struct event_base *event_base)
1759 {
1760 	int r = 0;
1761 	if (event_base == NULL)
1762 		return (-1);
1763 
1764 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1765 	event_base->event_continue = 1;
1766 
1767 	if (EVBASE_NEED_NOTIFY(event_base)) {
1768 		r = evthread_notify_base(event_base);
1769 	} else {
1770 		r = (0);
1771 	}
1772 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1773 	return r;
1774 }
1775 
1776 int
1777 event_base_got_break(struct event_base *event_base)
1778 {
1779 	int res;
1780 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1781 	res = event_base->event_break;
1782 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1783 	return res;
1784 }
1785 
1786 int
1787 event_base_got_exit(struct event_base *event_base)
1788 {
1789 	int res;
1790 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1791 	res = event_base->event_gotterm;
1792 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1793 	return res;
1794 }
1795 
1796 /* not thread safe */
1797 
1798 int
1799 event_loop(int flags)
1800 {
1801 	return event_base_loop(current_base, flags);
1802 }
1803 
1804 int
1805 event_base_loop(struct event_base *base, int flags)
1806 {
1807 	const struct eventop *evsel = base->evsel;
1808 	struct timeval tv;
1809 	struct timeval *tv_p;
1810 	int res, done, retval = 0;
1811 
1812 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1813 	 * as we invoke user callbacks. */
1814 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1815 
1816 	if (base->running_loop) {
1817 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1818 		    " can run on each event_base at once.", __func__);
1819 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1820 		return -1;
1821 	}
1822 
1823 	base->running_loop = 1;
1824 
1825 	clear_time_cache(base);
1826 
1827 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1828 		evsig_set_base_(base);
1829 
1830 	done = 0;
1831 
1832 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1833 	base->th_owner_id = EVTHREAD_GET_ID();
1834 #endif
1835 
1836 	base->event_gotterm = base->event_break = 0;
1837 
1838 	while (!done) {
1839 		base->event_continue = 0;
1840 		base->n_deferreds_queued = 0;
1841 
1842 		/* Terminate the loop if we have been asked to */
1843 		if (base->event_gotterm) {
1844 			break;
1845 		}
1846 
1847 		if (base->event_break) {
1848 			break;
1849 		}
1850 
1851 		tv_p = &tv;
1852 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1853 			timeout_next(base, &tv_p);
1854 		} else {
1855 			/*
1856 			 * if we have active events, we just poll new events
1857 			 * without waiting.
1858 			 */
1859 			evutil_timerclear(&tv);
1860 		}
1861 
1862 		/* If we have no events, we just exit */
1863 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1864 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1865 			event_debug(("%s: no events registered.", __func__));
1866 			retval = 1;
1867 			goto done;
1868 		}
1869 
1870 		event_queue_make_later_events_active(base);
1871 
1872 		clear_time_cache(base);
1873 
1874 		res = evsel->dispatch(base, tv_p);
1875 
1876 		if (res == -1) {
1877 			event_debug(("%s: dispatch returned unsuccessfully.",
1878 				__func__));
1879 			retval = -1;
1880 			goto done;
1881 		}
1882 
1883 		update_time_cache(base);
1884 
1885 		timeout_process(base);
1886 
1887 		if (N_ACTIVE_CALLBACKS(base)) {
1888 			int n = event_process_active(base);
1889 			if ((flags & EVLOOP_ONCE)
1890 			    && N_ACTIVE_CALLBACKS(base) == 0
1891 			    && n != 0)
1892 				done = 1;
1893 		} else if (flags & EVLOOP_NONBLOCK)
1894 			done = 1;
1895 	}
1896 	event_debug(("%s: asked to terminate loop.", __func__));
1897 
1898 done:
1899 	clear_time_cache(base);
1900 	base->running_loop = 0;
1901 
1902 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1903 
1904 	return (retval);
1905 }
1906 
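/*
 * Illustrative sketch of the flags accepted by event_base_loop().  The call
 * pattern shown here is an example, not code from this file:
 *
 *     // Block until one batch of events has been processed, then return:
 *     event_base_loop(base, EVLOOP_ONCE);
 *
 *     // Poll without blocking; run whatever is already active:
 *     event_base_loop(base, EVLOOP_NONBLOCK);
 *
 *     // Keep looping even when no events are pending or active:
 *     event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
 */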
1907 /* One-time callback to implement event_base_once: invokes the user callback,
1908  * then deletes the allocated storage */
1909 static void
1910 event_once_cb(evutil_socket_t fd, short events, void *arg)
1911 {
1912 	struct event_once *eonce = arg;
1913 
1914 	(*eonce->cb)(fd, events, eonce->arg);
1915 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1916 	LIST_REMOVE(eonce, next_once);
1917 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1918 	event_debug_unassign(&eonce->ev);
1919 	mm_free(eonce);
1920 }
1921 
1922 /* Not threadsafe; schedules an event to run exactly once. */
1923 int
1924 event_once(evutil_socket_t fd, short events,
1925     void (*callback)(evutil_socket_t, short, void *),
1926     void *arg, const struct timeval *tv)
1927 {
1928 	return event_base_once(current_base, fd, events, callback, arg, tv);
1929 }
1930 
1931 /* Schedules an event once */
1932 int
1933 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1934     void (*callback)(evutil_socket_t, short, void *),
1935     void *arg, const struct timeval *tv)
1936 {
1937 	struct event_once *eonce;
1938 	int res = 0;
1939 	int activate = 0;
1940 
1941 	/* We cannot support signals that just fire once, or persistent
1942 	 * events. */
1943 	if (events & (EV_SIGNAL|EV_PERSIST))
1944 		return (-1);
1945 
1946 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1947 		return (-1);
1948 
1949 	eonce->cb = callback;
1950 	eonce->arg = arg;
1951 
1952 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
1953 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1954 
1955 		if (tv == NULL || ! evutil_timerisset(tv)) {
1956 			/* If the event is going to become active immediately,
1957 			 * don't put it on the timeout queue.  This is one
1958 			 * idiom for scheduling a callback, so let's make
1959 			 * it fast (and order-preserving). */
1960 			activate = 1;
1961 		}
1962 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
1963 		events &= EV_READ|EV_WRITE|EV_CLOSED;
1964 
1965 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1966 	} else {
1967 		/* Bad event combination */
1968 		mm_free(eonce);
1969 		return (-1);
1970 	}
1971 
1972 	if (res == 0) {
1973 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1974 		if (activate)
1975 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
1976 		else
1977 			res = event_add_nolock_(&eonce->ev, tv, 0);
1978 
1979 		if (res != 0) {
1980 			mm_free(eonce);
1981 			return (res);
1982 		} else {
1983 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
1984 		}
1985 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1986 	}
1987 
1988 	return (0);
1989 }
1990 
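/*
 * Illustrative sketch for event_base_once(): schedule a callback that fires
 * exactly once and whose internal storage libevent frees afterwards.  The
 * callback name "on_deferred_init" and the two-second delay are assumptions
 * of the example:
 *
 *     static void on_deferred_init(evutil_socket_t fd, short what, void *arg)
 *     {
 *         // runs once; no event_del()/event_free() needed by the caller
 *     }
 *
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_once(base, -1, EV_TIMEOUT, on_deferred_init, NULL, &two_sec);
 */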
1991 int
1992 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1993 {
1994 	if (!base)
1995 		base = current_base;
1996 	if (arg == &event_self_cbarg_ptr_)
1997 		arg = ev;
1998 
1999 	event_debug_assert_not_added_(ev);
2000 
2001 	ev->ev_base = base;
2002 
2003 	ev->ev_callback = callback;
2004 	ev->ev_arg = arg;
2005 	ev->ev_fd = fd;
2006 	ev->ev_events = events;
2007 	ev->ev_res = 0;
2008 	ev->ev_flags = EVLIST_INIT;
2009 	ev->ev_ncalls = 0;
2010 	ev->ev_pncalls = NULL;
2011 
2012 	if (events & EV_SIGNAL) {
2013 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2014 			event_warnx("%s: EV_SIGNAL is not compatible with "
2015 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2016 			return -1;
2017 		}
2018 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2019 	} else {
2020 		if (events & EV_PERSIST) {
2021 			evutil_timerclear(&ev->ev_io_timeout);
2022 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2023 		} else {
2024 			ev->ev_closure = EV_CLOSURE_EVENT;
2025 		}
2026 	}
2027 
2028 	min_heap_elem_init_(ev);
2029 
2030 	if (base != NULL) {
2031 		/* by default, we put new events into the middle priority */
2032 		ev->ev_pri = base->nactivequeues / 2;
2033 	}
2034 
2035 	event_debug_note_setup_(ev);
2036 
2037 	return 0;
2038 }
2039 
2040 int
2041 event_base_set(struct event_base *base, struct event *ev)
2042 {
2043 	/* Only innocent events may be assigned to a different base */
2044 	if (ev->ev_flags != EVLIST_INIT)
2045 		return (-1);
2046 
2047 	event_debug_assert_is_setup_(ev);
2048 
2049 	ev->ev_base = base;
2050 	ev->ev_pri = base->nactivequeues/2;
2051 
2052 	return (0);
2053 }
2054 
2055 void
2056 event_set(struct event *ev, evutil_socket_t fd, short events,
2057 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
2058 {
2059 	int r;
2060 	r = event_assign(ev, current_base, fd, events, callback, arg);
2061 	EVUTIL_ASSERT(r == 0);
2062 }
2063 
2064 void *
2065 event_self_cbarg(void)
2066 {
2067 	return &event_self_cbarg_ptr_;
2068 }
2069 
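/*
 * Illustrative sketch combining event_assign() with event_self_cbarg(): the
 * event receives a pointer to itself as its callback argument, which is
 * handy for events that delete themselves.  Including event2/event_struct.h
 * (for the struct event layout) and the names "tick_ev"/"on_tick" are
 * assumptions of the example:
 *
 *     #include <event2/event.h>
 *     #include <event2/event_struct.h>
 *
 *     static struct event tick_ev;
 *
 *     static void on_tick(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event *self = arg;    // == &tick_ev
 *         event_del(self);
 *     }
 *
 *     void setup(struct event_base *base)
 *     {
 *         struct timeval one_sec = { 1, 0 };
 *         event_assign(&tick_ev, base, -1, EV_PERSIST, on_tick,
 *             event_self_cbarg());
 *         event_add(&tick_ev, &one_sec);
 *     }
 */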
2070 struct event *
2071 event_base_get_running_event(struct event_base *base)
2072 {
2073 	struct event *ev = NULL;
2074 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2075 	if (EVBASE_IN_THREAD(base)) {
2076 		struct event_callback *evcb = base->current_event;
2077 		if (evcb->evcb_flags & EVLIST_INIT)
2078 			ev = event_callback_to_event(evcb);
2079 	}
2080 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2081 	return ev;
2082 }
2083 
2084 struct event *
2085 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2086 {
2087 	struct event *ev;
2088 	ev = mm_malloc(sizeof(struct event));
2089 	if (ev == NULL)
2090 		return (NULL);
2091 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2092 		mm_free(ev);
2093 		return (NULL);
2094 	}
2095 
2096 	return (ev);
2097 }
2098 
2099 void
2100 event_free(struct event *ev)
2101 {
2102 	/* This is disabled so that events which have been finalized remain
2103 	 * valid targets for event_free(). */
2104 	// event_debug_assert_is_setup_(ev);
2105 
2106 	/* make sure that this event won't be coming back to haunt us. */
2107 	event_del(ev);
2108 	event_debug_note_teardown_(ev);
2109 	mm_free(ev);
2110 
2111 }
2112 
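/*
 * Illustrative sketch of the heap-allocated lifecycle: event_new() pairs
 * with event_free(), which deletes the event before releasing it.  The
 * callback name "on_readable" and the descriptor "fd" are assumptions of
 * the example:
 *
 *     struct event *rev = event_new(base, fd, EV_READ | EV_PERSIST,
 *         on_readable, NULL);
 *     if (rev == NULL || event_add(rev, NULL) < 0) {
 *         // handle allocation or registration failure
 *     }
 *     // ... run the loop, use the event ...
 *     event_free(rev);   // implicitly deletes the event first
 */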
2113 void
2114 event_debug_unassign(struct event *ev)
2115 {
2116 	event_debug_assert_not_added_(ev);
2117 	event_debug_note_teardown_(ev);
2118 
2119 	ev->ev_flags &= ~EVLIST_INIT;
2120 }
2121 
2122 #define EVENT_FINALIZE_FREE_ 0x10000
2123 static int
2124 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2125 {
2126 	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2127 	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2128 
2129 	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2130 	ev->ev_closure = closure;
2131 	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2132 	event_active_nolock_(ev, EV_FINALIZE, 1);
2133 	ev->ev_flags |= EVLIST_FINALIZING;
2134 	return 0;
2135 }
2136 
2137 static int
2138 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2139 {
2140 	int r;
2141 	struct event_base *base = ev->ev_base;
2142 	if (EVUTIL_FAILURE_CHECK(!base)) {
2143 		event_warnx("%s: event has no event_base set.", __func__);
2144 		return -1;
2145 	}
2146 
2147 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2148 	r = event_finalize_nolock_(base, flags, ev, cb);
2149 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2150 	return r;
2151 }
2152 
2153 int
2154 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2155 {
2156 	return event_finalize_impl_(flags, ev, cb);
2157 }
2158 
2159 int
2160 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2161 {
2162 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2163 }
2164 
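/*
 * Illustrative sketch for event_free_finalize(): tear down an event whose
 * callback might be running in another thread without blocking on it.  The
 * finalizer runs from the event loop once the event can no longer fire; the
 * names "connection", "conn", "read_event" and "conn_finalize" are
 * assumptions of the example:
 *
 *     static void conn_finalize(struct event *ev, void *arg)
 *     {
 *         struct connection *conn = arg;
 *         free_connection(conn);   // now safe: ev will not run again
 *     }
 *
 *     // Instead of event_del()+event_free(), which may block:
 *     event_free_finalize(0, conn->read_event, conn_finalize);
 */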
2165 void
2166 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2167 {
2168 	struct event *ev = NULL;
2169 	if (evcb->evcb_flags & EVLIST_INIT) {
2170 		ev = event_callback_to_event(evcb);
2171 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2172 	} else {
2173 		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2174 	}
2175 
2176 	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2177 	evcb->evcb_cb_union.evcb_cbfinalize = cb;
2178 	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2179 	evcb->evcb_flags |= EVLIST_FINALIZING;
2180 }
2181 
2182 void
2183 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2184 {
2185 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2186 	event_callback_finalize_nolock_(base, flags, evcb, cb);
2187 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2188 }
2189 
2190 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2191  * callback will be invoked on *one of them*, after they have *all* been
2192  * finalized. */
2193 int
2194 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2195 {
2196 	int n_pending = 0, i;
2197 
2198 	if (base == NULL)
2199 		base = current_base;
2200 
2201 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2202 
2203 	event_debug(("%s: %d events finalizing", __func__, n_cbs));
2204 
2205 	/* At most one can be currently executing; the rest we just
2206 	 * cancel... But we always make sure that the finalize callback
2207 	 * runs. */
2208 	for (i = 0; i < n_cbs; ++i) {
2209 		struct event_callback *evcb = evcbs[i];
2210 		if (evcb == base->current_event) {
2211 			event_callback_finalize_nolock_(base, 0, evcb, cb);
2212 			++n_pending;
2213 		} else {
2214 			event_callback_cancel_nolock_(base, evcb, 0);
2215 		}
2216 	}
2217 
2218 	if (n_pending == 0) {
2219 		/* Just do the first one. */
2220 		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2221 	}
2222 
2223 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2224 	return 0;
2225 }
2226 
2227 /*
2228  * Set's the priority of an event - if an event is already scheduled
2229  * Sets the priority of an event; if the event is already active,
2230  * changing its priority fails.
2231 
2232 int
2233 event_priority_set(struct event *ev, int pri)
2234 {
2235 	event_debug_assert_is_setup_(ev);
2236 
2237 	if (ev->ev_flags & EVLIST_ACTIVE)
2238 		return (-1);
2239 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2240 		return (-1);
2241 
2242 	ev->ev_pri = pri;
2243 
2244 	return (0);
2245 }
2246 
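/*
 * Illustrative sketch for priorities: the base must be given multiple
 * priority queues before event_priority_set() is useful, and lower numbers
 * run first.  The "urgent_ev"/"bulk_ev" names are assumptions of the
 * example:
 *
 *     event_base_priority_init(base, 2);   // queues 0 (high) and 1 (low)
 *     event_priority_set(urgent_ev, 0);
 *     event_priority_set(bulk_ev, 1);
 *     // Must be called while the events are not active, or it fails.
 */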
2247 /*
2248  * Checks if a specific event is pending or scheduled.
2249  */
2250 
2251 int
2252 event_pending(const struct event *ev, short event, struct timeval *tv)
2253 {
2254 	int flags = 0;
2255 
2256 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2257 		event_warnx("%s: event has no event_base set.", __func__);
2258 		return 0;
2259 	}
2260 
2261 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2262 	event_debug_assert_is_setup_(ev);
2263 
2264 	if (ev->ev_flags & EVLIST_INSERTED)
2265 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2266 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2267 		flags |= ev->ev_res;
2268 	if (ev->ev_flags & EVLIST_TIMEOUT)
2269 		flags |= EV_TIMEOUT;
2270 
2271 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2272 
2273 	/* See if there is a timeout that we should report */
2274 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2275 		struct timeval tmp = ev->ev_timeout;
2276 		tmp.tv_usec &= MICROSECONDS_MASK;
2277 		/* correctly remap to real time */
2278 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2279 	}
2280 
2281 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2282 
2283 	return (flags & event);
2284 }
2285 
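/*
 * Illustrative sketch for event_pending(): query which of the requested
 * flags an event is currently pending or active on, and optionally fetch
 * the expiration time of its timeout:
 *
 *     struct timeval expires;
 *     if (event_pending(ev, EV_READ | EV_WRITE | EV_TIMEOUT, &expires)) {
 *         // ev is pending/active for at least one of those flags; if
 *         // EV_TIMEOUT was among them, 'expires' holds the real time at
 *         // which the timeout would fire.
 *     }
 */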
2286 int
2287 event_initialized(const struct event *ev)
2288 {
2289 	if (!(ev->ev_flags & EVLIST_INIT))
2290 		return 0;
2291 
2292 	return 1;
2293 }
2294 
2295 void
2296 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2297 {
2298 	event_debug_assert_is_setup_(event);
2299 
2300 	if (base_out)
2301 		*base_out = event->ev_base;
2302 	if (fd_out)
2303 		*fd_out = event->ev_fd;
2304 	if (events_out)
2305 		*events_out = event->ev_events;
2306 	if (callback_out)
2307 		*callback_out = event->ev_callback;
2308 	if (arg_out)
2309 		*arg_out = event->ev_arg;
2310 }
2311 
2312 size_t
2313 event_get_struct_event_size(void)
2314 {
2315 	return sizeof(struct event);
2316 }
2317 
2318 evutil_socket_t
2319 event_get_fd(const struct event *ev)
2320 {
2321 	event_debug_assert_is_setup_(ev);
2322 	return ev->ev_fd;
2323 }
2324 
2325 struct event_base *
2326 event_get_base(const struct event *ev)
2327 {
2328 	event_debug_assert_is_setup_(ev);
2329 	return ev->ev_base;
2330 }
2331 
2332 short
2333 event_get_events(const struct event *ev)
2334 {
2335 	event_debug_assert_is_setup_(ev);
2336 	return ev->ev_events;
2337 }
2338 
2339 event_callback_fn
2340 event_get_callback(const struct event *ev)
2341 {
2342 	event_debug_assert_is_setup_(ev);
2343 	return ev->ev_callback;
2344 }
2345 
2346 void *
2347 event_get_callback_arg(const struct event *ev)
2348 {
2349 	event_debug_assert_is_setup_(ev);
2350 	return ev->ev_arg;
2351 }
2352 
2353 int
2354 event_get_priority(const struct event *ev)
2355 {
2356 	event_debug_assert_is_setup_(ev);
2357 	return ev->ev_pri;
2358 }
2359 
2360 int
2361 event_add(struct event *ev, const struct timeval *tv)
2362 {
2363 	int res;
2364 
2365 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2366 		event_warnx("%s: event has no event_base set.", __func__);
2367 		return -1;
2368 	}
2369 
2370 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2371 
2372 	res = event_add_nolock_(ev, tv, 0);
2373 
2374 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2375 
2376 	return (res);
2377 }
2378 
2379 /* Helper callback: wake an event_base from another thread.  This version
2380  * works by writing a byte to one end of a socketpair, so that the event_base
2381  * listening on the other end will wake up as the corresponding event
2382  * triggers */
2383 static int
2384 evthread_notify_base_default(struct event_base *base)
2385 {
2386 	char buf[1];
2387 	int r;
2388 	buf[0] = (char) 0;
2389 #ifdef _WIN32
2390 	r = send(base->th_notify_fd[1], buf, 1, 0);
2391 #else
2392 	r = write(base->th_notify_fd[1], buf, 1);
2393 #endif
2394 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2395 }
2396 
2397 #ifdef EVENT__HAVE_EVENTFD
2398 /* Helper callback: wake an event_base from another thread.  This version
2399  * assumes that you have a working eventfd() implementation. */
2400 static int
2401 evthread_notify_base_eventfd(struct event_base *base)
2402 {
2403 	ev_uint64_t msg = 1;
2404 	int r;
2405 	do {
2406 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2407 	} while (r < 0 && errno == EAGAIN);
2408 
2409 	return (r < 0) ? -1 : 0;
2410 }
2411 #endif
2412 
2413 
2414 /** Tell the thread currently running the event_loop for base (if any) that it
2415  * needs to stop waiting in its dispatch function (if it is waiting) and process all
2416  * active callbacks. */
2417 static int
2418 evthread_notify_base(struct event_base *base)
2419 {
2420 	EVENT_BASE_ASSERT_LOCKED(base);
2421 	if (!base->th_notify_fn)
2422 		return -1;
2423 	if (base->is_notify_pending)
2424 		return 0;
2425 	base->is_notify_pending = 1;
2426 	return base->th_notify_fn(base);
2427 }
2428 
2429 /* Implementation function to remove a timeout on a currently pending event.
2430  */
2431 int
2432 event_remove_timer_nolock_(struct event *ev)
2433 {
2434 	struct event_base *base = ev->ev_base;
2435 
2436 	EVENT_BASE_ASSERT_LOCKED(base);
2437 	event_debug_assert_is_setup_(ev);
2438 
2439 	event_debug(("event_remove_timer_nolock: event: %p", ev));
2440 
2441 	/* If it's not pending on a timeout, we don't need to do anything. */
2442 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2443 		event_queue_remove_timeout(base, ev);
2444 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2445 	}
2446 
2447 	return (0);
2448 }
2449 
2450 int
2451 event_remove_timer(struct event *ev)
2452 {
2453 	int res;
2454 
2455 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2456 		event_warnx("%s: event has no event_base set.", __func__);
2457 		return -1;
2458 	}
2459 
2460 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2461 
2462 	res = event_remove_timer_nolock_(ev);
2463 
2464 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2465 
2466 	return (res);
2467 }
2468 
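/*
 * Illustrative sketch for event_remove_timer(): keep an event pending on
 * its fd but discard its timeout, e.g. once a peer has authenticated and an
 * idle deadline no longer applies (the scenario and "read_ev" are
 * assumptions of the example):
 *
 *     struct timeval idle = { 30, 0 };
 *     event_add(read_ev, &idle);       // EV_READ with a 30s idle timeout
 *     // later:
 *     event_remove_timer(read_ev);     // still pending on EV_READ, no timeout
 */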
2469 /* Implementation function to add an event.  Works just like event_add,
2470  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2471  * we treat tv as an absolute time, not as an interval to add to the current
2472  * time */
2473 int
2474 event_add_nolock_(struct event *ev, const struct timeval *tv,
2475     int tv_is_absolute)
2476 {
2477 	struct event_base *base = ev->ev_base;
2478 	int res = 0;
2479 	int notify = 0;
2480 
2481 	EVENT_BASE_ASSERT_LOCKED(base);
2482 	event_debug_assert_is_setup_(ev);
2483 
2484 	event_debug((
2485 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2486 		 ev,
2487 		 EV_SOCK_ARG(ev->ev_fd),
2488 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2489 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2490 		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2491 		 tv ? "EV_TIMEOUT " : " ",
2492 		 ev->ev_callback));
2493 
2494 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2495 
2496 	if (ev->ev_flags & EVLIST_FINALIZING) {
2497 		/* XXXX debug */
2498 		return (-1);
2499 	}
2500 
2501 	/*
2502 	 * Prepare for timeout insertion further below; if any step
2503 	 * fails, we must not change any state.
2504 	 */
2505 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2506 		if (min_heap_reserve_(&base->timeheap,
2507 			1 + min_heap_size_(&base->timeheap)) == -1)
2508 			return (-1);  /* ENOMEM == errno */
2509 	}
2510 
2511 	/* If the main thread is currently executing a signal event's
2512 	 * callback, and we are not the main thread, then we want to wait
2513 	 * until the callback is done before we mess with the event, or else
2514 	 * we can race on ev_ncalls and ev_pncalls below. */
2515 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2516 	if (base->current_event == event_to_event_callback(ev) &&
2517 	    (ev->ev_events & EV_SIGNAL)
2518 	    && !EVBASE_IN_THREAD(base)) {
2519 		++base->current_event_waiters;
2520 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2521 	}
2522 #endif
2523 
2524 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2525 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2526 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2527 			res = evmap_io_add_(base, ev->ev_fd, ev);
2528 		else if (ev->ev_events & EV_SIGNAL)
2529 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2530 		if (res != -1)
2531 			event_queue_insert_inserted(base, ev);
2532 		if (res == 1) {
2533 			/* evmap says we need to notify the main thread. */
2534 			notify = 1;
2535 			res = 0;
2536 		}
2537 	}
2538 
2539 	/*
2540 	 * we should change the timeout state only if the previous event
2541 	 * addition succeeded.
2542 	 */
2543 	if (res != -1 && tv != NULL) {
2544 		struct timeval now;
2545 		int common_timeout;
2546 #ifdef USE_REINSERT_TIMEOUT
2547 		int was_common;
2548 		int old_timeout_idx;
2549 #endif
2550 
2551 		/*
2552 		 * for persistent timeout events, we remember the
2553 		 * timeout value and re-add the event.
2554 		 *
2555 		 * If tv_is_absolute, this was already set.
2556 		 */
2557 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2558 			ev->ev_io_timeout = *tv;
2559 
2560 #ifndef USE_REINSERT_TIMEOUT
2561 		if (ev->ev_flags & EVLIST_TIMEOUT) {
2562 			event_queue_remove_timeout(base, ev);
2563 		}
2564 #endif
2565 
2566 		/* Check if it is active due to a timeout.  Rescheduling
2567 		 * this timeout before the callback can be executed
2568 		 * removes it from the active list. */
2569 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2570 		    (ev->ev_res & EV_TIMEOUT)) {
2571 			if (ev->ev_events & EV_SIGNAL) {
2572 				/* See if we are just active executing
2573 				 * this event in a loop
2574 				 */
2575 				if (ev->ev_ncalls && ev->ev_pncalls) {
2576 					/* Abort loop */
2577 					*ev->ev_pncalls = 0;
2578 				}
2579 			}
2580 
2581 			event_queue_remove_active(base, event_to_event_callback(ev));
2582 		}
2583 
2584 		gettime(base, &now);
2585 
2586 		common_timeout = is_common_timeout(tv, base);
2587 #ifdef USE_REINSERT_TIMEOUT
2588 		was_common = is_common_timeout(&ev->ev_timeout, base);
2589 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2590 #endif
2591 
2592 		if (tv_is_absolute) {
2593 			ev->ev_timeout = *tv;
2594 		} else if (common_timeout) {
2595 			struct timeval tmp = *tv;
2596 			tmp.tv_usec &= MICROSECONDS_MASK;
2597 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2598 			ev->ev_timeout.tv_usec |=
2599 			    (tv->tv_usec & ~MICROSECONDS_MASK);
2600 		} else {
2601 			evutil_timeradd(&now, tv, &ev->ev_timeout);
2602 		}
2603 
2604 		event_debug((
2605 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2606 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2607 
2608 #ifdef USE_REINSERT_TIMEOUT
2609 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2610 #else
2611 		event_queue_insert_timeout(base, ev);
2612 #endif
2613 
2614 		if (common_timeout) {
2615 			struct common_timeout_list *ctl =
2616 			    get_common_timeout_list(base, &ev->ev_timeout);
2617 			if (ev == TAILQ_FIRST(&ctl->events)) {
2618 				common_timeout_schedule(ctl, &now, ev);
2619 			}
2620 		} else {
2621 			struct event* top = NULL;
2622 			/* See if the earliest timeout is now earlier than it
2623 			 * was before: if so, we will need to tell the main
2624 			 * thread to wake up earlier than it would otherwise.
2625 			 * We double check the timeout of the top element to
2626 			 * handle time distortions due to system suspension.
2627 			 */
2628 			if (min_heap_elt_is_top_(ev))
2629 				notify = 1;
2630 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2631 					 evutil_timercmp(&top->ev_timeout, &now, <))
2632 				notify = 1;
2633 		}
2634 	}
2635 
2636 	/* if we are not in the right thread, we need to wake up the loop */
2637 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2638 		evthread_notify_base(base);
2639 
2640 	event_debug_note_add_(ev);
2641 
2642 	return (res);
2643 }
2644 
2645 static int
2646 event_del_(struct event *ev, int blocking)
2647 {
2648 	int res;
2649 
2650 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2651 		event_warnx("%s: event has no event_base set.", __func__);
2652 		return -1;
2653 	}
2654 
2655 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2656 
2657 	res = event_del_nolock_(ev, blocking);
2658 
2659 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2660 
2661 	return (res);
2662 }
2663 
2664 int
2665 event_del(struct event *ev)
2666 {
2667 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2668 }
2669 
2670 int
2671 event_del_block(struct event *ev)
2672 {
2673 	return event_del_(ev, EVENT_DEL_BLOCK);
2674 }
2675 
2676 int
2677 event_del_noblock(struct event *ev)
2678 {
2679 	return event_del_(ev, EVENT_DEL_NOBLOCK);
2680 }
2681 
2682 /** Helper for event_del: always called with th_base_lock held.
2683  *
2684  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2685  * EVEN_IF_FINALIZING} values. See those for more information.
2686  */
2687 int
2688 event_del_nolock_(struct event *ev, int blocking)
2689 {
2690 	struct event_base *base;
2691 	int res = 0, notify = 0;
2692 
2693 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2694 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2695 
2696 	/* An event without a base has not been added */
2697 	if (ev->ev_base == NULL)
2698 		return (-1);
2699 
2700 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2701 
2702 	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2703 		if (ev->ev_flags & EVLIST_FINALIZING) {
2704 			/* XXXX Debug */
2705 			return 0;
2706 		}
2707 	}
2708 
2709 	/* If the main thread is currently executing this event's callback,
2710 	 * and we are not the main thread, then we want to wait until the
2711 	 * callback is done before we start removing the event.  That way,
2712 	 * when this function returns, it will be safe to free the
2713 	 * user-supplied argument. */
2714 	base = ev->ev_base;
2715 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2716 	if (blocking != EVENT_DEL_NOBLOCK &&
2717 	    base->current_event == event_to_event_callback(ev) &&
2718 	    !EVBASE_IN_THREAD(base) &&
2719 	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2720 		++base->current_event_waiters;
2721 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2722 	}
2723 #endif
2724 
2725 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2726 
2727 	/* See if we are just active executing this event in a loop */
2728 	if (ev->ev_events & EV_SIGNAL) {
2729 		if (ev->ev_ncalls && ev->ev_pncalls) {
2730 			/* Abort loop */
2731 			*ev->ev_pncalls = 0;
2732 		}
2733 	}
2734 
2735 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2736 		/* NOTE: We never need to notify the main thread because of a
2737 		 * deleted timeout event: all that could happen if we don't is
2738 		 * that the dispatch loop might wake up too early.  But the
2739 		 * point of notifying the main thread _is_ to wake up the
2740 		 * dispatch loop early anyway, so we wouldn't gain anything by
2741 		 * doing it.
2742 		 */
2743 		event_queue_remove_timeout(base, ev);
2744 	}
2745 
2746 	if (ev->ev_flags & EVLIST_ACTIVE)
2747 		event_queue_remove_active(base, event_to_event_callback(ev));
2748 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2749 		event_queue_remove_active_later(base, event_to_event_callback(ev));
2750 
2751 	if (ev->ev_flags & EVLIST_INSERTED) {
2752 		event_queue_remove_inserted(base, ev);
2753 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2754 			res = evmap_io_del_(base, ev->ev_fd, ev);
2755 		else
2756 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2757 		if (res == 1) {
2758 			/* evmap says we need to notify the main thread. */
2759 			notify = 1;
2760 			res = 0;
2761 		}
2762 	}
2763 
2764 	/* if we are not in the right thread, we need to wake up the loop */
2765 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2766 		evthread_notify_base(base);
2767 
2768 	event_debug_note_del_(ev);
2769 
2770 	return (res);
2771 }
2772 
2773 void
2774 event_active(struct event *ev, int res, short ncalls)
2775 {
2776 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2777 		event_warnx("%s: event has no event_base set.", __func__);
2778 		return;
2779 	}
2780 
2781 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2782 
2783 	event_debug_assert_is_setup_(ev);
2784 
2785 	event_active_nolock_(ev, res, ncalls);
2786 
2787 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2788 }
2789 
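/*
 * Illustrative sketch for event_active(): make an event's callback run even
 * though the condition it waits for has not occurred, e.g. to inject work
 * from another thread.  This assumes the base was made threadsafe (for
 * instance via evthread_use_pthreads()) before it was created:
 *
 *     // From any thread:
 *     event_active(wakeup_ev, EV_TIMEOUT, 0);
 *     // wakeup_ev's callback runs in the loop thread with EV_TIMEOUT in
 *     // its 'what' argument; the ncalls argument only matters for signal
 *     // events.
 */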
2790 
2791 void
2792 event_active_nolock_(struct event *ev, int res, short ncalls)
2793 {
2794 	struct event_base *base;
2795 
2796 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2797 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2798 
2799 	base = ev->ev_base;
2800 	EVENT_BASE_ASSERT_LOCKED(base);
2801 
2802 	if (ev->ev_flags & EVLIST_FINALIZING) {
2803 		/* XXXX debug */
2804 		return;
2805 	}
2806 
2807 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2808 	default:
2809 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2810 		EVUTIL_ASSERT(0);
2811 		break;
2812 	case EVLIST_ACTIVE:
2813 		/* We get different kinds of events, add them together */
2814 		ev->ev_res |= res;
2815 		return;
2816 	case EVLIST_ACTIVE_LATER:
2817 		ev->ev_res |= res;
2818 		break;
2819 	case 0:
2820 		ev->ev_res = res;
2821 		break;
2822 	}
2823 
2824 	if (ev->ev_pri < base->event_running_priority)
2825 		base->event_continue = 1;
2826 
2827 	if (ev->ev_events & EV_SIGNAL) {
2828 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2829 		if (base->current_event == event_to_event_callback(ev) &&
2830 		    !EVBASE_IN_THREAD(base)) {
2831 			++base->current_event_waiters;
2832 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2833 		}
2834 #endif
2835 		ev->ev_ncalls = ncalls;
2836 		ev->ev_pncalls = NULL;
2837 	}
2838 
2839 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2840 }
2841 
2842 void
2843 event_active_later_(struct event *ev, int res)
2844 {
2845 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2846 	event_active_later_nolock_(ev, res);
2847 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2848 }
2849 
2850 void
2851 event_active_later_nolock_(struct event *ev, int res)
2852 {
2853 	struct event_base *base = ev->ev_base;
2854 	EVENT_BASE_ASSERT_LOCKED(base);
2855 
2856 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2857 		/* We get different kinds of events, add them together */
2858 		ev->ev_res |= res;
2859 		return;
2860 	}
2861 
2862 	ev->ev_res = res;
2863 
2864 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2865 }
2866 
2867 int
2868 event_callback_activate_(struct event_base *base,
2869     struct event_callback *evcb)
2870 {
2871 	int r;
2872 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2873 	r = event_callback_activate_nolock_(base, evcb);
2874 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2875 	return r;
2876 }
2877 
2878 int
2879 event_callback_activate_nolock_(struct event_base *base,
2880     struct event_callback *evcb)
2881 {
2882 	int r = 1;
2883 
2884 	if (evcb->evcb_flags & EVLIST_FINALIZING)
2885 		return 0;
2886 
2887 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2888 	default:
2889 		EVUTIL_ASSERT(0);
2890 	case EVLIST_ACTIVE_LATER:
2891 		event_queue_remove_active_later(base, evcb);
2892 		r = 0;
2893 		break;
2894 	case EVLIST_ACTIVE:
2895 		return 0;
2896 	case 0:
2897 		break;
2898 	}
2899 
2900 	event_queue_insert_active(base, evcb);
2901 
2902 	if (EVBASE_NEED_NOTIFY(base))
2903 		evthread_notify_base(base);
2904 
2905 	return r;
2906 }
2907 
2908 void
2909 event_callback_activate_later_nolock_(struct event_base *base,
2910     struct event_callback *evcb)
2911 {
2912 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2913 		return;
2914 
2915 	event_queue_insert_active_later(base, evcb);
2916 	if (EVBASE_NEED_NOTIFY(base))
2917 		evthread_notify_base(base);
2918 }
2919 
2920 void
2921 event_callback_init_(struct event_base *base,
2922     struct event_callback *cb)
2923 {
2924 	memset(cb, 0, sizeof(*cb));
2925 	cb->evcb_pri = base->nactivequeues - 1;
2926 }
2927 
2928 int
2929 event_callback_cancel_(struct event_base *base,
2930     struct event_callback *evcb)
2931 {
2932 	int r;
2933 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2934 	r = event_callback_cancel_nolock_(base, evcb, 0);
2935 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2936 	return r;
2937 }
2938 
2939 int
2940 event_callback_cancel_nolock_(struct event_base *base,
2941     struct event_callback *evcb, int even_if_finalizing)
2942 {
2943 	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
2944 		return 0;
2945 
2946 	if (evcb->evcb_flags & EVLIST_INIT)
2947 		return event_del_nolock_(event_callback_to_event(evcb),
2948 		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
2949 
2950 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2951 	default:
2952 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2953 		EVUTIL_ASSERT(0);
2954 		break;
2955 	case EVLIST_ACTIVE:
2956 		/* We get different kinds of events, add them together */
2957 		event_queue_remove_active(base, evcb);
2958 		return 0;
2959 	case EVLIST_ACTIVE_LATER:
2960 		event_queue_remove_active_later(base, evcb);
2961 		break;
2962 	case 0:
2963 		break;
2964 	}
2965 
2966 	return 0;
2967 }
2968 
2969 void
2970 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
2971 {
2972 	memset(cb, 0, sizeof(*cb));
2973 	cb->evcb_cb_union.evcb_selfcb = fn;
2974 	cb->evcb_arg = arg;
2975 	cb->evcb_pri = priority;
2976 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
2977 }
2978 
2979 void
2980 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
2981 {
2982 	cb->evcb_pri = priority;
2983 }
2984 
2985 void
2986 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
2987 {
2988 	if (!base)
2989 		base = current_base;
2990 	event_callback_cancel_(base, cb);
2991 }
2992 
2993 #define MAX_DEFERREDS_QUEUED 32
2994 int
2995 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
2996 {
2997 	int r = 1;
2998 	if (!base)
2999 		base = current_base;
3000 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3001 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3002 		event_callback_activate_later_nolock_(base, cb);
3003 	} else {
3004 		++base->n_deferreds_queued;
3005 		r = event_callback_activate_nolock_(base, cb);
3006 	}
3007 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3008 	return r;
3009 }
3010 
3011 static int
3012 timeout_next(struct event_base *base, struct timeval **tv_p)
3013 {
3014 	/* Caller must hold th_base_lock */
3015 	struct timeval now;
3016 	struct event *ev;
3017 	struct timeval *tv = *tv_p;
3018 	int res = 0;
3019 
3020 	ev = min_heap_top_(&base->timeheap);
3021 
3022 	if (ev == NULL) {
3023 		/* if no time-based events are active wait for I/O */
3024 		*tv_p = NULL;
3025 		goto out;
3026 	}
3027 
3028 	if (gettime(base, &now) == -1) {
3029 		res = -1;
3030 		goto out;
3031 	}
3032 
3033 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3034 		evutil_timerclear(tv);
3035 		goto out;
3036 	}
3037 
3038 	evutil_timersub(&ev->ev_timeout, &now, tv);
3039 
3040 	EVUTIL_ASSERT(tv->tv_sec >= 0);
3041 	EVUTIL_ASSERT(tv->tv_usec >= 0);
3042 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3043 
3044 out:
3045 	return (res);
3046 }
3047 
3048 /* Activate every event whose timeout has elapsed. */
3049 static void
3050 timeout_process(struct event_base *base)
3051 {
3052 	/* Caller must hold lock. */
3053 	struct timeval now;
3054 	struct event *ev;
3055 
3056 	if (min_heap_empty_(&base->timeheap)) {
3057 		return;
3058 	}
3059 
3060 	gettime(base, &now);
3061 
3062 	while ((ev = min_heap_top_(&base->timeheap))) {
3063 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
3064 			break;
3065 
3066 		/* delete this event from the I/O queues */
3067 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3068 
3069 		event_debug(("timeout_process: event: %p, call %p",
3070 			 ev, ev->ev_callback));
3071 		event_active_nolock_(ev, EV_TIMEOUT, 1);
3072 	}
3073 }
3074 
3075 #if (EVLIST_INTERNAL >> 4) != 1
3076 #error "Mismatch for value of EVLIST_INTERNAL"
3077 #endif
3078 
3079 #ifndef MAX
3080 #define MAX(a,b) (((a)>(b))?(a):(b))
3081 #endif
3082 
3083 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3084 
3085 /* These are a fancy way to spell
3086      if (flags & EVLIST_INTERNAL)
3087          base->event_count--/++;
3088 */
3089 #define DECR_EVENT_COUNT(base,flags) \
3090 	((base)->event_count -= (~((flags) >> 4) & 1))
3091 #define INCR_EVENT_COUNT(base,flags) do {					\
3092 	((base)->event_count += (~((flags) >> 4) & 1));				\
3093 	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
3094 } while (0)
3095 
3096 static void
3097 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3098 {
3099 	EVENT_BASE_ASSERT_LOCKED(base);
3100 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3101 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3102 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3103 		return;
3104 	}
3105 	DECR_EVENT_COUNT(base, ev->ev_flags);
3106 	ev->ev_flags &= ~EVLIST_INSERTED;
3107 }
3108 static void
3109 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3110 {
3111 	EVENT_BASE_ASSERT_LOCKED(base);
3112 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3113 		event_errx(1, "%s: %p not on queue %x", __func__,
3114 			   evcb, EVLIST_ACTIVE);
3115 		return;
3116 	}
3117 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3118 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
3119 	base->event_count_active--;
3120 
3121 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3122 	    evcb, evcb_active_next);
3123 }
3124 static void
3125 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3126 {
3127 	EVENT_BASE_ASSERT_LOCKED(base);
3128 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3129 		event_errx(1, "%s: %p not on queue %x", __func__,
3130 			   evcb, EVLIST_ACTIVE_LATER);
3131 		return;
3132 	}
3133 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3134 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3135 	base->event_count_active--;
3136 
3137 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3138 }
3139 static void
3140 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3141 {
3142 	EVENT_BASE_ASSERT_LOCKED(base);
3143 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3144 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3145 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3146 		return;
3147 	}
3148 	DECR_EVENT_COUNT(base, ev->ev_flags);
3149 	ev->ev_flags &= ~EVLIST_TIMEOUT;
3150 
3151 	if (is_common_timeout(&ev->ev_timeout, base)) {
3152 		struct common_timeout_list *ctl =
3153 		    get_common_timeout_list(base, &ev->ev_timeout);
3154 		TAILQ_REMOVE(&ctl->events, ev,
3155 		    ev_timeout_pos.ev_next_with_common_timeout);
3156 	} else {
3157 		min_heap_erase_(&base->timeheap, ev);
3158 	}
3159 }
3160 
3161 #ifdef USE_REINSERT_TIMEOUT
3162 /* Remove and reinsert 'ev' into the timeout queue. */
3163 static void
3164 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3165     int was_common, int is_common, int old_timeout_idx)
3166 {
3167 	struct common_timeout_list *ctl;
3168 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3169 		event_queue_insert_timeout(base, ev);
3170 		return;
3171 	}
3172 
3173 	switch ((was_common<<1) | is_common) {
3174 	case 3: /* Changing from one common timeout to another */
3175 		ctl = base->common_timeout_queues[old_timeout_idx];
3176 		TAILQ_REMOVE(&ctl->events, ev,
3177 		    ev_timeout_pos.ev_next_with_common_timeout);
3178 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3179 		insert_common_timeout_inorder(ctl, ev);
3180 		break;
3181 	case 2: /* Was common; is no longer common */
3182 		ctl = base->common_timeout_queues[old_timeout_idx];
3183 		TAILQ_REMOVE(&ctl->events, ev,
3184 		    ev_timeout_pos.ev_next_with_common_timeout);
3185 		min_heap_push_(&base->timeheap, ev);
3186 		break;
3187 	case 1: /* Wasn't common; has become common. */
3188 		min_heap_erase_(&base->timeheap, ev);
3189 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3190 		insert_common_timeout_inorder(ctl, ev);
3191 		break;
3192 	case 0: /* was in heap; is still on heap. */
3193 		min_heap_adjust_(&base->timeheap, ev);
3194 		break;
3195 	default:
3196 		EVUTIL_ASSERT(0); /* unreachable */
3197 		break;
3198 	}
3199 }
3200 #endif
3201 
3202 /* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
3203 static void
3204 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3205     struct event *ev)
3206 {
3207 	struct event *e;
3208 	/* By all logic, we should just be able to append 'ev' to the end of
3209 	 * ctl->events, since the timeout on each 'ev' is set to {the common
3210 	 * timeout} + {the time when we add the event}, and so the events
3211 	 * should arrive in order of their timeouts.  But just in case
3212 	 * there's some wacky threading issue going on, we do a search from
3213 	 * the end of 'ctl->events' to find the right insertion point.
3214 	 */
3215 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
3216 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3217 		/* This timercmp is a little sneaky, since both ev and e have
3218 		 * magic values in tv_usec.  Fortunately, they ought to have
3219 		 * the _same_ magic values in tv_usec.  Let's assert for that.
3220 		 */
3221 		EVUTIL_ASSERT(
3222 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3223 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3224 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3225 			    ev_timeout_pos.ev_next_with_common_timeout);
3226 			return;
3227 		}
3228 	}
3229 	TAILQ_INSERT_HEAD(&ctl->events, ev,
3230 	    ev_timeout_pos.ev_next_with_common_timeout);
3231 }
3232 
3233 static void
3234 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3235 {
3236 	EVENT_BASE_ASSERT_LOCKED(base);
3237 
3238 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3239 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3240 		    ev, EV_SOCK_ARG(ev->ev_fd));
3241 		return;
3242 	}
3243 
3244 	INCR_EVENT_COUNT(base, ev->ev_flags);
3245 
3246 	ev->ev_flags |= EVLIST_INSERTED;
3247 }
3248 
3249 static void
3250 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3251 {
3252 	EVENT_BASE_ASSERT_LOCKED(base);
3253 
3254 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
3255 		/* Double insertion is possible for active events */
3256 		return;
3257 	}
3258 
3259 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3260 
3261 	evcb->evcb_flags |= EVLIST_ACTIVE;
3262 
3263 	base->event_count_active++;
3264 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3265 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3266 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3267 	    evcb, evcb_active_next);
3268 }
3269 
3270 static void
3271 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3272 {
3273 	EVENT_BASE_ASSERT_LOCKED(base);
3274 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3275 		/* Double insertion is possible */
3276 		return;
3277 	}
3278 
3279 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3280 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3281 	base->event_count_active++;
3282 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3283 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3284 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3285 }
3286 
3287 static void
3288 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3289 {
3290 	EVENT_BASE_ASSERT_LOCKED(base);
3291 
3292 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3293 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3294 		    ev, EV_SOCK_ARG(ev->ev_fd));
3295 		return;
3296 	}
3297 
3298 	INCR_EVENT_COUNT(base, ev->ev_flags);
3299 
3300 	ev->ev_flags |= EVLIST_TIMEOUT;
3301 
3302 	if (is_common_timeout(&ev->ev_timeout, base)) {
3303 		struct common_timeout_list *ctl =
3304 		    get_common_timeout_list(base, &ev->ev_timeout);
3305 		insert_common_timeout_inorder(ctl, ev);
3306 	} else {
3307 		min_heap_push_(&base->timeheap, ev);
3308 	}
3309 }
3310 
3311 static void
3312 event_queue_make_later_events_active(struct event_base *base)
3313 {
3314 	struct event_callback *evcb;
3315 	EVENT_BASE_ASSERT_LOCKED(base);
3316 
3317 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3318 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3319 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3320 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3321 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3322 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3323 	}
3324 }
3325 
3326 /* Functions for debugging */
3327 
3328 const char *
3329 event_get_version(void)
3330 {
3331 	return (EVENT__VERSION);
3332 }
3333 
3334 ev_uint32_t
3335 event_get_version_number(void)
3336 {
3337 	return (EVENT__NUMERIC_VERSION);
3338 }
3339 
3340 /*
3341  * No thread-safe interface needed - the information should be the same
3342  * for all threads.
3343  */
3344 
3345 const char *
3346 event_get_method(void)
3347 {
3348 	return (current_base->evsel->name);
3349 }
3350 
3351 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3352 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3353 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3354 static void (*mm_free_fn_)(void *p) = NULL;
3355 
3356 void *
3357 event_mm_malloc_(size_t sz)
3358 {
3359 	if (sz == 0)
3360 		return NULL;
3361 
3362 	if (mm_malloc_fn_)
3363 		return mm_malloc_fn_(sz);
3364 	else
3365 		return malloc(sz);
3366 }
3367 
3368 void *
3369 event_mm_calloc_(size_t count, size_t size)
3370 {
3371 	if (count == 0 || size == 0)
3372 		return NULL;
3373 
3374 	if (mm_malloc_fn_) {
3375 		size_t sz = count * size;
3376 		void *p = NULL;
3377 		if (count > EV_SIZE_MAX / size)
3378 			goto error;
3379 		p = mm_malloc_fn_(sz);
3380 		if (p)
3381 			return memset(p, 0, sz);
3382 	} else {
3383 		void *p = calloc(count, size);
3384 #ifdef _WIN32
3385 		/* Windows calloc doesn't reliably set ENOMEM */
3386 		if (p == NULL)
3387 			goto error;
3388 #endif
3389 		return p;
3390 	}
3391 
3392 error:
3393 	errno = ENOMEM;
3394 	return NULL;
3395 }
3396 
3397 char *
3398 event_mm_strdup_(const char *str)
3399 {
3400 	if (!str) {
3401 		errno = EINVAL;
3402 		return NULL;
3403 	}
3404 
3405 	if (mm_malloc_fn_) {
3406 		size_t ln = strlen(str);
3407 		void *p = NULL;
3408 		if (ln == EV_SIZE_MAX)
3409 			goto error;
3410 		p = mm_malloc_fn_(ln+1);
3411 		if (p)
3412 			return memcpy(p, str, ln+1);
3413 	} else
3414 #ifdef _WIN32
3415 		return _strdup(str);
3416 #else
3417 		return strdup(str);
3418 #endif
3419 
3420 error:
3421 	errno = ENOMEM;
3422 	return NULL;
3423 }
3424 
3425 void *
3426 event_mm_realloc_(void *ptr, size_t sz)
3427 {
3428 	if (mm_realloc_fn_)
3429 		return mm_realloc_fn_(ptr, sz);
3430 	else
3431 		return realloc(ptr, sz);
3432 }
3433 
3434 void
3435 event_mm_free_(void *ptr)
3436 {
3437 	if (mm_free_fn_)
3438 		mm_free_fn_(ptr);
3439 	else
3440 		free(ptr);
3441 }
3442 
3443 void
3444 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3445 			void *(*realloc_fn)(void *ptr, size_t sz),
3446 			void (*free_fn)(void *ptr))
3447 {
3448 	mm_malloc_fn_ = malloc_fn;
3449 	mm_realloc_fn_ = realloc_fn;
3450 	mm_free_fn_ = free_fn;
3451 }
3452 #endif
3453 
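/*
 * Illustrative sketch for event_set_mem_functions(): install custom
 * allocators before any other libevent call, since memory obtained through
 * one allocator must be released with the matching free.  The counting
 * wrappers are assumptions of the example:
 *
 *     static size_t allocs, frees;
 *     static void *counting_malloc(size_t sz) { allocs++; return malloc(sz); }
 *     static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *     static void counting_free(void *p) { frees++; free(p); }
 *
 *     int main(void)
 *     {
 *         event_set_mem_functions(counting_malloc, counting_realloc,
 *             counting_free);
 *         struct event_base *base = event_base_new();
 *         // ... all of libevent's allocations now go through the wrappers
 *         event_base_free(base);
 *         return 0;
 *     }
 */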
3454 #ifdef EVENT__HAVE_EVENTFD
3455 static void
3456 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3457 {
3458 	ev_uint64_t msg;
3459 	ev_ssize_t r;
3460 	struct event_base *base = arg;
3461 
3462 	r = read(fd, (void*) &msg, sizeof(msg));
3463 	if (r<0 && errno != EAGAIN) {
3464 		event_sock_warn(fd, "Error reading from eventfd");
3465 	}
3466 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3467 	base->is_notify_pending = 0;
3468 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3469 }
3470 #endif
3471 
3472 static void
3473 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3474 {
3475 	unsigned char buf[1024];
3476 	struct event_base *base = arg;
3477 #ifdef _WIN32
3478 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3479 		;
3480 #else
3481 	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3482 		;
3483 #endif
3484 
3485 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3486 	base->is_notify_pending = 0;
3487 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3488 }
3489 
3490 int
3491 evthread_make_base_notifiable(struct event_base *base)
3492 {
3493 	int r;
3494 	if (!base)
3495 		return -1;
3496 
3497 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3498 	r = evthread_make_base_notifiable_nolock_(base);
3499 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3500 	return r;
3501 }
3502 
3503 static int
3504 evthread_make_base_notifiable_nolock_(struct event_base *base)
3505 {
3506 	void (*cb)(evutil_socket_t, short, void *);
3507 	int (*notify)(struct event_base *);
3508 
3509 	if (base->th_notify_fn != NULL) {
3510 		/* The base is already notifiable: we're doing fine. */
3511 		return 0;
3512 	}
3513 
3514 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3515 	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3516 		base->th_notify_fn = event_kq_notify_base_;
3517 		/* No need to add an event here; the backend can wake
3518 		 * itself up just fine. */
3519 		return 0;
3520 	}
3521 #endif
3522 
3523 #ifdef EVENT__HAVE_EVENTFD
3524 	base->th_notify_fd[0] = evutil_eventfd_(0,
3525 	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3526 	if (base->th_notify_fd[0] >= 0) {
3527 		base->th_notify_fd[1] = -1;
3528 		notify = evthread_notify_base_eventfd;
3529 		cb = evthread_notify_drain_eventfd;
3530 	} else
3531 #endif
3532 	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3533 		notify = evthread_notify_base_default;
3534 		cb = evthread_notify_drain_default;
3535 	} else {
3536 		return -1;
3537 	}
3538 
3539 	base->th_notify_fn = notify;
3540 
3541 	/* prepare an event that we can use for wakeup */
3542 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3543 				 EV_READ|EV_PERSIST, cb, base);
3544 
3545 	/* we need to mark this as internal event */
3546 	/* we need to mark this as an internal event */
3547 	event_priority_set(&base->th_notify, 0);
3548 
3549 	return event_add_nolock_(&base->th_notify, NULL, 0);
3550 }
3551 
3552 int
3553 event_base_foreach_event_nolock_(struct event_base *base,
3554     event_base_foreach_event_cb fn, void *arg)
3555 {
3556 	int r, i;
3557 	unsigned u;
3558 	struct event *ev;
3559 
3560 	/* Start out with all the EVLIST_INSERTED events. */
3561 	if ((r = evmap_foreach_event_(base, fn, arg)))
3562 		return r;
3563 
3564 	/* Okay, now we deal with those events that have timeouts and are in
3565 	 * the min-heap. */
3566 	for (u = 0; u < base->timeheap.n; ++u) {
3567 		ev = base->timeheap.p[u];
3568 		if (ev->ev_flags & EVLIST_INSERTED) {
3569 			/* we already processed this one */
3570 			continue;
3571 		}
3572 		if ((r = fn(base, ev, arg)))
3573 			return r;
3574 	}
3575 
3576 	/* Now for the events in one of the common-timeout queues, rather
3577 	 * than the min-heap. */
3578 	for (i = 0; i < base->n_common_timeouts; ++i) {
3579 		struct common_timeout_list *ctl =
3580 		    base->common_timeout_queues[i];
3581 		TAILQ_FOREACH(ev, &ctl->events,
3582 		    ev_timeout_pos.ev_next_with_common_timeout) {
3583 			if (ev->ev_flags & EVLIST_INSERTED) {
3584 				/* we already processed this one */
3585 				continue;
3586 			}
3587 			if ((r = fn(base, ev, arg)))
3588 				return r;
3589 		}
3590 	}
3591 
3592 	/* Finally, we deal with all the active events that we haven't touched
3593 	 * yet. */
3594 	for (i = 0; i < base->nactivequeues; ++i) {
3595 		struct event_callback *evcb;
3596 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3597 			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3598 				/* This isn't an event (EVLIST_INIT clear), or
3599 				 * we already processed it (EVLIST_INSERTED or
3600 				 * EVLIST_TIMEOUT set). */
3601 				continue;
3602 			}
3603 			ev = event_callback_to_event(evcb);
3604 			if ((r = fn(base, ev, arg)))
3605 				return r;
3606 		}
3607 	}
3608 
3609 	return 0;
3610 }
3611 
3612 /* Helper for event_base_dump_events: called on each event in the event base;
3613  * dumps only the inserted events. */
3614 static int
3615 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3616 {
3617 	FILE *output = arg;
3618 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3619 	    "sig" : "fd ";
3620 
3621 	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3622 		return 0;
3623 
3624 	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
3625 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3626 	    (e->ev_events&EV_READ)?" Read":"",
3627 	    (e->ev_events&EV_WRITE)?" Write":"",
3628 	    (e->ev_events&EV_CLOSED)?" EOF":"",
3629 	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3630 	    (e->ev_events&EV_PERSIST)?" Persist":"",
3631 	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3632 	if (e->ev_flags & EVLIST_TIMEOUT) {
3633 		struct timeval tv;
3634 		tv.tv_sec = e->ev_timeout.tv_sec;
3635 		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3636 		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3637 		fprintf(output, " Timeout=%ld.%06d",
3638 		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3639 	}
3640 	fputc('\n', output);
3641 
3642 	return 0;
3643 }
3644 
3645 /* Helper for event_base_dump_events: called on each event in the event base;
3646  * dumps only the active events. */
3647 static int
3648 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3649 {
3650 	FILE *output = arg;
3651 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3652 	    "sig" : "fd ";
3653 
3654 	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3655 		return 0;
3656 
3657 	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3658 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3659 	    (e->ev_res&EV_READ)?" Read":"",
3660 	    (e->ev_res&EV_WRITE)?" Write":"",
3661 	    (e->ev_res&EV_CLOSED)?" EOF":"",
3662 	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3663 	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3664 	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3665 	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3666 
3667 	return 0;
3668 }
3669 
3670 int
3671 event_base_foreach_event(struct event_base *base,
3672     event_base_foreach_event_cb fn, void *arg)
3673 {
3674 	int r;
3675 	if ((!fn) || (!base)) {
3676 		return -1;
3677 	}
3678 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3679 	r = event_base_foreach_event_nolock_(base, fn, arg);
3680 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3681 	return r;
3682 }
3683 
3684 
3685 void
3686 event_base_dump_events(struct event_base *base, FILE *output)
3687 {
3688 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3689 	fprintf(output, "Inserted events:\n");
3690 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3691 
3692 	fprintf(output, "Active events:\n");
3693 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3694 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3695 }
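/*
 * (Editorial sketch, not part of the original file: dumping the current
 * state of a base to stderr while debugging; any writable stdio stream
 * works.)
 *
 *	event_base_dump_events(base, stderr);
 */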
3696 
3697 void
3698 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3699 {
3700 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3701 	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3702 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3703 }
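/*
 * (Editorial sketch, not part of the original file: make every event
 * watching 'fd' for reading run as if the descriptor had become readable.
 * Only EV_READ, EV_WRITE, and EV_CLOSED are honored; other bits are masked
 * off above.)
 *
 *	event_base_active_by_fd(base, fd, EV_READ);
 */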
3704 
3705 void
3706 event_base_active_by_signal(struct event_base *base, int sig)
3707 {
3708 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3709 	evmap_signal_active_(base, sig, 1);
3710 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3711 }
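/*
 * (Editorial sketch, not part of the original file: activate every event
 * registered on this base for SIGHUP, as if the signal had been delivered
 * once.)
 *
 *	event_base_active_by_signal(base, SIGHUP);
 */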
3712 
3713 
3714 void
3715 event_base_add_virtual_(struct event_base *base)
3716 {
3717 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3718 	base->virtual_event_count++;
3719 	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3720 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3721 }
3722 
3723 void
3724 event_base_del_virtual_(struct event_base *base)
3725 {
3726 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3727 	EVUTIL_ASSERT(base->virtual_event_count > 0);
3728 	base->virtual_event_count--;
3729 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3730 		evthread_notify_base(base);
3731 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3732 }
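/*
 * (Editorial note, based on how virtual_event_count is used elsewhere in
 * this file: the two helpers above let internal code such as the Windows
 * IOCP layer register "virtual" events, which keep the event loop from
 * exiting for lack of events even though no fd, signal, or timeout is
 * pending.)
 */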
3733 
3734 static void
3735 event_free_debug_globals_locks(void)
3736 {
3737 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3738 #ifndef EVENT__DISABLE_DEBUG_MODE
3739 	if (event_debug_map_lock_ != NULL) {
3740 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3741 		event_debug_map_lock_ = NULL;
3742 	}
3743 #endif /* EVENT__DISABLE_DEBUG_MODE */
3744 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3745 	return;
3746 }
3747 
3748 static void
3749 event_free_debug_globals(void)
3750 {
3751 	event_free_debug_globals_locks();
3752 }
3753 
3754 static void
3755 event_free_evsig_globals(void)
3756 {
3757 	evsig_free_globals_();
3758 }
3759 
3760 static void
3761 event_free_evutil_globals(void)
3762 {
3763 	evutil_free_globals_();
3764 }
3765 
3766 static void
3767 event_free_globals(void)
3768 {
3769 	event_free_debug_globals();
3770 	event_free_evsig_globals();
3771 	event_free_evutil_globals();
3772 }
3773 
3774 void
3775 libevent_global_shutdown(void)
3776 {
3777 	event_free_globals();
3778 }
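/*
 * (Editorial sketch, not part of the original file: a typical shutdown
 * order.  Free every event_base and any remaining events first; calling
 * back into the library after libevent_global_shutdown() is not supported.)
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */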
3779 
3780 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3781 int
3782 event_global_setup_locks_(const int enable_locks)
3783 {
3784 #ifndef EVENT__DISABLE_DEBUG_MODE
3785 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3786 #endif
3787 	if (evsig_global_setup_locks_(enable_locks) < 0)
3788 		return -1;
3789 	if (evutil_global_setup_locks_(enable_locks) < 0)
3790 		return -1;
3791 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3792 		return -1;
3793 	return 0;
3794 }
3795 #endif
3796 
3797 void
3798 event_base_assert_ok_(struct event_base *base)
3799 {
3800 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3801 	event_base_assert_ok_nolock_(base);
3802 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3803 }
3804 
3805 void
3806 event_base_assert_ok_nolock_(struct event_base *base)
3807 {
3808 	int i;
3809 	int count;
3810 
3811 	/* First do checks on the per-fd and per-signal lists */
3812 	evmap_check_integrity_(base);
3813 
3814 	/* Check the heap property */
3815 	for (i = 1; i < (int)base->timeheap.n; ++i) {
3816 		int parent = (i - 1) / 2;
3817 		struct event *ev, *p_ev;
3818 		ev = base->timeheap.p[i];
3819 		p_ev = base->timeheap.p[parent];
3820 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3821 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3822 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3823 	}
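	/*
	 * (Editorial note: the timeout heap is a binary min-heap stored in an
	 * array, so the parent of slot i sits at (i - 1) / 2; e.g. slots 3 and
	 * 4 share the parent at slot 1.  The loop above re-checks that every
	 * parent expires no later than its children and that each event's
	 * cached heap index matches its slot.)
	 */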
3824 
3825 	/* Check that the common timeouts are fine */
3826 	for (i = 0; i < base->n_common_timeouts; ++i) {
3827 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3828 		struct event *last=NULL, *ev;
3829 
3830 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3831 
3832 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3833 			if (last)
3834 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3835 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3836 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3837 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3838 			last = ev;
3839 		}
3840 	}
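	/*
	 * (Editorial note: events sharing a "common timeout" all use the same
	 * interval, so they expire in the order they were scheduled; the loop
	 * above re-checks that each queue stays sorted by expiration time and
	 * that every entry encodes the index of the queue it lives on.)
	 */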
3841 
3842 	/* Check the active queues. */
3843 	count = 0;
3844 	for (i = 0; i < base->nactivequeues; ++i) {
3845 		struct event_callback *evcb;
3846 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3847 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3848 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3849 			EVUTIL_ASSERT(evcb->evcb_pri == i);
3850 			++count;
3851 		}
3852 	}
3853 
3854 	{
3855 		struct event_callback *evcb;
3856 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3857 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3858 			++count;
3859 		}
3860 	}
3861 	EVUTIL_ASSERT(count == base->event_count_active);
3862 }
3863