12b15cb3dSCy Schubert /*
22b15cb3dSCy Schubert * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
32b15cb3dSCy Schubert * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
42b15cb3dSCy Schubert *
52b15cb3dSCy Schubert * Redistribution and use in source and binary forms, with or without
62b15cb3dSCy Schubert * modification, are permitted provided that the following conditions
72b15cb3dSCy Schubert * are met:
82b15cb3dSCy Schubert * 1. Redistributions of source code must retain the above copyright
92b15cb3dSCy Schubert * notice, this list of conditions and the following disclaimer.
102b15cb3dSCy Schubert * 2. Redistributions in binary form must reproduce the above copyright
112b15cb3dSCy Schubert * notice, this list of conditions and the following disclaimer in the
122b15cb3dSCy Schubert * documentation and/or other materials provided with the distribution.
132b15cb3dSCy Schubert * 3. The name of the author may not be used to endorse or promote products
142b15cb3dSCy Schubert * derived from this software without specific prior written permission.
152b15cb3dSCy Schubert *
162b15cb3dSCy Schubert * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
172b15cb3dSCy Schubert * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
182b15cb3dSCy Schubert * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
192b15cb3dSCy Schubert * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
202b15cb3dSCy Schubert * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
212b15cb3dSCy Schubert * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
222b15cb3dSCy Schubert * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
232b15cb3dSCy Schubert * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
242b15cb3dSCy Schubert * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
252b15cb3dSCy Schubert * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
262b15cb3dSCy Schubert */
272b15cb3dSCy Schubert #include "event2/event-config.h"
282b15cb3dSCy Schubert #include "evconfig-private.h"
292b15cb3dSCy Schubert
302b15cb3dSCy Schubert #ifdef _WIN32
312b15cb3dSCy Schubert #include <winsock2.h>
322b15cb3dSCy Schubert #define WIN32_LEAN_AND_MEAN
332b15cb3dSCy Schubert #include <windows.h>
342b15cb3dSCy Schubert #undef WIN32_LEAN_AND_MEAN
352b15cb3dSCy Schubert #endif
362b15cb3dSCy Schubert #include <sys/types.h>
372b15cb3dSCy Schubert #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
382b15cb3dSCy Schubert #include <sys/time.h>
392b15cb3dSCy Schubert #endif
402b15cb3dSCy Schubert #include <sys/queue.h>
412b15cb3dSCy Schubert #ifdef EVENT__HAVE_SYS_SOCKET_H
422b15cb3dSCy Schubert #include <sys/socket.h>
432b15cb3dSCy Schubert #endif
442b15cb3dSCy Schubert #include <stdio.h>
452b15cb3dSCy Schubert #include <stdlib.h>
462b15cb3dSCy Schubert #ifdef EVENT__HAVE_UNISTD_H
472b15cb3dSCy Schubert #include <unistd.h>
482b15cb3dSCy Schubert #endif
492b15cb3dSCy Schubert #include <ctype.h>
502b15cb3dSCy Schubert #include <errno.h>
512b15cb3dSCy Schubert #include <signal.h>
522b15cb3dSCy Schubert #include <string.h>
532b15cb3dSCy Schubert #include <time.h>
542b15cb3dSCy Schubert #include <limits.h>
55*a466cc55SCy Schubert #ifdef EVENT__HAVE_FCNTL_H
56*a466cc55SCy Schubert #include <fcntl.h>
57*a466cc55SCy Schubert #endif
582b15cb3dSCy Schubert
592b15cb3dSCy Schubert #include "event2/event.h"
602b15cb3dSCy Schubert #include "event2/event_struct.h"
612b15cb3dSCy Schubert #include "event2/event_compat.h"
622b15cb3dSCy Schubert #include "event-internal.h"
632b15cb3dSCy Schubert #include "defer-internal.h"
642b15cb3dSCy Schubert #include "evthread-internal.h"
652b15cb3dSCy Schubert #include "event2/thread.h"
662b15cb3dSCy Schubert #include "event2/util.h"
672b15cb3dSCy Schubert #include "log-internal.h"
682b15cb3dSCy Schubert #include "evmap-internal.h"
692b15cb3dSCy Schubert #include "iocp-internal.h"
702b15cb3dSCy Schubert #include "changelist-internal.h"
712b15cb3dSCy Schubert #define HT_NO_CACHE_HASH_VALUES
722b15cb3dSCy Schubert #include "ht-internal.h"
732b15cb3dSCy Schubert #include "util-internal.h"
742b15cb3dSCy Schubert
752b15cb3dSCy Schubert
762b15cb3dSCy Schubert #ifdef EVENT__HAVE_WORKING_KQUEUE
772b15cb3dSCy Schubert #include "kqueue-internal.h"
782b15cb3dSCy Schubert #endif
792b15cb3dSCy Schubert
802b15cb3dSCy Schubert #ifdef EVENT__HAVE_EVENT_PORTS
812b15cb3dSCy Schubert extern const struct eventop evportops;
822b15cb3dSCy Schubert #endif
832b15cb3dSCy Schubert #ifdef EVENT__HAVE_SELECT
842b15cb3dSCy Schubert extern const struct eventop selectops;
852b15cb3dSCy Schubert #endif
862b15cb3dSCy Schubert #ifdef EVENT__HAVE_POLL
872b15cb3dSCy Schubert extern const struct eventop pollops;
882b15cb3dSCy Schubert #endif
892b15cb3dSCy Schubert #ifdef EVENT__HAVE_EPOLL
902b15cb3dSCy Schubert extern const struct eventop epollops;
912b15cb3dSCy Schubert #endif
922b15cb3dSCy Schubert #ifdef EVENT__HAVE_WORKING_KQUEUE
932b15cb3dSCy Schubert extern const struct eventop kqops;
942b15cb3dSCy Schubert #endif
952b15cb3dSCy Schubert #ifdef EVENT__HAVE_DEVPOLL
962b15cb3dSCy Schubert extern const struct eventop devpollops;
972b15cb3dSCy Schubert #endif
982b15cb3dSCy Schubert #ifdef _WIN32
992b15cb3dSCy Schubert extern const struct eventop win32ops;
1002b15cb3dSCy Schubert #endif
1012b15cb3dSCy Schubert
/* Array of backends in order of preference.  The first entry whose
 * initialization succeeds (and that is not disabled by configuration or
 * environment) is the backend a new event_base will use.  The list is
 * NULL-terminated. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,	/* Solaris event ports */
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,		/* BSD/macOS kqueue */
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,	/* Linux epoll */
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,	/* Solaris /dev/poll */
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,	/* POSIX poll(2) */
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,	/* select(2); lowest preference of the POSIX backends */
#endif
#ifdef _WIN32
	&win32ops,	/* Windows select-based backend */
#endif
	NULL
};
1272b15cb3dSCy Schubert
/* Global state; deprecated.  Holds the base implicitly used by the old
 * event_init()/event_dispatch() API. */
EVENT2_EXPORT_SYMBOL
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

/* Sentinel pointer used by the self-callback-arg mechanism; presumably
 * handed out by event_self_cbarg() — that function is not visible in this
 * chunk, so confirm against the rest of the file. */
static void *event_self_cbarg_ptr_ = NULL;
1362b15cb3dSCy Schubert
1372b15cb3dSCy Schubert /* Prototypes */
1382b15cb3dSCy Schubert static void event_queue_insert_active(struct event_base *, struct event_callback *);
1392b15cb3dSCy Schubert static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
1402b15cb3dSCy Schubert static void event_queue_insert_timeout(struct event_base *, struct event *);
1412b15cb3dSCy Schubert static void event_queue_insert_inserted(struct event_base *, struct event *);
1422b15cb3dSCy Schubert static void event_queue_remove_active(struct event_base *, struct event_callback *);
1432b15cb3dSCy Schubert static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
1442b15cb3dSCy Schubert static void event_queue_remove_timeout(struct event_base *, struct event *);
1452b15cb3dSCy Schubert static void event_queue_remove_inserted(struct event_base *, struct event *);
1462b15cb3dSCy Schubert static void event_queue_make_later_events_active(struct event_base *base);
1472b15cb3dSCy Schubert
1482b15cb3dSCy Schubert static int evthread_make_base_notifiable_nolock_(struct event_base *base);
1492b15cb3dSCy Schubert static int event_del_(struct event *ev, int blocking);
1502b15cb3dSCy Schubert
1512b15cb3dSCy Schubert #ifdef USE_REINSERT_TIMEOUT
1522b15cb3dSCy Schubert /* This code seems buggy; only turn it on if we find out what the trouble is. */
1532b15cb3dSCy Schubert static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
1542b15cb3dSCy Schubert #endif
1552b15cb3dSCy Schubert
1562b15cb3dSCy Schubert static int event_haveevents(struct event_base *);
1572b15cb3dSCy Schubert
1582b15cb3dSCy Schubert static int event_process_active(struct event_base *);
1592b15cb3dSCy Schubert
1602b15cb3dSCy Schubert static int timeout_next(struct event_base *, struct timeval **);
1612b15cb3dSCy Schubert static void timeout_process(struct event_base *);
1622b15cb3dSCy Schubert
1632b15cb3dSCy Schubert static inline void event_signal_closure(struct event_base *, struct event *ev);
1642b15cb3dSCy Schubert static inline void event_persist_closure(struct event_base *, struct event *ev);
1652b15cb3dSCy Schubert
1662b15cb3dSCy Schubert static int evthread_notify_base(struct event_base *base);
1672b15cb3dSCy Schubert
1682b15cb3dSCy Schubert static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
1692b15cb3dSCy Schubert struct event *ev);
1702b15cb3dSCy Schubert
1712b15cb3dSCy Schubert #ifndef EVENT__DISABLE_DEBUG_MODE
1722b15cb3dSCy Schubert /* These functions implement a hashtable of which 'struct event *' structures
1732b15cb3dSCy Schubert * have been setup or added. We don't want to trust the content of the struct
1742b15cb3dSCy Schubert * event itself, since we're trying to work through cases where an event gets
1752b15cb3dSCy Schubert * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
1762b15cb3dSCy Schubert */
1772b15cb3dSCy Schubert
/* One record in the debug-mode hashtable: tracks a single struct event
 * pointer that has been set up. */
struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;	/* hashtable linkage */
	const struct event *ptr;		/* the tracked event (hash key) */
	unsigned added : 1;			/* nonzero iff currently added */
};
1832b15cb3dSCy Schubert
1842b15cb3dSCy Schubert static inline unsigned
hash_debug_entry(const struct event_debug_entry * e)1852b15cb3dSCy Schubert hash_debug_entry(const struct event_debug_entry *e)
1862b15cb3dSCy Schubert {
1872b15cb3dSCy Schubert /* We need to do this silliness to convince compilers that we
1882b15cb3dSCy Schubert * honestly mean to cast e->ptr to an integer, and discard any
1892b15cb3dSCy Schubert * part of it that doesn't fit in an unsigned.
1902b15cb3dSCy Schubert */
1912b15cb3dSCy Schubert unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
1922b15cb3dSCy Schubert /* Our hashtable implementation is pretty sensitive to low bits,
1932b15cb3dSCy Schubert * and every struct event is over 64 bytes in size, so we can
1942b15cb3dSCy Schubert * just say >>6. */
1952b15cb3dSCy Schubert return (u >> 6);
1962b15cb3dSCy Schubert }
1972b15cb3dSCy Schubert
1982b15cb3dSCy Schubert static inline int
eq_debug_entry(const struct event_debug_entry * a,const struct event_debug_entry * b)1992b15cb3dSCy Schubert eq_debug_entry(const struct event_debug_entry *a,
2002b15cb3dSCy Schubert const struct event_debug_entry *b)
2012b15cb3dSCy Schubert {
2022b15cb3dSCy Schubert return a->ptr == b->ptr;
2032b15cb3dSCy Schubert }
2042b15cb3dSCy Schubert
/* Nonzero once event_enable_debug_mode() has been called. */
int event_debug_mode_on_ = 0;


#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
/* Lock protecting global_debug_map; allocated by the evthread setup code. */
static void *event_debug_map_lock_ = NULL;
#endif
/* Hashtable of every event that is currently set up (debug mode only). */
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
2342b15cb3dSCy Schubert
235*a466cc55SCy Schubert /* record that ev is now setup (that is, ready for an add) */
236*a466cc55SCy Schubert static void event_debug_note_setup_(const struct event *ev)
237*a466cc55SCy Schubert {
238*a466cc55SCy Schubert struct event_debug_entry *dent, find;
239*a466cc55SCy Schubert
240*a466cc55SCy Schubert if (!event_debug_mode_on_)
241*a466cc55SCy Schubert goto out;
242*a466cc55SCy Schubert
243*a466cc55SCy Schubert find.ptr = ev;
244*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
245*a466cc55SCy Schubert dent = HT_FIND(event_debug_map, &global_debug_map, &find);
246*a466cc55SCy Schubert if (dent) {
247*a466cc55SCy Schubert dent->added = 0;
248*a466cc55SCy Schubert } else {
249*a466cc55SCy Schubert dent = mm_malloc(sizeof(*dent));
250*a466cc55SCy Schubert if (!dent)
251*a466cc55SCy Schubert event_err(1,
252*a466cc55SCy Schubert "Out of memory in debugging code");
253*a466cc55SCy Schubert dent->ptr = ev;
254*a466cc55SCy Schubert dent->added = 0;
255*a466cc55SCy Schubert HT_INSERT(event_debug_map, &global_debug_map, dent);
256*a466cc55SCy Schubert }
257*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
258*a466cc55SCy Schubert
259*a466cc55SCy Schubert out:
260*a466cc55SCy Schubert event_debug_mode_too_late = 1;
261*a466cc55SCy Schubert }
262*a466cc55SCy Schubert /* record that ev is no longer setup */
event_debug_note_teardown_(const struct event * ev)263*a466cc55SCy Schubert static void event_debug_note_teardown_(const struct event *ev)
264*a466cc55SCy Schubert {
265*a466cc55SCy Schubert struct event_debug_entry *dent, find;
266*a466cc55SCy Schubert
267*a466cc55SCy Schubert if (!event_debug_mode_on_)
268*a466cc55SCy Schubert goto out;
269*a466cc55SCy Schubert
270*a466cc55SCy Schubert find.ptr = ev;
271*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
272*a466cc55SCy Schubert dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
273*a466cc55SCy Schubert if (dent)
274*a466cc55SCy Schubert mm_free(dent);
275*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
276*a466cc55SCy Schubert
277*a466cc55SCy Schubert out:
278*a466cc55SCy Schubert event_debug_mode_too_late = 1;
279*a466cc55SCy Schubert }
2802b15cb3dSCy Schubert /* Macro: record that ev is now added */
event_debug_note_add_(const struct event * ev)281*a466cc55SCy Schubert static void event_debug_note_add_(const struct event *ev)
282*a466cc55SCy Schubert {
283*a466cc55SCy Schubert struct event_debug_entry *dent,find;
284*a466cc55SCy Schubert
285*a466cc55SCy Schubert if (!event_debug_mode_on_)
286*a466cc55SCy Schubert goto out;
287*a466cc55SCy Schubert
288*a466cc55SCy Schubert find.ptr = ev;
289*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
290*a466cc55SCy Schubert dent = HT_FIND(event_debug_map, &global_debug_map, &find);
291*a466cc55SCy Schubert if (dent) {
292*a466cc55SCy Schubert dent->added = 1;
293*a466cc55SCy Schubert } else {
294*a466cc55SCy Schubert event_errx(EVENT_ERR_ABORT_,
295*a466cc55SCy Schubert "%s: noting an add on a non-setup event %p"
296*a466cc55SCy Schubert " (events: 0x%x, fd: "EV_SOCK_FMT
297*a466cc55SCy Schubert ", flags: 0x%x)",
298*a466cc55SCy Schubert __func__, ev, ev->ev_events,
299*a466cc55SCy Schubert EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
300*a466cc55SCy Schubert }
301*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
302*a466cc55SCy Schubert
303*a466cc55SCy Schubert out:
304*a466cc55SCy Schubert event_debug_mode_too_late = 1;
305*a466cc55SCy Schubert }
306*a466cc55SCy Schubert /* record that ev is no longer added */
event_debug_note_del_(const struct event * ev)307*a466cc55SCy Schubert static void event_debug_note_del_(const struct event *ev)
308*a466cc55SCy Schubert {
309*a466cc55SCy Schubert struct event_debug_entry *dent, find;
310*a466cc55SCy Schubert
311*a466cc55SCy Schubert if (!event_debug_mode_on_)
312*a466cc55SCy Schubert goto out;
313*a466cc55SCy Schubert
314*a466cc55SCy Schubert find.ptr = ev;
315*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
316*a466cc55SCy Schubert dent = HT_FIND(event_debug_map, &global_debug_map, &find);
317*a466cc55SCy Schubert if (dent) {
318*a466cc55SCy Schubert dent->added = 0;
319*a466cc55SCy Schubert } else {
320*a466cc55SCy Schubert event_errx(EVENT_ERR_ABORT_,
321*a466cc55SCy Schubert "%s: noting a del on a non-setup event %p"
322*a466cc55SCy Schubert " (events: 0x%x, fd: "EV_SOCK_FMT
323*a466cc55SCy Schubert ", flags: 0x%x)",
324*a466cc55SCy Schubert __func__, ev, ev->ev_events,
325*a466cc55SCy Schubert EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
326*a466cc55SCy Schubert }
327*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
328*a466cc55SCy Schubert
329*a466cc55SCy Schubert out:
330*a466cc55SCy Schubert event_debug_mode_too_late = 1;
331*a466cc55SCy Schubert }
332*a466cc55SCy Schubert /* assert that ev is setup (i.e., okay to add or inspect) */
event_debug_assert_is_setup_(const struct event * ev)333*a466cc55SCy Schubert static void event_debug_assert_is_setup_(const struct event *ev)
334*a466cc55SCy Schubert {
335*a466cc55SCy Schubert struct event_debug_entry *dent, find;
336*a466cc55SCy Schubert
337*a466cc55SCy Schubert if (!event_debug_mode_on_)
338*a466cc55SCy Schubert return;
339*a466cc55SCy Schubert
340*a466cc55SCy Schubert find.ptr = ev;
341*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
342*a466cc55SCy Schubert dent = HT_FIND(event_debug_map, &global_debug_map, &find);
343*a466cc55SCy Schubert if (!dent) {
344*a466cc55SCy Schubert event_errx(EVENT_ERR_ABORT_,
345*a466cc55SCy Schubert "%s called on a non-initialized event %p"
346*a466cc55SCy Schubert " (events: 0x%x, fd: "EV_SOCK_FMT
347*a466cc55SCy Schubert ", flags: 0x%x)",
348*a466cc55SCy Schubert __func__, ev, ev->ev_events,
349*a466cc55SCy Schubert EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
350*a466cc55SCy Schubert }
351*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
352*a466cc55SCy Schubert }
353*a466cc55SCy Schubert /* assert that ev is not added (i.e., okay to tear down or set up again) */
event_debug_assert_not_added_(const struct event * ev)354*a466cc55SCy Schubert static void event_debug_assert_not_added_(const struct event *ev)
355*a466cc55SCy Schubert {
356*a466cc55SCy Schubert struct event_debug_entry *dent, find;
357*a466cc55SCy Schubert
358*a466cc55SCy Schubert if (!event_debug_mode_on_)
359*a466cc55SCy Schubert return;
360*a466cc55SCy Schubert
361*a466cc55SCy Schubert find.ptr = ev;
362*a466cc55SCy Schubert EVLOCK_LOCK(event_debug_map_lock_, 0);
363*a466cc55SCy Schubert dent = HT_FIND(event_debug_map, &global_debug_map, &find);
364*a466cc55SCy Schubert if (dent && dent->added) {
365*a466cc55SCy Schubert event_errx(EVENT_ERR_ABORT_,
366*a466cc55SCy Schubert "%s called on an already added event %p"
367*a466cc55SCy Schubert " (events: 0x%x, fd: "EV_SOCK_FMT", "
368*a466cc55SCy Schubert "flags: 0x%x)",
369*a466cc55SCy Schubert __func__, ev, ev->ev_events,
370*a466cc55SCy Schubert EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
371*a466cc55SCy Schubert }
372*a466cc55SCy Schubert EVLOCK_UNLOCK(event_debug_map_lock_, 0);
373*a466cc55SCy Schubert }
event_debug_assert_socket_nonblocking_(evutil_socket_t fd)374*a466cc55SCy Schubert static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
375*a466cc55SCy Schubert {
376*a466cc55SCy Schubert if (!event_debug_mode_on_)
377*a466cc55SCy Schubert return;
378*a466cc55SCy Schubert if (fd < 0)
379*a466cc55SCy Schubert return;
380*a466cc55SCy Schubert
381*a466cc55SCy Schubert #ifndef _WIN32
382*a466cc55SCy Schubert {
383*a466cc55SCy Schubert int flags;
384*a466cc55SCy Schubert if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
385*a466cc55SCy Schubert EVUTIL_ASSERT(flags & O_NONBLOCK);
386*a466cc55SCy Schubert }
387*a466cc55SCy Schubert }
388*a466cc55SCy Schubert #endif
389*a466cc55SCy Schubert }
#else
/* Debug mode compiled out: all debug hooks collapse to no-ops. */
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
#endif
3992b15cb3dSCy Schubert
4002b15cb3dSCy Schubert #define EVENT_BASE_ASSERT_LOCKED(base) \
4012b15cb3dSCy Schubert EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
4022b15cb3dSCy Schubert
4032b15cb3dSCy Schubert /* How often (in seconds) do we check for changes in wall clock time relative
4042b15cb3dSCy Schubert * to monotonic time? Set this to -1 for 'never.' */
4052b15cb3dSCy Schubert #define CLOCK_SYNC_INTERVAL 5
4062b15cb3dSCy Schubert
4072b15cb3dSCy Schubert /** Set 'tp' to the current time according to 'base'. We must hold the lock
4082b15cb3dSCy Schubert * on 'base'. If there is a cached time, return it. Otherwise, use
4092b15cb3dSCy Schubert * clock_gettime or gettimeofday as appropriate to find out the right time.
4102b15cb3dSCy Schubert * Return 0 on success, -1 on failure.
4112b15cb3dSCy Schubert */
4122b15cb3dSCy Schubert static int
gettime(struct event_base * base,struct timeval * tp)4132b15cb3dSCy Schubert gettime(struct event_base *base, struct timeval *tp)
4142b15cb3dSCy Schubert {
4152b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
4162b15cb3dSCy Schubert
4172b15cb3dSCy Schubert if (base->tv_cache.tv_sec) {
4182b15cb3dSCy Schubert *tp = base->tv_cache;
4192b15cb3dSCy Schubert return (0);
4202b15cb3dSCy Schubert }
4212b15cb3dSCy Schubert
4222b15cb3dSCy Schubert if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
4232b15cb3dSCy Schubert return -1;
4242b15cb3dSCy Schubert }
4252b15cb3dSCy Schubert
4262b15cb3dSCy Schubert if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
4272b15cb3dSCy Schubert < tp->tv_sec) {
4282b15cb3dSCy Schubert struct timeval tv;
4292b15cb3dSCy Schubert evutil_gettimeofday(&tv,NULL);
4302b15cb3dSCy Schubert evutil_timersub(&tv, tp, &base->tv_clock_diff);
4312b15cb3dSCy Schubert base->last_updated_clock_diff = tp->tv_sec;
4322b15cb3dSCy Schubert }
4332b15cb3dSCy Schubert
4342b15cb3dSCy Schubert return 0;
4352b15cb3dSCy Schubert }
4362b15cb3dSCy Schubert
4372b15cb3dSCy Schubert int
event_base_gettimeofday_cached(struct event_base * base,struct timeval * tv)4382b15cb3dSCy Schubert event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
4392b15cb3dSCy Schubert {
4402b15cb3dSCy Schubert int r;
4412b15cb3dSCy Schubert if (!base) {
4422b15cb3dSCy Schubert base = current_base;
4432b15cb3dSCy Schubert if (!current_base)
4442b15cb3dSCy Schubert return evutil_gettimeofday(tv, NULL);
4452b15cb3dSCy Schubert }
4462b15cb3dSCy Schubert
4472b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4482b15cb3dSCy Schubert if (base->tv_cache.tv_sec == 0) {
4492b15cb3dSCy Schubert r = evutil_gettimeofday(tv, NULL);
4502b15cb3dSCy Schubert } else {
4512b15cb3dSCy Schubert evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
4522b15cb3dSCy Schubert r = 0;
4532b15cb3dSCy Schubert }
4542b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
4552b15cb3dSCy Schubert return r;
4562b15cb3dSCy Schubert }
4572b15cb3dSCy Schubert
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	/* A zero tv_sec marks the cache as empty (see gettime()). */
	base->tv_cache.tv_sec = 0;
}
4642b15cb3dSCy Schubert
/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	/* Invalidate first so gettime() fetches a fresh value rather than
	 * returning the stale cache. */
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}
4732b15cb3dSCy Schubert
4742b15cb3dSCy Schubert int
event_base_update_cache_time(struct event_base * base)4752b15cb3dSCy Schubert event_base_update_cache_time(struct event_base *base)
4762b15cb3dSCy Schubert {
4772b15cb3dSCy Schubert
4782b15cb3dSCy Schubert if (!base) {
4792b15cb3dSCy Schubert base = current_base;
4802b15cb3dSCy Schubert if (!current_base)
4812b15cb3dSCy Schubert return -1;
4822b15cb3dSCy Schubert }
4832b15cb3dSCy Schubert
4842b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4852b15cb3dSCy Schubert if (base->running_loop)
4862b15cb3dSCy Schubert update_time_cache(base);
4872b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
4882b15cb3dSCy Schubert return 0;
4892b15cb3dSCy Schubert }
4902b15cb3dSCy Schubert
/* Recover the enclosing struct event from its embedded event_callback.
 * Only valid for callbacks that belong to a struct event. */
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	/* EVLIST_INIT is only set on callbacks embedded in a struct event. */
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}
4972b15cb3dSCy Schubert
/* Return the event_callback embedded in ev (inverse of
 * event_callback_to_event). */
static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
5032b15cb3dSCy Schubert
5042b15cb3dSCy Schubert struct event_base *
event_init(void)5052b15cb3dSCy Schubert event_init(void)
5062b15cb3dSCy Schubert {
5072b15cb3dSCy Schubert struct event_base *base = event_base_new_with_config(NULL);
5082b15cb3dSCy Schubert
5092b15cb3dSCy Schubert if (base == NULL) {
5102b15cb3dSCy Schubert event_errx(1, "%s: Unable to construct event_base", __func__);
5112b15cb3dSCy Schubert return NULL;
5122b15cb3dSCy Schubert }
5132b15cb3dSCy Schubert
5142b15cb3dSCy Schubert current_base = base;
5152b15cb3dSCy Schubert
5162b15cb3dSCy Schubert return (base);
5172b15cb3dSCy Schubert }
5182b15cb3dSCy Schubert
5192b15cb3dSCy Schubert struct event_base *
event_base_new(void)5202b15cb3dSCy Schubert event_base_new(void)
5212b15cb3dSCy Schubert {
5222b15cb3dSCy Schubert struct event_base *base = NULL;
5232b15cb3dSCy Schubert struct event_config *cfg = event_config_new();
5242b15cb3dSCy Schubert if (cfg) {
5252b15cb3dSCy Schubert base = event_base_new_with_config(cfg);
5262b15cb3dSCy Schubert event_config_free(cfg);
5272b15cb3dSCy Schubert }
5282b15cb3dSCy Schubert return base;
5292b15cb3dSCy Schubert }
5302b15cb3dSCy Schubert
5312b15cb3dSCy Schubert /** Return true iff 'method' is the name of a method that 'cfg' tells us to
5322b15cb3dSCy Schubert * avoid. */
5332b15cb3dSCy Schubert static int
event_config_is_avoided_method(const struct event_config * cfg,const char * method)5342b15cb3dSCy Schubert event_config_is_avoided_method(const struct event_config *cfg,
5352b15cb3dSCy Schubert const char *method)
5362b15cb3dSCy Schubert {
5372b15cb3dSCy Schubert struct event_config_entry *entry;
5382b15cb3dSCy Schubert
5392b15cb3dSCy Schubert TAILQ_FOREACH(entry, &cfg->entries, next) {
5402b15cb3dSCy Schubert if (entry->avoid_method != NULL &&
5412b15cb3dSCy Schubert strcmp(entry->avoid_method, method) == 0)
5422b15cb3dSCy Schubert return (1);
5432b15cb3dSCy Schubert }
5442b15cb3dSCy Schubert
5452b15cb3dSCy Schubert return (0);
5462b15cb3dSCy Schubert }
5472b15cb3dSCy Schubert
5482b15cb3dSCy Schubert /** Return true iff 'method' is disabled according to the environment. */
5492b15cb3dSCy Schubert static int
event_is_method_disabled(const char * name)5502b15cb3dSCy Schubert event_is_method_disabled(const char *name)
5512b15cb3dSCy Schubert {
5522b15cb3dSCy Schubert char environment[64];
5532b15cb3dSCy Schubert int i;
5542b15cb3dSCy Schubert
5552b15cb3dSCy Schubert evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
5562b15cb3dSCy Schubert for (i = 8; environment[i] != '\0'; ++i)
5572b15cb3dSCy Schubert environment[i] = EVUTIL_TOUPPER_(environment[i]);
5582b15cb3dSCy Schubert /* Note that evutil_getenv_() ignores the environment entirely if
5592b15cb3dSCy Schubert * we're setuid */
5602b15cb3dSCy Schubert return (evutil_getenv_(environment) != NULL);
5612b15cb3dSCy Schubert }
5622b15cb3dSCy Schubert
/* Public: report the feature flags of the backend selected for 'base'. */
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
5682b15cb3dSCy Schubert
5692b15cb3dSCy Schubert void
event_enable_debug_mode(void)5702b15cb3dSCy Schubert event_enable_debug_mode(void)
5712b15cb3dSCy Schubert {
5722b15cb3dSCy Schubert #ifndef EVENT__DISABLE_DEBUG_MODE
5732b15cb3dSCy Schubert if (event_debug_mode_on_)
5742b15cb3dSCy Schubert event_errx(1, "%s was called twice!", __func__);
5752b15cb3dSCy Schubert if (event_debug_mode_too_late)
5762b15cb3dSCy Schubert event_errx(1, "%s must be called *before* creating any events "
5772b15cb3dSCy Schubert "or event_bases",__func__);
5782b15cb3dSCy Schubert
5792b15cb3dSCy Schubert event_debug_mode_on_ = 1;
5802b15cb3dSCy Schubert
5812b15cb3dSCy Schubert HT_INIT(event_debug_map, &global_debug_map);
5822b15cb3dSCy Schubert #endif
5832b15cb3dSCy Schubert }
5842b15cb3dSCy Schubert
/* Turn debug mode back off, freeing every entry in the debug-tracking
 * hash table under its lock and clearing the mode flag. */
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		/* Advance (and unlink the current entry) before freeing
		 * "victim", so the iterator never touches freed memory. */
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_ = 0;
#endif
}
6032b15cb3dSCy Schubert
/* Construct a new event_base honoring the settings in "cfg" (NULL means
 * all defaults): configure the monotonic timer, copy the dispatch limits,
 * select the first usable backend from eventops[], set up a single
 * priority queue, and — when thread support is compiled in — allocate the
 * base lock, condition, and notification machinery.  Returns NULL on any
 * failure, freeing whatever was partially constructed. */
struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	/* From here on it is too late to call event_enable_debug_mode(). */
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	/* Environment variables are consulted unless the config opts out. */
	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		/* Precise timing can come from the config flag or from the
		 * EVENT_PRECISE_TIMER environment variable. */
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time) {
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
			}
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	/* Mark all notification fds as "not yet created". */
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	/* Copy the dispatch limits from the config, or use the defaults
	 * (no time limit, priority threshold 1). */
	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	/* With no limits at all, disable the priority threshold entirely. */
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	/* Pick the first backend that the config and environment allow and
	 * that initializes successfully. */
	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r<0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}
7422b15cb3dSCy Schubert
/* Launch the IOCP thread pool for this base if it is not already running.
 * "n_cpus" is passed through to event_iocp_port_launch_().  Returns 0 on
 * success or if already started; -1 on launch failure or on non-Windows
 * builds, where IOCP is unavailable. */
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp == NULL) {
		base->iocp = event_iocp_port_launch_(n_cpus);
		if (base->iocp == NULL) {
			event_warnx("%s: Couldn't launch IOCP", __func__);
			return -1;
		}
	}
	return 0;
#else
	return -1;
#endif
}
7592b15cb3dSCy Schubert
/* Shut down this base's IOCP port, if one was started.  A no-op on
 * non-Windows builds and when no port exists. */
void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	if (base->iocp != NULL) {
		int shutdown_rv = event_iocp_shutdown_(base->iocp, -1);
		EVUTIL_ASSERT(shutdown_rv >= 0);
		base->iocp = NULL;
	}
#endif
}
7732b15cb3dSCy Schubert
/* Remove one pending callback while tearing down a base.  If "evcb" is
 * embedded in an initialized event, the event is deleted (unless it is
 * internal); otherwise the bare callback is canceled under the base lock.
 * When run_finalizers is set and the callback is mid-finalization, its
 * finalize function is invoked here, since the loop will never get to run
 * it.  Returns 1 when something user-visible was removed, 0 for internal
 * events. */
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		/* The callback belongs to a struct event. */
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			/* Delete it even if finalization is in progress. */
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		/* A bare event_callback; cancel it under the base lock. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			/* The _FREE variant also owns the event's storage. */
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
8132b15cb3dSCy Schubert
event_base_free_queues_(struct event_base * base,int run_finalizers)814*a466cc55SCy Schubert static int event_base_free_queues_(struct event_base *base, int run_finalizers)
815*a466cc55SCy Schubert {
816*a466cc55SCy Schubert int deleted = 0, i;
817*a466cc55SCy Schubert
818*a466cc55SCy Schubert for (i = 0; i < base->nactivequeues; ++i) {
819*a466cc55SCy Schubert struct event_callback *evcb, *next;
820*a466cc55SCy Schubert for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
821*a466cc55SCy Schubert next = TAILQ_NEXT(evcb, evcb_active_next);
822*a466cc55SCy Schubert deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
823*a466cc55SCy Schubert evcb = next;
824*a466cc55SCy Schubert }
825*a466cc55SCy Schubert }
826*a466cc55SCy Schubert
827*a466cc55SCy Schubert {
828*a466cc55SCy Schubert struct event_callback *evcb;
829*a466cc55SCy Schubert while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
830*a466cc55SCy Schubert deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
831*a466cc55SCy Schubert }
832*a466cc55SCy Schubert }
833*a466cc55SCy Schubert
834*a466cc55SCy Schubert return deleted;
835*a466cc55SCy Schubert }
836*a466cc55SCy Schubert
/* Shared implementation of event_base_free() and
 * event_base_free_nofinalize(): delete all non-internal events, drain the
 * queues, release backend and threading state, then free the base itself.
 * When run_finalizers is nonzero, finalize callbacks of events that are
 * mid-finalization get invoked during the queue flush. */
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	/* Drain the timeout heap; everything left in it counts as a
	 * user-visible event. */
	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	/* Tear down each common-timeout queue along with its marker event. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			/* Grab the successor before event_del() unlinks ev. */
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer may register yet another finalizer from inside
		 * a finalizer, and if that one lands in active_later_queue it
		 * can get promoted to the activequeues — leaving events
		 * active after this function returns, which is not what we
		 * want (we even have an assertion for this).  So keep
		 * flushing the queues until a pass frees nothing.
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		event_debug(("%s: %d events freed", __func__, i));
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	/* Release the bookkeeping for event_base_once() callbacks. */
	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	/* Let the backend release its own state. */
	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	/* By now every active queue and the timeout heap must be empty. */
	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}
9472b15cb3dSCy Schubert
/* Free an event_base without running pending finalizer callbacks.
 * Thin wrapper around event_base_free_(). */
void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}
9532b15cb3dSCy Schubert
/* Free an event_base and everything attached to it, running pending
 * finalizer callbacks.  Thin wrapper around event_base_free_(). */
void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}
9592b15cb3dSCy Schubert
9602b15cb3dSCy Schubert /* Fake eventop; used to disable the backend temporarily inside event_reinit
9612b15cb3dSCy Schubert * so that we can call event_del() on an event without telling the backend.
9622b15cb3dSCy Schubert */
9632b15cb3dSCy Schubert static int
nil_backend_del(struct event_base * b,evutil_socket_t fd,short old,short events,void * fdinfo)9642b15cb3dSCy Schubert nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
9652b15cb3dSCy Schubert short events, void *fdinfo)
9662b15cb3dSCy Schubert {
9672b15cb3dSCy Schubert return 0;
9682b15cb3dSCy Schubert }
/* The stub backend installed temporarily by event_reinit() so event_del()
 * calls do not reach the real (possibly parent-shared) backend. */
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0 /* remaining fields zeroed — presumably need_reinit, features,
		 * fdinfo_len; confirm against struct eventop's declaration. */
};
9782b15cb3dSCy Schubert
/* Reinitialize the event base after a fork: rebuild backend state (when
 * the backend requires it) and re-create the signal- and thread-
 * notification fds so the child stops sharing them with the parent.
 * Returns 0 on success, -1 on failure. */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events). But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd. Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		/* Remember to re-add it below once the backend is back. */
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			   "%s: could not reinitialize event mechanism",
			   __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			/* The backend kept its state; just re-add the signal
			 * notification event we removed above. */
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
10872b15cb3dSCy Schubert
1088a25439b6SCy Schubert /* Get the monotonic time for this event_base' timer */
1089a25439b6SCy Schubert int
event_gettime_monotonic(struct event_base * base,struct timeval * tv)1090a25439b6SCy Schubert event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1091a25439b6SCy Schubert {
1092a25439b6SCy Schubert int rv = -1;
1093a25439b6SCy Schubert
1094a25439b6SCy Schubert if (base && tv) {
1095a25439b6SCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1096a25439b6SCy Schubert rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1097a25439b6SCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
1098a25439b6SCy Schubert }
1099a25439b6SCy Schubert
1100a25439b6SCy Schubert return rv;
1101a25439b6SCy Schubert }
1102a25439b6SCy Schubert
11032b15cb3dSCy Schubert const char **
event_get_supported_methods(void)11042b15cb3dSCy Schubert event_get_supported_methods(void)
11052b15cb3dSCy Schubert {
11062b15cb3dSCy Schubert static const char **methods = NULL;
11072b15cb3dSCy Schubert const struct eventop **method;
11082b15cb3dSCy Schubert const char **tmp;
11092b15cb3dSCy Schubert int i = 0, k;
11102b15cb3dSCy Schubert
11112b15cb3dSCy Schubert /* count all methods */
11122b15cb3dSCy Schubert for (method = &eventops[0]; *method != NULL; ++method) {
11132b15cb3dSCy Schubert ++i;
11142b15cb3dSCy Schubert }
11152b15cb3dSCy Schubert
11162b15cb3dSCy Schubert /* allocate one more than we need for the NULL pointer */
11172b15cb3dSCy Schubert tmp = mm_calloc((i + 1), sizeof(char *));
11182b15cb3dSCy Schubert if (tmp == NULL)
11192b15cb3dSCy Schubert return (NULL);
11202b15cb3dSCy Schubert
11212b15cb3dSCy Schubert /* populate the array with the supported methods */
11222b15cb3dSCy Schubert for (k = 0, i = 0; eventops[k] != NULL; ++k) {
11232b15cb3dSCy Schubert tmp[i++] = eventops[k]->name;
11242b15cb3dSCy Schubert }
11252b15cb3dSCy Schubert tmp[i] = NULL;
11262b15cb3dSCy Schubert
11272b15cb3dSCy Schubert if (methods != NULL)
11282b15cb3dSCy Schubert mm_free((char**)methods);
11292b15cb3dSCy Schubert
11302b15cb3dSCy Schubert methods = tmp;
11312b15cb3dSCy Schubert
11322b15cb3dSCy Schubert return (methods);
11332b15cb3dSCy Schubert }
11342b15cb3dSCy Schubert
11352b15cb3dSCy Schubert struct event_config *
event_config_new(void)11362b15cb3dSCy Schubert event_config_new(void)
11372b15cb3dSCy Schubert {
11382b15cb3dSCy Schubert struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
11392b15cb3dSCy Schubert
11402b15cb3dSCy Schubert if (cfg == NULL)
11412b15cb3dSCy Schubert return (NULL);
11422b15cb3dSCy Schubert
11432b15cb3dSCy Schubert TAILQ_INIT(&cfg->entries);
11442b15cb3dSCy Schubert cfg->max_dispatch_interval.tv_sec = -1;
11452b15cb3dSCy Schubert cfg->max_dispatch_callbacks = INT_MAX;
11462b15cb3dSCy Schubert cfg->limit_callbacks_after_prio = 1;
11472b15cb3dSCy Schubert
11482b15cb3dSCy Schubert return (cfg);
11492b15cb3dSCy Schubert }
11502b15cb3dSCy Schubert
11512b15cb3dSCy Schubert static void
event_config_entry_free(struct event_config_entry * entry)11522b15cb3dSCy Schubert event_config_entry_free(struct event_config_entry *entry)
11532b15cb3dSCy Schubert {
11542b15cb3dSCy Schubert if (entry->avoid_method != NULL)
11552b15cb3dSCy Schubert mm_free((char *)entry->avoid_method);
11562b15cb3dSCy Schubert mm_free(entry);
11572b15cb3dSCy Schubert }
11582b15cb3dSCy Schubert
11592b15cb3dSCy Schubert void
event_config_free(struct event_config * cfg)11602b15cb3dSCy Schubert event_config_free(struct event_config *cfg)
11612b15cb3dSCy Schubert {
11622b15cb3dSCy Schubert struct event_config_entry *entry;
11632b15cb3dSCy Schubert
11642b15cb3dSCy Schubert while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
11652b15cb3dSCy Schubert TAILQ_REMOVE(&cfg->entries, entry, next);
11662b15cb3dSCy Schubert event_config_entry_free(entry);
11672b15cb3dSCy Schubert }
11682b15cb3dSCy Schubert mm_free(cfg);
11692b15cb3dSCy Schubert }
11702b15cb3dSCy Schubert
11712b15cb3dSCy Schubert int
event_config_set_flag(struct event_config * cfg,int flag)11722b15cb3dSCy Schubert event_config_set_flag(struct event_config *cfg, int flag)
11732b15cb3dSCy Schubert {
11742b15cb3dSCy Schubert if (!cfg)
11752b15cb3dSCy Schubert return -1;
11762b15cb3dSCy Schubert cfg->flags |= flag;
11772b15cb3dSCy Schubert return 0;
11782b15cb3dSCy Schubert }
11792b15cb3dSCy Schubert
11802b15cb3dSCy Schubert int
event_config_avoid_method(struct event_config * cfg,const char * method)11812b15cb3dSCy Schubert event_config_avoid_method(struct event_config *cfg, const char *method)
11822b15cb3dSCy Schubert {
11832b15cb3dSCy Schubert struct event_config_entry *entry = mm_malloc(sizeof(*entry));
11842b15cb3dSCy Schubert if (entry == NULL)
11852b15cb3dSCy Schubert return (-1);
11862b15cb3dSCy Schubert
11872b15cb3dSCy Schubert if ((entry->avoid_method = mm_strdup(method)) == NULL) {
11882b15cb3dSCy Schubert mm_free(entry);
11892b15cb3dSCy Schubert return (-1);
11902b15cb3dSCy Schubert }
11912b15cb3dSCy Schubert
11922b15cb3dSCy Schubert TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
11932b15cb3dSCy Schubert
11942b15cb3dSCy Schubert return (0);
11952b15cb3dSCy Schubert }
11962b15cb3dSCy Schubert
11972b15cb3dSCy Schubert int
event_config_require_features(struct event_config * cfg,int features)11982b15cb3dSCy Schubert event_config_require_features(struct event_config *cfg,
11992b15cb3dSCy Schubert int features)
12002b15cb3dSCy Schubert {
12012b15cb3dSCy Schubert if (!cfg)
12022b15cb3dSCy Schubert return (-1);
12032b15cb3dSCy Schubert cfg->require_features = features;
12042b15cb3dSCy Schubert return (0);
12052b15cb3dSCy Schubert }
12062b15cb3dSCy Schubert
12072b15cb3dSCy Schubert int
event_config_set_num_cpus_hint(struct event_config * cfg,int cpus)12082b15cb3dSCy Schubert event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
12092b15cb3dSCy Schubert {
12102b15cb3dSCy Schubert if (!cfg)
12112b15cb3dSCy Schubert return (-1);
12122b15cb3dSCy Schubert cfg->n_cpus_hint = cpus;
12132b15cb3dSCy Schubert return (0);
12142b15cb3dSCy Schubert }
12152b15cb3dSCy Schubert
12162b15cb3dSCy Schubert int
event_config_set_max_dispatch_interval(struct event_config * cfg,const struct timeval * max_interval,int max_callbacks,int min_priority)12172b15cb3dSCy Schubert event_config_set_max_dispatch_interval(struct event_config *cfg,
12182b15cb3dSCy Schubert const struct timeval *max_interval, int max_callbacks, int min_priority)
12192b15cb3dSCy Schubert {
12202b15cb3dSCy Schubert if (max_interval)
12212b15cb3dSCy Schubert memcpy(&cfg->max_dispatch_interval, max_interval,
12222b15cb3dSCy Schubert sizeof(struct timeval));
12232b15cb3dSCy Schubert else
12242b15cb3dSCy Schubert cfg->max_dispatch_interval.tv_sec = -1;
12252b15cb3dSCy Schubert cfg->max_dispatch_callbacks =
12262b15cb3dSCy Schubert max_callbacks >= 0 ? max_callbacks : INT_MAX;
12272b15cb3dSCy Schubert if (min_priority < 0)
12282b15cb3dSCy Schubert min_priority = 0;
12292b15cb3dSCy Schubert cfg->limit_callbacks_after_prio = min_priority;
12302b15cb3dSCy Schubert return (0);
12312b15cb3dSCy Schubert }
12322b15cb3dSCy Schubert
/* Set the number of priority queues on the default base (current_base).
 * Thin wrapper around event_base_priority_init(). */
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
12382b15cb3dSCy Schubert
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	/* Resize the base's array of active-callback queues to one queue
	 * per priority level.  Fails (-1) if any callbacks are currently
	 * active (we would lose them) or if npriorities is out of range;
	 * returns 0 on success. */
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Changing priorities while callbacks are pending would orphan
	 * entries in the old queues, so refuse in that case. */
	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	/* Already the requested size: nothing to do. */
	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}
12782b15cb3dSCy Schubert
12792b15cb3dSCy Schubert int
event_base_get_npriorities(struct event_base * base)12802b15cb3dSCy Schubert event_base_get_npriorities(struct event_base *base)
12812b15cb3dSCy Schubert {
12822b15cb3dSCy Schubert
12832b15cb3dSCy Schubert int n;
12842b15cb3dSCy Schubert if (base == NULL)
12852b15cb3dSCy Schubert base = current_base;
12862b15cb3dSCy Schubert
12872b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12882b15cb3dSCy Schubert n = base->nactivequeues;
12892b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
12902b15cb3dSCy Schubert return (n);
12912b15cb3dSCy Schubert }
12922b15cb3dSCy Schubert
12932b15cb3dSCy Schubert int
event_base_get_num_events(struct event_base * base,unsigned int type)12942b15cb3dSCy Schubert event_base_get_num_events(struct event_base *base, unsigned int type)
12952b15cb3dSCy Schubert {
12962b15cb3dSCy Schubert int r = 0;
12972b15cb3dSCy Schubert
12982b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12992b15cb3dSCy Schubert
13002b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_ACTIVE)
13012b15cb3dSCy Schubert r += base->event_count_active;
13022b15cb3dSCy Schubert
13032b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_VIRTUAL)
13042b15cb3dSCy Schubert r += base->virtual_event_count;
13052b15cb3dSCy Schubert
13062b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_ADDED)
13072b15cb3dSCy Schubert r += base->event_count;
13082b15cb3dSCy Schubert
13092b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
13102b15cb3dSCy Schubert
13112b15cb3dSCy Schubert return r;
13122b15cb3dSCy Schubert }
13132b15cb3dSCy Schubert
13142b15cb3dSCy Schubert int
event_base_get_max_events(struct event_base * base,unsigned int type,int clear)13152b15cb3dSCy Schubert event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
13162b15cb3dSCy Schubert {
13172b15cb3dSCy Schubert int r = 0;
13182b15cb3dSCy Schubert
13192b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
13202b15cb3dSCy Schubert
13212b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_ACTIVE) {
13222b15cb3dSCy Schubert r += base->event_count_active_max;
13232b15cb3dSCy Schubert if (clear)
13242b15cb3dSCy Schubert base->event_count_active_max = 0;
13252b15cb3dSCy Schubert }
13262b15cb3dSCy Schubert
13272b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_VIRTUAL) {
13282b15cb3dSCy Schubert r += base->virtual_event_count_max;
13292b15cb3dSCy Schubert if (clear)
13302b15cb3dSCy Schubert base->virtual_event_count_max = 0;
13312b15cb3dSCy Schubert }
13322b15cb3dSCy Schubert
13332b15cb3dSCy Schubert if (type & EVENT_BASE_COUNT_ADDED) {
13342b15cb3dSCy Schubert r += base->event_count_max;
13352b15cb3dSCy Schubert if (clear)
13362b15cb3dSCy Schubert base->event_count_max = 0;
13372b15cb3dSCy Schubert }
13382b15cb3dSCy Schubert
13392b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
13402b15cb3dSCy Schubert
13412b15cb3dSCy Schubert return r;
13422b15cb3dSCy Schubert }
13432b15cb3dSCy Schubert
13442b15cb3dSCy Schubert /* Returns true iff we're currently watching any events. */
13452b15cb3dSCy Schubert static int
event_haveevents(struct event_base * base)13462b15cb3dSCy Schubert event_haveevents(struct event_base *base)
13472b15cb3dSCy Schubert {
13482b15cb3dSCy Schubert /* Caller must hold th_base_lock */
13492b15cb3dSCy Schubert return (base->virtual_event_count > 0 || base->event_count > 0);
13502b15cb3dSCy Schubert }
13512b15cb3dSCy Schubert
/* "closure" function called when processing active signal events.
 * A signal event may have fired several times before we got to run it;
 * ev_ncalls records how many deliveries are pending, and we invoke the
 * user callback once per delivery, dropping the base lock around each
 * call. */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work: ev_pncalls points at our local counter,
	 * so event_del() can zero it out from under us and stop the loop. */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		/* Re-take the lock just long enough to check whether
		 * event_base_loopbreak() was requested. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			/* Stop early; detach ev_pncalls from our (dying)
			 * stack variable before returning. */
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
13822b15cb3dSCy Schubert
13832b15cb3dSCy Schubert /* Common timeouts are special timeouts that are handled as queues rather than
13842b15cb3dSCy Schubert * in the minheap. This is more efficient than the minheap if we happen to
13852b15cb3dSCy Schubert * know that we're going to get several thousands of timeout events all with
13862b15cb3dSCy Schubert * the same timeout value.
13872b15cb3dSCy Schubert *
13882b15cb3dSCy Schubert * Since all our timeout handling code assumes timevals can be copied,
13892b15cb3dSCy Schubert * assigned, etc, we can't use "magic pointer" to encode these common
13902b15cb3dSCy Schubert * timeouts. Searching through a list to see if every timeout is common could
13912b15cb3dSCy Schubert * also get inefficient. Instead, we take advantage of the fact that tv_usec
13922b15cb3dSCy Schubert * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
13952b15cb3dSCy Schubert */
13962b15cb3dSCy Schubert
13972b15cb3dSCy Schubert #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
13982b15cb3dSCy Schubert #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
13992b15cb3dSCy Schubert #define COMMON_TIMEOUT_IDX_SHIFT 20
14002b15cb3dSCy Schubert #define COMMON_TIMEOUT_MASK 0xf0000000
14012b15cb3dSCy Schubert #define COMMON_TIMEOUT_MAGIC 0x50000000
14022b15cb3dSCy Schubert
14032b15cb3dSCy Schubert #define COMMON_TIMEOUT_IDX(tv) \
14042b15cb3dSCy Schubert (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
14052b15cb3dSCy Schubert
14062b15cb3dSCy Schubert /** Return true iff if 'tv' is a common timeout in 'base' */
14072b15cb3dSCy Schubert static inline int
is_common_timeout(const struct timeval * tv,const struct event_base * base)14082b15cb3dSCy Schubert is_common_timeout(const struct timeval *tv,
14092b15cb3dSCy Schubert const struct event_base *base)
14102b15cb3dSCy Schubert {
14112b15cb3dSCy Schubert int idx;
14122b15cb3dSCy Schubert if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
14132b15cb3dSCy Schubert return 0;
14142b15cb3dSCy Schubert idx = COMMON_TIMEOUT_IDX(tv);
14152b15cb3dSCy Schubert return idx < base->n_common_timeouts;
14162b15cb3dSCy Schubert }
14172b15cb3dSCy Schubert
14182b15cb3dSCy Schubert /* True iff tv1 and tv2 have the same common-timeout index, or if neither
14192b15cb3dSCy Schubert * one is a common timeout. */
14202b15cb3dSCy Schubert static inline int
is_same_common_timeout(const struct timeval * tv1,const struct timeval * tv2)14212b15cb3dSCy Schubert is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
14222b15cb3dSCy Schubert {
14232b15cb3dSCy Schubert return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
14242b15cb3dSCy Schubert (tv2->tv_usec & ~MICROSECONDS_MASK);
14252b15cb3dSCy Schubert }
14262b15cb3dSCy Schubert
14272b15cb3dSCy Schubert /** Requires that 'tv' is a common timeout. Return the corresponding
14282b15cb3dSCy Schubert * common_timeout_list. */
14292b15cb3dSCy Schubert static inline struct common_timeout_list *
get_common_timeout_list(struct event_base * base,const struct timeval * tv)14302b15cb3dSCy Schubert get_common_timeout_list(struct event_base *base, const struct timeval *tv)
14312b15cb3dSCy Schubert {
14322b15cb3dSCy Schubert return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
14332b15cb3dSCy Schubert }
14342b15cb3dSCy Schubert
14352b15cb3dSCy Schubert #if 0
14362b15cb3dSCy Schubert static inline int
14372b15cb3dSCy Schubert common_timeout_ok(const struct timeval *tv,
14382b15cb3dSCy Schubert struct event_base *base)
14392b15cb3dSCy Schubert {
14402b15cb3dSCy Schubert const struct timeval *expect =
14412b15cb3dSCy Schubert &get_common_timeout_list(base, tv)->duration;
14422b15cb3dSCy Schubert return tv->tv_sec == expect->tv_sec &&
14432b15cb3dSCy Schubert tv->tv_usec == expect->tv_usec;
14442b15cb3dSCy Schubert }
14452b15cb3dSCy Schubert #endif
14462b15cb3dSCy Schubert
14472b15cb3dSCy Schubert /* Add the timeout for the first event in given common timeout list to the
14482b15cb3dSCy Schubert * event_base's minheap. */
14492b15cb3dSCy Schubert static void
common_timeout_schedule(struct common_timeout_list * ctl,const struct timeval * now,struct event * head)14502b15cb3dSCy Schubert common_timeout_schedule(struct common_timeout_list *ctl,
14512b15cb3dSCy Schubert const struct timeval *now, struct event *head)
14522b15cb3dSCy Schubert {
14532b15cb3dSCy Schubert struct timeval timeout = head->ev_timeout;
14542b15cb3dSCy Schubert timeout.tv_usec &= MICROSECONDS_MASK;
14552b15cb3dSCy Schubert event_add_nolock_(&ctl->timeout_event, &timeout, 1);
14562b15cb3dSCy Schubert }
14572b15cb3dSCy Schubert
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	/* Activate every event at the head of the queue whose deadline
	 * (with the magic bits masked out of tv_usec) has passed.
	 * NOTE(review): this assumes the queue is kept in expiry order —
	 * only the head is ever examined; confirm against the insertion
	 * code elsewhere in this file. */
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	/* If anything is still pending, re-arm our internal timer for the
	 * new head's deadline. */
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
14832b15cb3dSCy Schubert
14842b15cb3dSCy Schubert #define MAX_COMMON_TIMEOUTS 256
14852b15cb3dSCy Schubert
const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	/* Return a "common timeout" timeval encoding 'duration': a timeval
	 * whose tv_usec top bits carry a magic tag and an index into
	 * base->common_timeout_queues.  Reuses an existing queue with the
	 * same duration when possible; otherwise allocates a new one.
	 * Returns NULL on allocation failure or when MAX_COMMON_TIMEOUTS
	 * queues already exist. */
	int i;
	struct timeval tv;
	const struct timeval *result=NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	/* Normalize an un-normalized duration (tv_usec overflowing into
	 * seconds), first stripping the magic bits if the caller handed us
	 * an already-encoded common timeout. */
	if (duration->tv_usec > 1000000) {
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	/* Look for an existing queue with this exact duration. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	/* Only 8 bits are available for the index, hence the hard cap. */
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	/* Grow the queue-pointer array if it is full. */
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		/* NOTE(review): sizeof uses "struct common_timeout_queue *"
		 * while the array holds struct common_timeout_list * —
		 * harmless since all object pointers are the same size
		 * here, but it looks like a typo worth confirming. */
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_queue *));
		if (!newqueues) {
			event_warn("%s: realloc",__func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc",__func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	new_ctl->duration.tv_sec = duration->tv_sec;
	/* Encode magic tag + queue index into the spare tv_usec bits. */
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	/* Each queue has one internal timer event that fires when the
	 * queue's head event is due. */
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
15592b15cb3dSCy Schubert
/* Closure function invoked when we're activating a persistent event.
 * Re-arms the event's timeout (if it has one) and then runs the user
 * callback with the base lock released. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_. If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			/* Strip the magic/index bits for the arithmetic,
			 * remembering them so they can be restored below. */
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		/* Restore the common-timeout magic bits (no-op otherwise). */
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock; once the lock is
	// dropped, another thread may modify or delete *ev.
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
16252b15cb3dSCy Schubert
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go. This function requires that the lock be held
  when it's invoked. Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now. Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	/* Re-read the head each iteration: the callback we run may add or
	 * remove entries from the queue. */
	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			/* This callback belongs to a full struct event. */
			ev = event_callback_to_event(evcb);

			/* Persistent (or finalizing) events stay registered;
			 * one-shot events are deleted before their callback
			 * runs. */
			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			/* A bare event_callback (no struct event). */
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		/* Internal callbacks (e.g. common-timeout timers) do not
		 * count toward the caller's dispatch limits. */
		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;


		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		/* Dispatch on the closure type.  Every branch releases
		 * th_base_lock (directly or via a helper) before invoking
		 * user code; the lock is re-acquired after the switch. */
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			short res;
			EVUTIL_ASSERT(ev != NULL);
			/* Copy what we need out of *ev before dropping the
			 * lock; the callback may delete the event. */
			evcb_callback = *ev->ev_callback;
			res = ev->ev_res;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			event_debug_note_teardown_(ev);
			evcb_evfinalize(ev, ev->ev_arg);
			/* _FREE variant also owns the event's storage. */
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wake any threads blocked in event_del() waiting for this
		 * callback to finish. */
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		/* Honor loopbreak and the caller's count/time budgets. */
		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
17522b15cb3dSCy Schubert
/*
 * Active events are stored in priority queues. Lower priorities are always
 * processed before higher priorities. Low priority events can starve high
 * priority ones.
 */

static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	/* A non-negative max_dispatch_time means this pass has a deadline. */
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	/* Find the first (highest-priority, i.e. lowest-numbered) non-empty
	 * queue and process it.  Priorities below limit_after_prio run
	 * without count/time limits. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal. Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}
18032b15cb3dSCy Schubert
18042b15cb3dSCy Schubert /*
18052b15cb3dSCy Schubert * Wait continuously for events. We exit only if no events are left.
18062b15cb3dSCy Schubert */
18072b15cb3dSCy Schubert
int
event_dispatch(void)
{
	/* Legacy wrapper: run the default base's loop with no flags. */
	return event_loop(0);
}
18132b15cb3dSCy Schubert
int
event_base_dispatch(struct event_base *event_base)
{
	/* Equivalent to event_base_loop() with no flags set. */
	return event_base_loop(event_base, 0);
}
18192b15cb3dSCy Schubert
18202b15cb3dSCy Schubert const char *
event_base_get_method(const struct event_base * base)18212b15cb3dSCy Schubert event_base_get_method(const struct event_base *base)
18222b15cb3dSCy Schubert {
18232b15cb3dSCy Schubert EVUTIL_ASSERT(base);
18242b15cb3dSCy Schubert return (base->evsel->name);
18252b15cb3dSCy Schubert }
18262b15cb3dSCy Schubert
18272b15cb3dSCy Schubert /** Callback: used to implement event_base_loopexit by telling the event_base
18282b15cb3dSCy Schubert * that it's time to exit its loop. */
18292b15cb3dSCy Schubert static void
event_loopexit_cb(evutil_socket_t fd,short what,void * arg)18302b15cb3dSCy Schubert event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
18312b15cb3dSCy Schubert {
18322b15cb3dSCy Schubert struct event_base *base = arg;
18332b15cb3dSCy Schubert base->event_gotterm = 1;
18342b15cb3dSCy Schubert }
18352b15cb3dSCy Schubert
18362b15cb3dSCy Schubert int
event_loopexit(const struct timeval * tv)18372b15cb3dSCy Schubert event_loopexit(const struct timeval *tv)
18382b15cb3dSCy Schubert {
18392b15cb3dSCy Schubert return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
18402b15cb3dSCy Schubert current_base, tv));
18412b15cb3dSCy Schubert }
18422b15cb3dSCy Schubert
18432b15cb3dSCy Schubert int
event_base_loopexit(struct event_base * event_base,const struct timeval * tv)18442b15cb3dSCy Schubert event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
18452b15cb3dSCy Schubert {
18462b15cb3dSCy Schubert return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
18472b15cb3dSCy Schubert event_base, tv));
18482b15cb3dSCy Schubert }
18492b15cb3dSCy Schubert
18502b15cb3dSCy Schubert int
event_loopbreak(void)18512b15cb3dSCy Schubert event_loopbreak(void)
18522b15cb3dSCy Schubert {
18532b15cb3dSCy Schubert return (event_base_loopbreak(current_base));
18542b15cb3dSCy Schubert }
18552b15cb3dSCy Schubert
18562b15cb3dSCy Schubert int
event_base_loopbreak(struct event_base * event_base)18572b15cb3dSCy Schubert event_base_loopbreak(struct event_base *event_base)
18582b15cb3dSCy Schubert {
18592b15cb3dSCy Schubert int r = 0;
18602b15cb3dSCy Schubert if (event_base == NULL)
18612b15cb3dSCy Schubert return (-1);
18622b15cb3dSCy Schubert
18632b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
18642b15cb3dSCy Schubert event_base->event_break = 1;
18652b15cb3dSCy Schubert
18662b15cb3dSCy Schubert if (EVBASE_NEED_NOTIFY(event_base)) {
18672b15cb3dSCy Schubert r = evthread_notify_base(event_base);
18682b15cb3dSCy Schubert } else {
18692b15cb3dSCy Schubert r = (0);
18702b15cb3dSCy Schubert }
18712b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(event_base, th_base_lock);
18722b15cb3dSCy Schubert return r;
18732b15cb3dSCy Schubert }
18742b15cb3dSCy Schubert
18752b15cb3dSCy Schubert int
event_base_loopcontinue(struct event_base * event_base)18762b15cb3dSCy Schubert event_base_loopcontinue(struct event_base *event_base)
18772b15cb3dSCy Schubert {
18782b15cb3dSCy Schubert int r = 0;
18792b15cb3dSCy Schubert if (event_base == NULL)
18802b15cb3dSCy Schubert return (-1);
18812b15cb3dSCy Schubert
18822b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
18832b15cb3dSCy Schubert event_base->event_continue = 1;
18842b15cb3dSCy Schubert
18852b15cb3dSCy Schubert if (EVBASE_NEED_NOTIFY(event_base)) {
18862b15cb3dSCy Schubert r = evthread_notify_base(event_base);
18872b15cb3dSCy Schubert } else {
18882b15cb3dSCy Schubert r = (0);
18892b15cb3dSCy Schubert }
18902b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(event_base, th_base_lock);
18912b15cb3dSCy Schubert return r;
18922b15cb3dSCy Schubert }
18932b15cb3dSCy Schubert
18942b15cb3dSCy Schubert int
event_base_got_break(struct event_base * event_base)18952b15cb3dSCy Schubert event_base_got_break(struct event_base *event_base)
18962b15cb3dSCy Schubert {
18972b15cb3dSCy Schubert int res;
18982b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
18992b15cb3dSCy Schubert res = event_base->event_break;
19002b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(event_base, th_base_lock);
19012b15cb3dSCy Schubert return res;
19022b15cb3dSCy Schubert }
19032b15cb3dSCy Schubert
19042b15cb3dSCy Schubert int
event_base_got_exit(struct event_base * event_base)19052b15cb3dSCy Schubert event_base_got_exit(struct event_base *event_base)
19062b15cb3dSCy Schubert {
19072b15cb3dSCy Schubert int res;
19082b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
19092b15cb3dSCy Schubert res = event_base->event_gotterm;
19102b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(event_base, th_base_lock);
19112b15cb3dSCy Schubert return res;
19122b15cb3dSCy Schubert }
19132b15cb3dSCy Schubert
19142b15cb3dSCy Schubert /* not thread safe */
19152b15cb3dSCy Schubert
19162b15cb3dSCy Schubert int
event_loop(int flags)19172b15cb3dSCy Schubert event_loop(int flags)
19182b15cb3dSCy Schubert {
19192b15cb3dSCy Schubert return event_base_loop(current_base, flags);
19202b15cb3dSCy Schubert }
19212b15cb3dSCy Schubert
19222b15cb3dSCy Schubert int
event_base_loop(struct event_base * base,int flags)19232b15cb3dSCy Schubert event_base_loop(struct event_base *base, int flags)
19242b15cb3dSCy Schubert {
19252b15cb3dSCy Schubert const struct eventop *evsel = base->evsel;
19262b15cb3dSCy Schubert struct timeval tv;
19272b15cb3dSCy Schubert struct timeval *tv_p;
19282b15cb3dSCy Schubert int res, done, retval = 0;
19292b15cb3dSCy Schubert
19302b15cb3dSCy Schubert /* Grab the lock. We will release it inside evsel.dispatch, and again
19312b15cb3dSCy Schubert * as we invoke user callbacks. */
19322b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
19332b15cb3dSCy Schubert
19342b15cb3dSCy Schubert if (base->running_loop) {
19352b15cb3dSCy Schubert event_warnx("%s: reentrant invocation. Only one event_base_loop"
19362b15cb3dSCy Schubert " can run on each event_base at once.", __func__);
19372b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
19382b15cb3dSCy Schubert return -1;
19392b15cb3dSCy Schubert }
19402b15cb3dSCy Schubert
19412b15cb3dSCy Schubert base->running_loop = 1;
19422b15cb3dSCy Schubert
19432b15cb3dSCy Schubert clear_time_cache(base);
19442b15cb3dSCy Schubert
19452b15cb3dSCy Schubert if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
19462b15cb3dSCy Schubert evsig_set_base_(base);
19472b15cb3dSCy Schubert
19482b15cb3dSCy Schubert done = 0;
19492b15cb3dSCy Schubert
19502b15cb3dSCy Schubert #ifndef EVENT__DISABLE_THREAD_SUPPORT
19512b15cb3dSCy Schubert base->th_owner_id = EVTHREAD_GET_ID();
19522b15cb3dSCy Schubert #endif
19532b15cb3dSCy Schubert
19542b15cb3dSCy Schubert base->event_gotterm = base->event_break = 0;
19552b15cb3dSCy Schubert
19562b15cb3dSCy Schubert while (!done) {
19572b15cb3dSCy Schubert base->event_continue = 0;
19582b15cb3dSCy Schubert base->n_deferreds_queued = 0;
19592b15cb3dSCy Schubert
19602b15cb3dSCy Schubert /* Terminate the loop if we have been asked to */
19612b15cb3dSCy Schubert if (base->event_gotterm) {
19622b15cb3dSCy Schubert break;
19632b15cb3dSCy Schubert }
19642b15cb3dSCy Schubert
19652b15cb3dSCy Schubert if (base->event_break) {
19662b15cb3dSCy Schubert break;
19672b15cb3dSCy Schubert }
19682b15cb3dSCy Schubert
19692b15cb3dSCy Schubert tv_p = &tv;
19702b15cb3dSCy Schubert if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
19712b15cb3dSCy Schubert timeout_next(base, &tv_p);
19722b15cb3dSCy Schubert } else {
19732b15cb3dSCy Schubert /*
19742b15cb3dSCy Schubert * if we have active events, we just poll new events
19752b15cb3dSCy Schubert * without waiting.
19762b15cb3dSCy Schubert */
19772b15cb3dSCy Schubert evutil_timerclear(&tv);
19782b15cb3dSCy Schubert }
19792b15cb3dSCy Schubert
19802b15cb3dSCy Schubert /* If we have no events, we just exit */
19812b15cb3dSCy Schubert if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
19822b15cb3dSCy Schubert !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
19832b15cb3dSCy Schubert event_debug(("%s: no events registered.", __func__));
19842b15cb3dSCy Schubert retval = 1;
19852b15cb3dSCy Schubert goto done;
19862b15cb3dSCy Schubert }
19872b15cb3dSCy Schubert
19882b15cb3dSCy Schubert event_queue_make_later_events_active(base);
19892b15cb3dSCy Schubert
19902b15cb3dSCy Schubert clear_time_cache(base);
19912b15cb3dSCy Schubert
19922b15cb3dSCy Schubert res = evsel->dispatch(base, tv_p);
19932b15cb3dSCy Schubert
19942b15cb3dSCy Schubert if (res == -1) {
19952b15cb3dSCy Schubert event_debug(("%s: dispatch returned unsuccessfully.",
19962b15cb3dSCy Schubert __func__));
19972b15cb3dSCy Schubert retval = -1;
19982b15cb3dSCy Schubert goto done;
19992b15cb3dSCy Schubert }
20002b15cb3dSCy Schubert
20012b15cb3dSCy Schubert update_time_cache(base);
20022b15cb3dSCy Schubert
20032b15cb3dSCy Schubert timeout_process(base);
20042b15cb3dSCy Schubert
20052b15cb3dSCy Schubert if (N_ACTIVE_CALLBACKS(base)) {
20062b15cb3dSCy Schubert int n = event_process_active(base);
20072b15cb3dSCy Schubert if ((flags & EVLOOP_ONCE)
20082b15cb3dSCy Schubert && N_ACTIVE_CALLBACKS(base) == 0
20092b15cb3dSCy Schubert && n != 0)
20102b15cb3dSCy Schubert done = 1;
20112b15cb3dSCy Schubert } else if (flags & EVLOOP_NONBLOCK)
20122b15cb3dSCy Schubert done = 1;
20132b15cb3dSCy Schubert }
20142b15cb3dSCy Schubert event_debug(("%s: asked to terminate loop.", __func__));
20152b15cb3dSCy Schubert
20162b15cb3dSCy Schubert done:
20172b15cb3dSCy Schubert clear_time_cache(base);
20182b15cb3dSCy Schubert base->running_loop = 0;
20192b15cb3dSCy Schubert
20202b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
20212b15cb3dSCy Schubert
20222b15cb3dSCy Schubert return (retval);
20232b15cb3dSCy Schubert }
20242b15cb3dSCy Schubert
20252b15cb3dSCy Schubert /* One-time callback to implement event_base_once: invokes the user callback,
20262b15cb3dSCy Schubert * then deletes the allocated storage */
20272b15cb3dSCy Schubert static void
event_once_cb(evutil_socket_t fd,short events,void * arg)20282b15cb3dSCy Schubert event_once_cb(evutil_socket_t fd, short events, void *arg)
20292b15cb3dSCy Schubert {
20302b15cb3dSCy Schubert struct event_once *eonce = arg;
20312b15cb3dSCy Schubert
20322b15cb3dSCy Schubert (*eonce->cb)(fd, events, eonce->arg);
20332b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
20342b15cb3dSCy Schubert LIST_REMOVE(eonce, next_once);
20352b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
20362b15cb3dSCy Schubert event_debug_unassign(&eonce->ev);
20372b15cb3dSCy Schubert mm_free(eonce);
20382b15cb3dSCy Schubert }
20392b15cb3dSCy Schubert
20402b15cb3dSCy Schubert /* not threadsafe, event scheduled once. */
20412b15cb3dSCy Schubert int
event_once(evutil_socket_t fd,short events,void (* callback)(evutil_socket_t,short,void *),void * arg,const struct timeval * tv)20422b15cb3dSCy Schubert event_once(evutil_socket_t fd, short events,
20432b15cb3dSCy Schubert void (*callback)(evutil_socket_t, short, void *),
20442b15cb3dSCy Schubert void *arg, const struct timeval *tv)
20452b15cb3dSCy Schubert {
20462b15cb3dSCy Schubert return event_base_once(current_base, fd, events, callback, arg, tv);
20472b15cb3dSCy Schubert }
20482b15cb3dSCy Schubert
20492b15cb3dSCy Schubert /* Schedules an event once */
20502b15cb3dSCy Schubert int
event_base_once(struct event_base * base,evutil_socket_t fd,short events,void (* callback)(evutil_socket_t,short,void *),void * arg,const struct timeval * tv)20512b15cb3dSCy Schubert event_base_once(struct event_base *base, evutil_socket_t fd, short events,
20522b15cb3dSCy Schubert void (*callback)(evutil_socket_t, short, void *),
20532b15cb3dSCy Schubert void *arg, const struct timeval *tv)
20542b15cb3dSCy Schubert {
20552b15cb3dSCy Schubert struct event_once *eonce;
20562b15cb3dSCy Schubert int res = 0;
20572b15cb3dSCy Schubert int activate = 0;
20582b15cb3dSCy Schubert
2059*a466cc55SCy Schubert if (!base)
2060*a466cc55SCy Schubert return (-1);
2061*a466cc55SCy Schubert
20622b15cb3dSCy Schubert /* We cannot support signals that just fire once, or persistent
20632b15cb3dSCy Schubert * events. */
20642b15cb3dSCy Schubert if (events & (EV_SIGNAL|EV_PERSIST))
20652b15cb3dSCy Schubert return (-1);
20662b15cb3dSCy Schubert
20672b15cb3dSCy Schubert if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
20682b15cb3dSCy Schubert return (-1);
20692b15cb3dSCy Schubert
20702b15cb3dSCy Schubert eonce->cb = callback;
20712b15cb3dSCy Schubert eonce->arg = arg;
20722b15cb3dSCy Schubert
20732b15cb3dSCy Schubert if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
20742b15cb3dSCy Schubert evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
20752b15cb3dSCy Schubert
20762b15cb3dSCy Schubert if (tv == NULL || ! evutil_timerisset(tv)) {
20772b15cb3dSCy Schubert /* If the event is going to become active immediately,
20782b15cb3dSCy Schubert * don't put it on the timeout queue. This is one
20792b15cb3dSCy Schubert * idiom for scheduling a callback, so let's make
20802b15cb3dSCy Schubert * it fast (and order-preserving). */
20812b15cb3dSCy Schubert activate = 1;
20822b15cb3dSCy Schubert }
20832b15cb3dSCy Schubert } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
20842b15cb3dSCy Schubert events &= EV_READ|EV_WRITE|EV_CLOSED;
20852b15cb3dSCy Schubert
20862b15cb3dSCy Schubert event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
20872b15cb3dSCy Schubert } else {
20882b15cb3dSCy Schubert /* Bad event combination */
20892b15cb3dSCy Schubert mm_free(eonce);
20902b15cb3dSCy Schubert return (-1);
20912b15cb3dSCy Schubert }
20922b15cb3dSCy Schubert
20932b15cb3dSCy Schubert if (res == 0) {
20942b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
20952b15cb3dSCy Schubert if (activate)
20962b15cb3dSCy Schubert event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
20972b15cb3dSCy Schubert else
20982b15cb3dSCy Schubert res = event_add_nolock_(&eonce->ev, tv, 0);
20992b15cb3dSCy Schubert
21002b15cb3dSCy Schubert if (res != 0) {
21012b15cb3dSCy Schubert mm_free(eonce);
21022b15cb3dSCy Schubert return (res);
21032b15cb3dSCy Schubert } else {
21042b15cb3dSCy Schubert LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
21052b15cb3dSCy Schubert }
21062b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
21072b15cb3dSCy Schubert }
21082b15cb3dSCy Schubert
21092b15cb3dSCy Schubert return (0);
21102b15cb3dSCy Schubert }
21112b15cb3dSCy Schubert
21122b15cb3dSCy Schubert int
event_assign(struct event * ev,struct event_base * base,evutil_socket_t fd,short events,void (* callback)(evutil_socket_t,short,void *),void * arg)21132b15cb3dSCy Schubert event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
21142b15cb3dSCy Schubert {
21152b15cb3dSCy Schubert if (!base)
21162b15cb3dSCy Schubert base = current_base;
21172b15cb3dSCy Schubert if (arg == &event_self_cbarg_ptr_)
21182b15cb3dSCy Schubert arg = ev;
21192b15cb3dSCy Schubert
2120*a466cc55SCy Schubert if (!(events & EV_SIGNAL))
2121*a466cc55SCy Schubert event_debug_assert_socket_nonblocking_(fd);
21222b15cb3dSCy Schubert event_debug_assert_not_added_(ev);
21232b15cb3dSCy Schubert
21242b15cb3dSCy Schubert ev->ev_base = base;
21252b15cb3dSCy Schubert
21262b15cb3dSCy Schubert ev->ev_callback = callback;
21272b15cb3dSCy Schubert ev->ev_arg = arg;
21282b15cb3dSCy Schubert ev->ev_fd = fd;
21292b15cb3dSCy Schubert ev->ev_events = events;
21302b15cb3dSCy Schubert ev->ev_res = 0;
21312b15cb3dSCy Schubert ev->ev_flags = EVLIST_INIT;
21322b15cb3dSCy Schubert ev->ev_ncalls = 0;
21332b15cb3dSCy Schubert ev->ev_pncalls = NULL;
21342b15cb3dSCy Schubert
21352b15cb3dSCy Schubert if (events & EV_SIGNAL) {
21362b15cb3dSCy Schubert if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
21372b15cb3dSCy Schubert event_warnx("%s: EV_SIGNAL is not compatible with "
21382b15cb3dSCy Schubert "EV_READ, EV_WRITE or EV_CLOSED", __func__);
21392b15cb3dSCy Schubert return -1;
21402b15cb3dSCy Schubert }
21412b15cb3dSCy Schubert ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
21422b15cb3dSCy Schubert } else {
21432b15cb3dSCy Schubert if (events & EV_PERSIST) {
21442b15cb3dSCy Schubert evutil_timerclear(&ev->ev_io_timeout);
21452b15cb3dSCy Schubert ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
21462b15cb3dSCy Schubert } else {
21472b15cb3dSCy Schubert ev->ev_closure = EV_CLOSURE_EVENT;
21482b15cb3dSCy Schubert }
21492b15cb3dSCy Schubert }
21502b15cb3dSCy Schubert
21512b15cb3dSCy Schubert min_heap_elem_init_(ev);
21522b15cb3dSCy Schubert
21532b15cb3dSCy Schubert if (base != NULL) {
21542b15cb3dSCy Schubert /* by default, we put new events into the middle priority */
21552b15cb3dSCy Schubert ev->ev_pri = base->nactivequeues / 2;
21562b15cb3dSCy Schubert }
21572b15cb3dSCy Schubert
21582b15cb3dSCy Schubert event_debug_note_setup_(ev);
21592b15cb3dSCy Schubert
21602b15cb3dSCy Schubert return 0;
21612b15cb3dSCy Schubert }
21622b15cb3dSCy Schubert
21632b15cb3dSCy Schubert int
event_base_set(struct event_base * base,struct event * ev)21642b15cb3dSCy Schubert event_base_set(struct event_base *base, struct event *ev)
21652b15cb3dSCy Schubert {
21662b15cb3dSCy Schubert /* Only innocent events may be assigned to a different base */
21672b15cb3dSCy Schubert if (ev->ev_flags != EVLIST_INIT)
21682b15cb3dSCy Schubert return (-1);
21692b15cb3dSCy Schubert
21702b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
21712b15cb3dSCy Schubert
21722b15cb3dSCy Schubert ev->ev_base = base;
21732b15cb3dSCy Schubert ev->ev_pri = base->nactivequeues/2;
21742b15cb3dSCy Schubert
21752b15cb3dSCy Schubert return (0);
21762b15cb3dSCy Schubert }
21772b15cb3dSCy Schubert
21782b15cb3dSCy Schubert void
event_set(struct event * ev,evutil_socket_t fd,short events,void (* callback)(evutil_socket_t,short,void *),void * arg)21792b15cb3dSCy Schubert event_set(struct event *ev, evutil_socket_t fd, short events,
21802b15cb3dSCy Schubert void (*callback)(evutil_socket_t, short, void *), void *arg)
21812b15cb3dSCy Schubert {
21822b15cb3dSCy Schubert int r;
21832b15cb3dSCy Schubert r = event_assign(ev, current_base, fd, events, callback, arg);
21842b15cb3dSCy Schubert EVUTIL_ASSERT(r == 0);
21852b15cb3dSCy Schubert }
21862b15cb3dSCy Schubert
21872b15cb3dSCy Schubert void *
event_self_cbarg(void)21882b15cb3dSCy Schubert event_self_cbarg(void)
21892b15cb3dSCy Schubert {
21902b15cb3dSCy Schubert return &event_self_cbarg_ptr_;
21912b15cb3dSCy Schubert }
21922b15cb3dSCy Schubert
21932b15cb3dSCy Schubert struct event *
event_base_get_running_event(struct event_base * base)21942b15cb3dSCy Schubert event_base_get_running_event(struct event_base *base)
21952b15cb3dSCy Schubert {
21962b15cb3dSCy Schubert struct event *ev = NULL;
21972b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
21982b15cb3dSCy Schubert if (EVBASE_IN_THREAD(base)) {
21992b15cb3dSCy Schubert struct event_callback *evcb = base->current_event;
22002b15cb3dSCy Schubert if (evcb->evcb_flags & EVLIST_INIT)
22012b15cb3dSCy Schubert ev = event_callback_to_event(evcb);
22022b15cb3dSCy Schubert }
22032b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
22042b15cb3dSCy Schubert return ev;
22052b15cb3dSCy Schubert }
22062b15cb3dSCy Schubert
22072b15cb3dSCy Schubert struct event *
event_new(struct event_base * base,evutil_socket_t fd,short events,void (* cb)(evutil_socket_t,short,void *),void * arg)22082b15cb3dSCy Schubert event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
22092b15cb3dSCy Schubert {
22102b15cb3dSCy Schubert struct event *ev;
22112b15cb3dSCy Schubert ev = mm_malloc(sizeof(struct event));
22122b15cb3dSCy Schubert if (ev == NULL)
22132b15cb3dSCy Schubert return (NULL);
22142b15cb3dSCy Schubert if (event_assign(ev, base, fd, events, cb, arg) < 0) {
22152b15cb3dSCy Schubert mm_free(ev);
22162b15cb3dSCy Schubert return (NULL);
22172b15cb3dSCy Schubert }
22182b15cb3dSCy Schubert
22192b15cb3dSCy Schubert return (ev);
22202b15cb3dSCy Schubert }
22212b15cb3dSCy Schubert
22222b15cb3dSCy Schubert void
event_free(struct event * ev)22232b15cb3dSCy Schubert event_free(struct event *ev)
22242b15cb3dSCy Schubert {
22252b15cb3dSCy Schubert /* This is disabled, so that events which have been finalized be a
22262b15cb3dSCy Schubert * valid target for event_free(). That's */
22272b15cb3dSCy Schubert // event_debug_assert_is_setup_(ev);
22282b15cb3dSCy Schubert
22292b15cb3dSCy Schubert /* make sure that this event won't be coming back to haunt us. */
22302b15cb3dSCy Schubert event_del(ev);
22312b15cb3dSCy Schubert event_debug_note_teardown_(ev);
22322b15cb3dSCy Schubert mm_free(ev);
22332b15cb3dSCy Schubert
22342b15cb3dSCy Schubert }
22352b15cb3dSCy Schubert
22362b15cb3dSCy Schubert void
event_debug_unassign(struct event * ev)22372b15cb3dSCy Schubert event_debug_unassign(struct event *ev)
22382b15cb3dSCy Schubert {
22392b15cb3dSCy Schubert event_debug_assert_not_added_(ev);
22402b15cb3dSCy Schubert event_debug_note_teardown_(ev);
22412b15cb3dSCy Schubert
22422b15cb3dSCy Schubert ev->ev_flags &= ~EVLIST_INIT;
22432b15cb3dSCy Schubert }
22442b15cb3dSCy Schubert
22452b15cb3dSCy Schubert #define EVENT_FINALIZE_FREE_ 0x10000
22462b15cb3dSCy Schubert static int
event_finalize_nolock_(struct event_base * base,unsigned flags,struct event * ev,event_finalize_callback_fn cb)22472b15cb3dSCy Schubert event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
22482b15cb3dSCy Schubert {
22492b15cb3dSCy Schubert ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
22502b15cb3dSCy Schubert EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
22512b15cb3dSCy Schubert
22522b15cb3dSCy Schubert event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
22532b15cb3dSCy Schubert ev->ev_closure = closure;
22542b15cb3dSCy Schubert ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
22552b15cb3dSCy Schubert event_active_nolock_(ev, EV_FINALIZE, 1);
22562b15cb3dSCy Schubert ev->ev_flags |= EVLIST_FINALIZING;
22572b15cb3dSCy Schubert return 0;
22582b15cb3dSCy Schubert }
22592b15cb3dSCy Schubert
22602b15cb3dSCy Schubert static int
event_finalize_impl_(unsigned flags,struct event * ev,event_finalize_callback_fn cb)22612b15cb3dSCy Schubert event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
22622b15cb3dSCy Schubert {
22632b15cb3dSCy Schubert int r;
22642b15cb3dSCy Schubert struct event_base *base = ev->ev_base;
22652b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!base)) {
22662b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
22672b15cb3dSCy Schubert return -1;
22682b15cb3dSCy Schubert }
22692b15cb3dSCy Schubert
22702b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
22712b15cb3dSCy Schubert r = event_finalize_nolock_(base, flags, ev, cb);
22722b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
22732b15cb3dSCy Schubert return r;
22742b15cb3dSCy Schubert }
22752b15cb3dSCy Schubert
22762b15cb3dSCy Schubert int
event_finalize(unsigned flags,struct event * ev,event_finalize_callback_fn cb)22772b15cb3dSCy Schubert event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
22782b15cb3dSCy Schubert {
22792b15cb3dSCy Schubert return event_finalize_impl_(flags, ev, cb);
22802b15cb3dSCy Schubert }
22812b15cb3dSCy Schubert
22822b15cb3dSCy Schubert int
event_free_finalize(unsigned flags,struct event * ev,event_finalize_callback_fn cb)22832b15cb3dSCy Schubert event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
22842b15cb3dSCy Schubert {
22852b15cb3dSCy Schubert return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
22862b15cb3dSCy Schubert }
22872b15cb3dSCy Schubert
22882b15cb3dSCy Schubert void
event_callback_finalize_nolock_(struct event_base * base,unsigned flags,struct event_callback * evcb,void (* cb)(struct event_callback *,void *))22892b15cb3dSCy Schubert event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
22902b15cb3dSCy Schubert {
22912b15cb3dSCy Schubert struct event *ev = NULL;
22922b15cb3dSCy Schubert if (evcb->evcb_flags & EVLIST_INIT) {
22932b15cb3dSCy Schubert ev = event_callback_to_event(evcb);
22942b15cb3dSCy Schubert event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
22952b15cb3dSCy Schubert } else {
22962b15cb3dSCy Schubert event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
22972b15cb3dSCy Schubert }
22982b15cb3dSCy Schubert
22992b15cb3dSCy Schubert evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
23002b15cb3dSCy Schubert evcb->evcb_cb_union.evcb_cbfinalize = cb;
23012b15cb3dSCy Schubert event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
23022b15cb3dSCy Schubert evcb->evcb_flags |= EVLIST_FINALIZING;
23032b15cb3dSCy Schubert }
23042b15cb3dSCy Schubert
23052b15cb3dSCy Schubert void
event_callback_finalize_(struct event_base * base,unsigned flags,struct event_callback * evcb,void (* cb)(struct event_callback *,void *))23062b15cb3dSCy Schubert event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
23072b15cb3dSCy Schubert {
23082b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
23092b15cb3dSCy Schubert event_callback_finalize_nolock_(base, flags, evcb, cb);
23102b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
23112b15cb3dSCy Schubert }
23122b15cb3dSCy Schubert
23132b15cb3dSCy Schubert /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
23142b15cb3dSCy Schubert * callback will be invoked on *one of them*, after they have *all* been
23152b15cb3dSCy Schubert * finalized. */
23162b15cb3dSCy Schubert int
event_callback_finalize_many_(struct event_base * base,int n_cbs,struct event_callback ** evcbs,void (* cb)(struct event_callback *,void *))23172b15cb3dSCy Schubert event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
23182b15cb3dSCy Schubert {
23192b15cb3dSCy Schubert int n_pending = 0, i;
23202b15cb3dSCy Schubert
23212b15cb3dSCy Schubert if (base == NULL)
23222b15cb3dSCy Schubert base = current_base;
23232b15cb3dSCy Schubert
23242b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
23252b15cb3dSCy Schubert
23262b15cb3dSCy Schubert event_debug(("%s: %d events finalizing", __func__, n_cbs));
23272b15cb3dSCy Schubert
23282b15cb3dSCy Schubert /* At most one can be currently executing; the rest we just
23292b15cb3dSCy Schubert * cancel... But we always make sure that the finalize callback
23302b15cb3dSCy Schubert * runs. */
23312b15cb3dSCy Schubert for (i = 0; i < n_cbs; ++i) {
23322b15cb3dSCy Schubert struct event_callback *evcb = evcbs[i];
23332b15cb3dSCy Schubert if (evcb == base->current_event) {
23342b15cb3dSCy Schubert event_callback_finalize_nolock_(base, 0, evcb, cb);
23352b15cb3dSCy Schubert ++n_pending;
23362b15cb3dSCy Schubert } else {
23372b15cb3dSCy Schubert event_callback_cancel_nolock_(base, evcb, 0);
23382b15cb3dSCy Schubert }
23392b15cb3dSCy Schubert }
23402b15cb3dSCy Schubert
23412b15cb3dSCy Schubert if (n_pending == 0) {
23422b15cb3dSCy Schubert /* Just do the first one. */
23432b15cb3dSCy Schubert event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
23442b15cb3dSCy Schubert }
23452b15cb3dSCy Schubert
23462b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
23472b15cb3dSCy Schubert return 0;
23482b15cb3dSCy Schubert }
23492b15cb3dSCy Schubert
23502b15cb3dSCy Schubert /*
23512b15cb3dSCy Schubert * Set's the priority of an event - if an event is already scheduled
23522b15cb3dSCy Schubert * changing the priority is going to fail.
23532b15cb3dSCy Schubert */
23542b15cb3dSCy Schubert
23552b15cb3dSCy Schubert int
event_priority_set(struct event * ev,int pri)23562b15cb3dSCy Schubert event_priority_set(struct event *ev, int pri)
23572b15cb3dSCy Schubert {
23582b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
23592b15cb3dSCy Schubert
23602b15cb3dSCy Schubert if (ev->ev_flags & EVLIST_ACTIVE)
23612b15cb3dSCy Schubert return (-1);
23622b15cb3dSCy Schubert if (pri < 0 || pri >= ev->ev_base->nactivequeues)
23632b15cb3dSCy Schubert return (-1);
23642b15cb3dSCy Schubert
23652b15cb3dSCy Schubert ev->ev_pri = pri;
23662b15cb3dSCy Schubert
23672b15cb3dSCy Schubert return (0);
23682b15cb3dSCy Schubert }
23692b15cb3dSCy Schubert
23702b15cb3dSCy Schubert /*
23712b15cb3dSCy Schubert * Checks if a specific event is pending or scheduled.
23722b15cb3dSCy Schubert */
23732b15cb3dSCy Schubert
23742b15cb3dSCy Schubert int
event_pending(const struct event * ev,short event,struct timeval * tv)23752b15cb3dSCy Schubert event_pending(const struct event *ev, short event, struct timeval *tv)
23762b15cb3dSCy Schubert {
23772b15cb3dSCy Schubert int flags = 0;
23782b15cb3dSCy Schubert
23792b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
23802b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
23812b15cb3dSCy Schubert return 0;
23822b15cb3dSCy Schubert }
23832b15cb3dSCy Schubert
23842b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
23852b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
23862b15cb3dSCy Schubert
23872b15cb3dSCy Schubert if (ev->ev_flags & EVLIST_INSERTED)
23882b15cb3dSCy Schubert flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
23892b15cb3dSCy Schubert if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
23902b15cb3dSCy Schubert flags |= ev->ev_res;
23912b15cb3dSCy Schubert if (ev->ev_flags & EVLIST_TIMEOUT)
23922b15cb3dSCy Schubert flags |= EV_TIMEOUT;
23932b15cb3dSCy Schubert
23942b15cb3dSCy Schubert event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
23952b15cb3dSCy Schubert
23962b15cb3dSCy Schubert /* See if there is a timeout that we should report */
23972b15cb3dSCy Schubert if (tv != NULL && (flags & event & EV_TIMEOUT)) {
23982b15cb3dSCy Schubert struct timeval tmp = ev->ev_timeout;
23992b15cb3dSCy Schubert tmp.tv_usec &= MICROSECONDS_MASK;
24002b15cb3dSCy Schubert /* correctly remamp to real time */
24012b15cb3dSCy Schubert evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
24022b15cb3dSCy Schubert }
24032b15cb3dSCy Schubert
24042b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
24052b15cb3dSCy Schubert
24062b15cb3dSCy Schubert return (flags & event);
24072b15cb3dSCy Schubert }
24082b15cb3dSCy Schubert
24092b15cb3dSCy Schubert int
event_initialized(const struct event * ev)24102b15cb3dSCy Schubert event_initialized(const struct event *ev)
24112b15cb3dSCy Schubert {
24122b15cb3dSCy Schubert if (!(ev->ev_flags & EVLIST_INIT))
24132b15cb3dSCy Schubert return 0;
24142b15cb3dSCy Schubert
24152b15cb3dSCy Schubert return 1;
24162b15cb3dSCy Schubert }
24172b15cb3dSCy Schubert
24182b15cb3dSCy Schubert void
event_get_assignment(const struct event * event,struct event_base ** base_out,evutil_socket_t * fd_out,short * events_out,event_callback_fn * callback_out,void ** arg_out)24192b15cb3dSCy Schubert event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
24202b15cb3dSCy Schubert {
24212b15cb3dSCy Schubert event_debug_assert_is_setup_(event);
24222b15cb3dSCy Schubert
24232b15cb3dSCy Schubert if (base_out)
24242b15cb3dSCy Schubert *base_out = event->ev_base;
24252b15cb3dSCy Schubert if (fd_out)
24262b15cb3dSCy Schubert *fd_out = event->ev_fd;
24272b15cb3dSCy Schubert if (events_out)
24282b15cb3dSCy Schubert *events_out = event->ev_events;
24292b15cb3dSCy Schubert if (callback_out)
24302b15cb3dSCy Schubert *callback_out = event->ev_callback;
24312b15cb3dSCy Schubert if (arg_out)
24322b15cb3dSCy Schubert *arg_out = event->ev_arg;
24332b15cb3dSCy Schubert }
24342b15cb3dSCy Schubert
24352b15cb3dSCy Schubert size_t
event_get_struct_event_size(void)24362b15cb3dSCy Schubert event_get_struct_event_size(void)
24372b15cb3dSCy Schubert {
24382b15cb3dSCy Schubert return sizeof(struct event);
24392b15cb3dSCy Schubert }
24402b15cb3dSCy Schubert
24412b15cb3dSCy Schubert evutil_socket_t
event_get_fd(const struct event * ev)24422b15cb3dSCy Schubert event_get_fd(const struct event *ev)
24432b15cb3dSCy Schubert {
24442b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
24452b15cb3dSCy Schubert return ev->ev_fd;
24462b15cb3dSCy Schubert }
24472b15cb3dSCy Schubert
24482b15cb3dSCy Schubert struct event_base *
event_get_base(const struct event * ev)24492b15cb3dSCy Schubert event_get_base(const struct event *ev)
24502b15cb3dSCy Schubert {
24512b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
24522b15cb3dSCy Schubert return ev->ev_base;
24532b15cb3dSCy Schubert }
24542b15cb3dSCy Schubert
24552b15cb3dSCy Schubert short
event_get_events(const struct event * ev)24562b15cb3dSCy Schubert event_get_events(const struct event *ev)
24572b15cb3dSCy Schubert {
24582b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
24592b15cb3dSCy Schubert return ev->ev_events;
24602b15cb3dSCy Schubert }
24612b15cb3dSCy Schubert
/** Return the callback function this event was set up with. */
event_callback_fn
event_get_callback(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_callback;
}
24682b15cb3dSCy Schubert
/** Return the user-supplied argument that will be passed to the event's
 * callback. */
void *
event_get_callback_arg(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_arg;
}
24752b15cb3dSCy Schubert
/** Return the event's priority (smaller numbers run earlier). */
int
event_get_priority(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_pri;
}
24822b15cb3dSCy Schubert
24832b15cb3dSCy Schubert int
event_add(struct event * ev,const struct timeval * tv)24842b15cb3dSCy Schubert event_add(struct event *ev, const struct timeval *tv)
24852b15cb3dSCy Schubert {
24862b15cb3dSCy Schubert int res;
24872b15cb3dSCy Schubert
24882b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
24892b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
24902b15cb3dSCy Schubert return -1;
24912b15cb3dSCy Schubert }
24922b15cb3dSCy Schubert
24932b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
24942b15cb3dSCy Schubert
24952b15cb3dSCy Schubert res = event_add_nolock_(ev, tv, 0);
24962b15cb3dSCy Schubert
24972b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
24982b15cb3dSCy Schubert
24992b15cb3dSCy Schubert return (res);
25002b15cb3dSCy Schubert }
25012b15cb3dSCy Schubert
25022b15cb3dSCy Schubert /* Helper callback: wake an event_base from another thread. This version
25032b15cb3dSCy Schubert * works by writing a byte to one end of a socketpair, so that the event_base
25042b15cb3dSCy Schubert * listening on the other end will wake up as the corresponding event
25052b15cb3dSCy Schubert * triggers */
25062b15cb3dSCy Schubert static int
evthread_notify_base_default(struct event_base * base)25072b15cb3dSCy Schubert evthread_notify_base_default(struct event_base *base)
25082b15cb3dSCy Schubert {
25092b15cb3dSCy Schubert char buf[1];
25102b15cb3dSCy Schubert int r;
25112b15cb3dSCy Schubert buf[0] = (char) 0;
25122b15cb3dSCy Schubert #ifdef _WIN32
25132b15cb3dSCy Schubert r = send(base->th_notify_fd[1], buf, 1, 0);
25142b15cb3dSCy Schubert #else
25152b15cb3dSCy Schubert r = write(base->th_notify_fd[1], buf, 1);
25162b15cb3dSCy Schubert #endif
25172b15cb3dSCy Schubert return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
25182b15cb3dSCy Schubert }
25192b15cb3dSCy Schubert
#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes a working eventfd() implementation: writing a 64-bit 1 bumps the
 * counter and makes the descriptor readable.  Retries while the write is
 * rejected with EAGAIN. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	ev_uint64_t msg = 1;
	int r;

	for (;;) {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
		if (r >= 0 || errno != EAGAIN)
			break;
	}

	return (r < 0) ? -1 : 0;
}
#endif
25352b15cb3dSCy Schubert
25362b15cb3dSCy Schubert
25372b15cb3dSCy Schubert /** Tell the thread currently running the event_loop for base (if any) that it
25382b15cb3dSCy Schubert * needs to stop waiting in its dispatch function (if it is) and process all
25392b15cb3dSCy Schubert * active callbacks. */
25402b15cb3dSCy Schubert static int
evthread_notify_base(struct event_base * base)25412b15cb3dSCy Schubert evthread_notify_base(struct event_base *base)
25422b15cb3dSCy Schubert {
25432b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
25442b15cb3dSCy Schubert if (!base->th_notify_fn)
25452b15cb3dSCy Schubert return -1;
25462b15cb3dSCy Schubert if (base->is_notify_pending)
25472b15cb3dSCy Schubert return 0;
25482b15cb3dSCy Schubert base->is_notify_pending = 1;
25492b15cb3dSCy Schubert return base->th_notify_fn(base);
25502b15cb3dSCy Schubert }
25512b15cb3dSCy Schubert
25522b15cb3dSCy Schubert /* Implementation function to remove a timeout on a currently pending event.
25532b15cb3dSCy Schubert */
25542b15cb3dSCy Schubert int
event_remove_timer_nolock_(struct event * ev)25552b15cb3dSCy Schubert event_remove_timer_nolock_(struct event *ev)
25562b15cb3dSCy Schubert {
25572b15cb3dSCy Schubert struct event_base *base = ev->ev_base;
25582b15cb3dSCy Schubert
25592b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
25602b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
25612b15cb3dSCy Schubert
25622b15cb3dSCy Schubert event_debug(("event_remove_timer_nolock: event: %p", ev));
25632b15cb3dSCy Schubert
25642b15cb3dSCy Schubert /* If it's not pending on a timeout, we don't need to do anything. */
25652b15cb3dSCy Schubert if (ev->ev_flags & EVLIST_TIMEOUT) {
25662b15cb3dSCy Schubert event_queue_remove_timeout(base, ev);
25672b15cb3dSCy Schubert evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
25682b15cb3dSCy Schubert }
25692b15cb3dSCy Schubert
25702b15cb3dSCy Schubert return (0);
25712b15cb3dSCy Schubert }
25722b15cb3dSCy Schubert
25732b15cb3dSCy Schubert int
event_remove_timer(struct event * ev)25742b15cb3dSCy Schubert event_remove_timer(struct event *ev)
25752b15cb3dSCy Schubert {
25762b15cb3dSCy Schubert int res;
25772b15cb3dSCy Schubert
25782b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
25792b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
25802b15cb3dSCy Schubert return -1;
25812b15cb3dSCy Schubert }
25822b15cb3dSCy Schubert
25832b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
25842b15cb3dSCy Schubert
25852b15cb3dSCy Schubert res = event_remove_timer_nolock_(ev);
25862b15cb3dSCy Schubert
25872b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
25882b15cb3dSCy Schubert
25892b15cb3dSCy Schubert return (res);
25902b15cb3dSCy Schubert }
25912b15cb3dSCy Schubert
/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time.
 *
 * Returns 0 on success, -1 on failure (finalizing event, timeout-heap
 * allocation failure, or evmap registration failure).  May wake the
 * dispatch loop via evthread_notify_base() when the backend or a changed
 * earliest-timeout requires it. */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	/* Only flag bits in EVLIST_ALL may ever be set; anything else means
	 * the event structure is corrupt. */
	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	/* Register with the I/O or signal backend, unless the event is
	 * already inserted or active (in which case the backend already
	 * knows about it). */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			/* Common timeouts carry a tag in the high bits of
			 * tv_usec: mask it off for the time arithmetic, then
			 * restore it so the result is still recognized as a
			 * common timeout. */
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			/* Only the first event in a common-timeout list gets
			 * a representative timer scheduled for the group. */
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
					 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
27672b15cb3dSCy Schubert
27682b15cb3dSCy Schubert static int
event_del_(struct event * ev,int blocking)27692b15cb3dSCy Schubert event_del_(struct event *ev, int blocking)
27702b15cb3dSCy Schubert {
27712b15cb3dSCy Schubert int res;
2772*a466cc55SCy Schubert struct event_base *base = ev->ev_base;
27732b15cb3dSCy Schubert
2774*a466cc55SCy Schubert if (EVUTIL_FAILURE_CHECK(!base)) {
27752b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
27762b15cb3dSCy Schubert return -1;
27772b15cb3dSCy Schubert }
27782b15cb3dSCy Schubert
2779*a466cc55SCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
27802b15cb3dSCy Schubert res = event_del_nolock_(ev, blocking);
2781*a466cc55SCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
27822b15cb3dSCy Schubert
27832b15cb3dSCy Schubert return (res);
27842b15cb3dSCy Schubert }
27852b15cb3dSCy Schubert
/** Remove 'ev' from its event_base.  AUTOBLOCK mode: if the event's
 * callback is running in another thread, wait for it to finish unless the
 * event was set up with EV_FINALIZE (see event_del_nolock_()).
 * @return 0 on success, -1 if the event has no base. */
int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}
27912b15cb3dSCy Schubert
/** Like event_del(), but always waits for a callback running in another
 * thread to finish, even for EV_FINALIZE events.
 * @return 0 on success, -1 if the event has no base. */
int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}
27972b15cb3dSCy Schubert
/** Like event_del(), but never waits for a callback running in another
 * thread; returns immediately after unlinking the event.
 * @return 0 on success, -1 if the event has no base. */
int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}
28032b15cb3dSCy Schubert
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 *
 * Unlinks the event from the timeout queue, the active queues, and the
 * inserted list / backend as applicable, possibly waking the dispatch loop.
 * @return 0 on success (including the no-op finalizing case), -1 if the
 *     event has no base or the backend removal failed. */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* A finalizing event is being torn down elsewhere; deleting it again
	 * would race with the finalizer, so only EVEN_IF_FINALIZING may
	 * proceed. */
	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	base = ev->ev_base;

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
		/* If we do not have events, let's notify event base so it can
		 * exit without waiting */
		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
			notify = 1;
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before returning. That way, when this function
	 * returns, it will be safe to free the user-supplied argument.
	 */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	return (res);
}
28992b15cb3dSCy Schubert
29002b15cb3dSCy Schubert void
event_active(struct event * ev,int res,short ncalls)29012b15cb3dSCy Schubert event_active(struct event *ev, int res, short ncalls)
29022b15cb3dSCy Schubert {
29032b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
29042b15cb3dSCy Schubert event_warnx("%s: event has no event_base set.", __func__);
29052b15cb3dSCy Schubert return;
29062b15cb3dSCy Schubert }
29072b15cb3dSCy Schubert
29082b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
29092b15cb3dSCy Schubert
29102b15cb3dSCy Schubert event_debug_assert_is_setup_(ev);
29112b15cb3dSCy Schubert
29122b15cb3dSCy Schubert event_active_nolock_(ev, res, ncalls);
29132b15cb3dSCy Schubert
29142b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
29152b15cb3dSCy Schubert }
29162b15cb3dSCy Schubert
29172b15cb3dSCy Schubert
/** Lock-held core of event_active(): merge 'res' into the event's result
 * flags and queue it on the active list, waking the loop if needed.
 * Caller must hold the base lock. */
void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Both flags at once is an invalid state. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* Already queued: merge the new result flags and keep the
		 * existing queue position. */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	/* A higher-priority (lower-numbered) activation should interrupt the
	 * currently running priority level. */
	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wait for a concurrently running callback of this event to
		 * finish before touching ev_ncalls/ev_pncalls. */
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}
29682b15cb3dSCy Schubert
29692b15cb3dSCy Schubert void
event_active_later_(struct event * ev,int res)29702b15cb3dSCy Schubert event_active_later_(struct event *ev, int res)
29712b15cb3dSCy Schubert {
29722b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
29732b15cb3dSCy Schubert event_active_later_nolock_(ev, res);
29742b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
29752b15cb3dSCy Schubert }
29762b15cb3dSCy Schubert
29772b15cb3dSCy Schubert void
event_active_later_nolock_(struct event * ev,int res)29782b15cb3dSCy Schubert event_active_later_nolock_(struct event *ev, int res)
29792b15cb3dSCy Schubert {
29802b15cb3dSCy Schubert struct event_base *base = ev->ev_base;
29812b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
29822b15cb3dSCy Schubert
29832b15cb3dSCy Schubert if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
29842b15cb3dSCy Schubert /* We get different kinds of events, add them together */
29852b15cb3dSCy Schubert ev->ev_res |= res;
29862b15cb3dSCy Schubert return;
29872b15cb3dSCy Schubert }
29882b15cb3dSCy Schubert
29892b15cb3dSCy Schubert ev->ev_res = res;
29902b15cb3dSCy Schubert
29912b15cb3dSCy Schubert event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
29922b15cb3dSCy Schubert }
29932b15cb3dSCy Schubert
29942b15cb3dSCy Schubert int
event_callback_activate_(struct event_base * base,struct event_callback * evcb)29952b15cb3dSCy Schubert event_callback_activate_(struct event_base *base,
29962b15cb3dSCy Schubert struct event_callback *evcb)
29972b15cb3dSCy Schubert {
29982b15cb3dSCy Schubert int r;
29992b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
30002b15cb3dSCy Schubert r = event_callback_activate_nolock_(base, evcb);
30012b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
30022b15cb3dSCy Schubert return r;
30032b15cb3dSCy Schubert }
30042b15cb3dSCy Schubert
30052b15cb3dSCy Schubert int
event_callback_activate_nolock_(struct event_base * base,struct event_callback * evcb)30062b15cb3dSCy Schubert event_callback_activate_nolock_(struct event_base *base,
30072b15cb3dSCy Schubert struct event_callback *evcb)
30082b15cb3dSCy Schubert {
30092b15cb3dSCy Schubert int r = 1;
30102b15cb3dSCy Schubert
30112b15cb3dSCy Schubert if (evcb->evcb_flags & EVLIST_FINALIZING)
30122b15cb3dSCy Schubert return 0;
30132b15cb3dSCy Schubert
30142b15cb3dSCy Schubert switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
30152b15cb3dSCy Schubert default:
30162b15cb3dSCy Schubert EVUTIL_ASSERT(0);
3017*a466cc55SCy Schubert EVUTIL_FALLTHROUGH;
30182b15cb3dSCy Schubert case EVLIST_ACTIVE_LATER:
30192b15cb3dSCy Schubert event_queue_remove_active_later(base, evcb);
30202b15cb3dSCy Schubert r = 0;
30212b15cb3dSCy Schubert break;
30222b15cb3dSCy Schubert case EVLIST_ACTIVE:
30232b15cb3dSCy Schubert return 0;
30242b15cb3dSCy Schubert case 0:
30252b15cb3dSCy Schubert break;
30262b15cb3dSCy Schubert }
30272b15cb3dSCy Schubert
30282b15cb3dSCy Schubert event_queue_insert_active(base, evcb);
30292b15cb3dSCy Schubert
30302b15cb3dSCy Schubert if (EVBASE_NEED_NOTIFY(base))
30312b15cb3dSCy Schubert evthread_notify_base(base);
30322b15cb3dSCy Schubert
30332b15cb3dSCy Schubert return r;
30342b15cb3dSCy Schubert }
30352b15cb3dSCy Schubert
3036*a466cc55SCy Schubert int
event_callback_activate_later_nolock_(struct event_base * base,struct event_callback * evcb)30372b15cb3dSCy Schubert event_callback_activate_later_nolock_(struct event_base *base,
30382b15cb3dSCy Schubert struct event_callback *evcb)
30392b15cb3dSCy Schubert {
30402b15cb3dSCy Schubert if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3041*a466cc55SCy Schubert return 0;
30422b15cb3dSCy Schubert
30432b15cb3dSCy Schubert event_queue_insert_active_later(base, evcb);
30442b15cb3dSCy Schubert if (EVBASE_NEED_NOTIFY(base))
30452b15cb3dSCy Schubert evthread_notify_base(base);
3046*a466cc55SCy Schubert return 1;
30472b15cb3dSCy Schubert }
30482b15cb3dSCy Schubert
/** Zero-initialize a raw event_callback and default its priority to the
 * lowest (largest-numbered) active queue of 'base'. */
void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}
30562b15cb3dSCy Schubert
30572b15cb3dSCy Schubert int
event_callback_cancel_(struct event_base * base,struct event_callback * evcb)30582b15cb3dSCy Schubert event_callback_cancel_(struct event_base *base,
30592b15cb3dSCy Schubert struct event_callback *evcb)
30602b15cb3dSCy Schubert {
30612b15cb3dSCy Schubert int r;
30622b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
30632b15cb3dSCy Schubert r = event_callback_cancel_nolock_(base, evcb, 0);
30642b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
30652b15cb3dSCy Schubert return r;
30662b15cb3dSCy Schubert }
30672b15cb3dSCy Schubert
/** Lock-held cancellation of a raw event_callback.  If the callback belongs
 * to a full struct event (EVLIST_INIT), delegates to event_del_nolock_();
 * otherwise just unlinks it from whichever active queue holds it.
 * Always returns 0 except when delegating to event_del_nolock_(). */
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Both flags at once is an invalid state. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* Queued for this iteration: unlink it so it will not run. */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		/* Queued for the next iteration: unlink it. */
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}
30972b15cb3dSCy Schubert
30982b15cb3dSCy Schubert void
event_deferred_cb_init_(struct event_callback * cb,ev_uint8_t priority,deferred_cb_fn fn,void * arg)30992b15cb3dSCy Schubert event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
31002b15cb3dSCy Schubert {
31012b15cb3dSCy Schubert memset(cb, 0, sizeof(*cb));
31022b15cb3dSCy Schubert cb->evcb_cb_union.evcb_selfcb = fn;
31032b15cb3dSCy Schubert cb->evcb_arg = arg;
31042b15cb3dSCy Schubert cb->evcb_pri = priority;
31052b15cb3dSCy Schubert cb->evcb_closure = EV_CLOSURE_CB_SELF;
31062b15cb3dSCy Schubert }
31072b15cb3dSCy Schubert
31082b15cb3dSCy Schubert void
event_deferred_cb_set_priority_(struct event_callback * cb,ev_uint8_t priority)31092b15cb3dSCy Schubert event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
31102b15cb3dSCy Schubert {
31112b15cb3dSCy Schubert cb->evcb_pri = priority;
31122b15cb3dSCy Schubert }
31132b15cb3dSCy Schubert
31142b15cb3dSCy Schubert void
event_deferred_cb_cancel_(struct event_base * base,struct event_callback * cb)31152b15cb3dSCy Schubert event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
31162b15cb3dSCy Schubert {
31172b15cb3dSCy Schubert if (!base)
31182b15cb3dSCy Schubert base = current_base;
31192b15cb3dSCy Schubert event_callback_cancel_(base, cb);
31202b15cb3dSCy Schubert }
31212b15cb3dSCy Schubert
31222b15cb3dSCy Schubert #define MAX_DEFERREDS_QUEUED 32
31232b15cb3dSCy Schubert int
event_deferred_cb_schedule_(struct event_base * base,struct event_callback * cb)31242b15cb3dSCy Schubert event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
31252b15cb3dSCy Schubert {
31262b15cb3dSCy Schubert int r = 1;
31272b15cb3dSCy Schubert if (!base)
31282b15cb3dSCy Schubert base = current_base;
31292b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
31302b15cb3dSCy Schubert if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3131*a466cc55SCy Schubert r = event_callback_activate_later_nolock_(base, cb);
31322b15cb3dSCy Schubert } else {
31332b15cb3dSCy Schubert r = event_callback_activate_nolock_(base, cb);
3134*a466cc55SCy Schubert if (r) {
3135*a466cc55SCy Schubert ++base->n_deferreds_queued;
3136*a466cc55SCy Schubert }
31372b15cb3dSCy Schubert }
31382b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
31392b15cb3dSCy Schubert return r;
31402b15cb3dSCy Schubert }
31412b15cb3dSCy Schubert
31422b15cb3dSCy Schubert static int
timeout_next(struct event_base * base,struct timeval ** tv_p)31432b15cb3dSCy Schubert timeout_next(struct event_base *base, struct timeval **tv_p)
31442b15cb3dSCy Schubert {
31452b15cb3dSCy Schubert /* Caller must hold th_base_lock */
31462b15cb3dSCy Schubert struct timeval now;
31472b15cb3dSCy Schubert struct event *ev;
31482b15cb3dSCy Schubert struct timeval *tv = *tv_p;
31492b15cb3dSCy Schubert int res = 0;
31502b15cb3dSCy Schubert
31512b15cb3dSCy Schubert ev = min_heap_top_(&base->timeheap);
31522b15cb3dSCy Schubert
31532b15cb3dSCy Schubert if (ev == NULL) {
31542b15cb3dSCy Schubert /* if no time-based events are active wait for I/O */
31552b15cb3dSCy Schubert *tv_p = NULL;
31562b15cb3dSCy Schubert goto out;
31572b15cb3dSCy Schubert }
31582b15cb3dSCy Schubert
31592b15cb3dSCy Schubert if (gettime(base, &now) == -1) {
31602b15cb3dSCy Schubert res = -1;
31612b15cb3dSCy Schubert goto out;
31622b15cb3dSCy Schubert }
31632b15cb3dSCy Schubert
31642b15cb3dSCy Schubert if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
31652b15cb3dSCy Schubert evutil_timerclear(tv);
31662b15cb3dSCy Schubert goto out;
31672b15cb3dSCy Schubert }
31682b15cb3dSCy Schubert
31692b15cb3dSCy Schubert evutil_timersub(&ev->ev_timeout, &now, tv);
31702b15cb3dSCy Schubert
31712b15cb3dSCy Schubert EVUTIL_ASSERT(tv->tv_sec >= 0);
31722b15cb3dSCy Schubert EVUTIL_ASSERT(tv->tv_usec >= 0);
31732b15cb3dSCy Schubert event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
31742b15cb3dSCy Schubert
31752b15cb3dSCy Schubert out:
31762b15cb3dSCy Schubert return (res);
31772b15cb3dSCy Schubert }
31782b15cb3dSCy Schubert
31792b15cb3dSCy Schubert /* Activate every event whose timeout has elapsed. */
31802b15cb3dSCy Schubert static void
timeout_process(struct event_base * base)31812b15cb3dSCy Schubert timeout_process(struct event_base *base)
31822b15cb3dSCy Schubert {
31832b15cb3dSCy Schubert /* Caller must hold lock. */
31842b15cb3dSCy Schubert struct timeval now;
31852b15cb3dSCy Schubert struct event *ev;
31862b15cb3dSCy Schubert
31872b15cb3dSCy Schubert if (min_heap_empty_(&base->timeheap)) {
31882b15cb3dSCy Schubert return;
31892b15cb3dSCy Schubert }
31902b15cb3dSCy Schubert
31912b15cb3dSCy Schubert gettime(base, &now);
31922b15cb3dSCy Schubert
31932b15cb3dSCy Schubert while ((ev = min_heap_top_(&base->timeheap))) {
31942b15cb3dSCy Schubert if (evutil_timercmp(&ev->ev_timeout, &now, >))
31952b15cb3dSCy Schubert break;
31962b15cb3dSCy Schubert
31972b15cb3dSCy Schubert /* delete this event from the I/O queues */
31982b15cb3dSCy Schubert event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
31992b15cb3dSCy Schubert
32002b15cb3dSCy Schubert event_debug(("timeout_process: event: %p, call %p",
32012b15cb3dSCy Schubert ev, ev->ev_callback));
32022b15cb3dSCy Schubert event_active_nolock_(ev, EV_TIMEOUT, 1);
32032b15cb3dSCy Schubert }
32042b15cb3dSCy Schubert }
32052b15cb3dSCy Schubert
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/* Record a new high-water mark in 'var' whenever 'v' exceeds it. */
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
 *     if (!(flags & EVLIST_INTERNAL))
 *         base->event_count--/++;
 * i.e., internal events are excluded from the user-visible event count.
 */
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= !((flags) & EVLIST_INTERNAL))
#define INCR_EVENT_COUNT(base,flags) do {					\
	((base)->event_count += !((flags) & EVLIST_INTERNAL));			\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
} while (0)
32222b15cb3dSCy Schubert
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	/* Clear the "registered with the backend" mark on 'ev' and update
	 * the event count.  Caller must hold the base lock. */
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
32352b15cb3dSCy Schubert static void
event_queue_remove_active(struct event_base * base,struct event_callback * evcb)32362b15cb3dSCy Schubert event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
32372b15cb3dSCy Schubert {
32382b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
32392b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
32402b15cb3dSCy Schubert event_errx(1, "%s: %p not on queue %x", __func__,
32412b15cb3dSCy Schubert evcb, EVLIST_ACTIVE);
32422b15cb3dSCy Schubert return;
32432b15cb3dSCy Schubert }
32442b15cb3dSCy Schubert DECR_EVENT_COUNT(base, evcb->evcb_flags);
32452b15cb3dSCy Schubert evcb->evcb_flags &= ~EVLIST_ACTIVE;
32462b15cb3dSCy Schubert base->event_count_active--;
32472b15cb3dSCy Schubert
32482b15cb3dSCy Schubert TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
32492b15cb3dSCy Schubert evcb, evcb_active_next);
32502b15cb3dSCy Schubert }
32512b15cb3dSCy Schubert static void
event_queue_remove_active_later(struct event_base * base,struct event_callback * evcb)32522b15cb3dSCy Schubert event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
32532b15cb3dSCy Schubert {
32542b15cb3dSCy Schubert EVENT_BASE_ASSERT_LOCKED(base);
32552b15cb3dSCy Schubert if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
32562b15cb3dSCy Schubert event_errx(1, "%s: %p not on queue %x", __func__,
32572b15cb3dSCy Schubert evcb, EVLIST_ACTIVE_LATER);
32582b15cb3dSCy Schubert return;
32592b15cb3dSCy Schubert }
32602b15cb3dSCy Schubert DECR_EVENT_COUNT(base, evcb->evcb_flags);
32612b15cb3dSCy Schubert evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
32622b15cb3dSCy Schubert base->event_count_active--;
32632b15cb3dSCy Schubert
32642b15cb3dSCy Schubert TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
32652b15cb3dSCy Schubert }
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	/* Take 'ev' off whichever timeout structure it lives on: either a
	 * common-timeout list or the min-heap.  Caller must hold the lock. */
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		/* Common timeouts live on per-duration queues, not the heap. */
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
32872b15cb3dSCy Schubert
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		/* Not currently scheduled: a plain insert suffices. */
		event_queue_insert_timeout(base, ev);
		return;
	}

	/* Dispatch on (old home, new home): bit 1 = was on a common-timeout
	 * list, bit 0 = will be on one. */
	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
33282b15cb3dSCy Schubert
/* Add 'ev' to the common timeout list in 'ev'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ev' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	/* 'ev' sorts before everything on the list (or the list was empty). */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
33592b15cb3dSCy Schubert
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	/* Mark 'ev' as registered with the backend ("inserted") and update
	 * the event count.  Caller must hold the base lock. */
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}
33752b15cb3dSCy Schubert
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	/* Append 'evcb' to the active queue for its priority so the event
	 * loop will run it this iteration.  Caller must hold the lock. */
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	/* Track the active count and its observed maximum. */
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
33962b15cb3dSCy Schubert
static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	/* Queue 'evcb' to become active on the NEXT loop iteration.
	 * Caller must hold the lock. */
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	/* "Active later" callbacks count toward the active totals too. */
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}
34132b15cb3dSCy Schubert
static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	/* Schedule 'ev' on the appropriate timeout structure: a per-duration
	 * common-timeout list, or the min-heap.  Caller must hold the lock. */
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
34372b15cb3dSCy Schubert
static void
event_queue_make_later_events_active(struct event_base *base)
{
	/* Promote every "active later" callback onto the real active queues
	 * so it runs this loop iteration.  Caller must hold th_base_lock. */
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		/* Swap the ACTIVE_LATER flag for ACTIVE in one assignment. */
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		/* Self-contained deferred callbacks count toward the
		 * deferred-activation cap. */
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
34522b15cb3dSCy Schubert
34532b15cb3dSCy Schubert /* Functions for debugging */
34542b15cb3dSCy Schubert
34552b15cb3dSCy Schubert const char *
event_get_version(void)34562b15cb3dSCy Schubert event_get_version(void)
34572b15cb3dSCy Schubert {
34582b15cb3dSCy Schubert return (EVENT__VERSION);
34592b15cb3dSCy Schubert }
34602b15cb3dSCy Schubert
34612b15cb3dSCy Schubert ev_uint32_t
event_get_version_number(void)34622b15cb3dSCy Schubert event_get_version_number(void)
34632b15cb3dSCy Schubert {
34642b15cb3dSCy Schubert return (EVENT__NUMERIC_VERSION);
34652b15cb3dSCy Schubert }
34662b15cb3dSCy Schubert
34672b15cb3dSCy Schubert /*
34682b15cb3dSCy Schubert * No thread-safe interface needed - the information should be the same
34692b15cb3dSCy Schubert * for all threads.
34702b15cb3dSCy Schubert */
34712b15cb3dSCy Schubert
34722b15cb3dSCy Schubert const char *
event_get_method(void)34732b15cb3dSCy Schubert event_get_method(void)
34742b15cb3dSCy Schubert {
34752b15cb3dSCy Schubert return (current_base->evsel->name);
34762b15cb3dSCy Schubert }
34772b15cb3dSCy Schubert
#ifndef EVENT__DISABLE_MM_REPLACEMENT
/* User-replaceable allocation hooks; installed via
 * event_set_mem_functions().  NULL means "use the C library default". */
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
34822b15cb3dSCy Schubert
34832b15cb3dSCy Schubert void *
event_mm_malloc_(size_t sz)34842b15cb3dSCy Schubert event_mm_malloc_(size_t sz)
34852b15cb3dSCy Schubert {
34862b15cb3dSCy Schubert if (sz == 0)
34872b15cb3dSCy Schubert return NULL;
34882b15cb3dSCy Schubert
34892b15cb3dSCy Schubert if (mm_malloc_fn_)
34902b15cb3dSCy Schubert return mm_malloc_fn_(sz);
34912b15cb3dSCy Schubert else
34922b15cb3dSCy Schubert return malloc(sz);
34932b15cb3dSCy Schubert }
34942b15cb3dSCy Schubert
34952b15cb3dSCy Schubert void *
event_mm_calloc_(size_t count,size_t size)34962b15cb3dSCy Schubert event_mm_calloc_(size_t count, size_t size)
34972b15cb3dSCy Schubert {
34982b15cb3dSCy Schubert if (count == 0 || size == 0)
34992b15cb3dSCy Schubert return NULL;
35002b15cb3dSCy Schubert
35012b15cb3dSCy Schubert if (mm_malloc_fn_) {
35022b15cb3dSCy Schubert size_t sz = count * size;
35032b15cb3dSCy Schubert void *p = NULL;
35042b15cb3dSCy Schubert if (count > EV_SIZE_MAX / size)
35052b15cb3dSCy Schubert goto error;
35062b15cb3dSCy Schubert p = mm_malloc_fn_(sz);
35072b15cb3dSCy Schubert if (p)
35082b15cb3dSCy Schubert return memset(p, 0, sz);
35092b15cb3dSCy Schubert } else {
35102b15cb3dSCy Schubert void *p = calloc(count, size);
35112b15cb3dSCy Schubert #ifdef _WIN32
35122b15cb3dSCy Schubert /* Windows calloc doesn't reliably set ENOMEM */
35132b15cb3dSCy Schubert if (p == NULL)
35142b15cb3dSCy Schubert goto error;
35152b15cb3dSCy Schubert #endif
35162b15cb3dSCy Schubert return p;
35172b15cb3dSCy Schubert }
35182b15cb3dSCy Schubert
35192b15cb3dSCy Schubert error:
35202b15cb3dSCy Schubert errno = ENOMEM;
35212b15cb3dSCy Schubert return NULL;
35222b15cb3dSCy Schubert }
35232b15cb3dSCy Schubert
35242b15cb3dSCy Schubert char *
event_mm_strdup_(const char * str)35252b15cb3dSCy Schubert event_mm_strdup_(const char *str)
35262b15cb3dSCy Schubert {
35272b15cb3dSCy Schubert if (!str) {
35282b15cb3dSCy Schubert errno = EINVAL;
35292b15cb3dSCy Schubert return NULL;
35302b15cb3dSCy Schubert }
35312b15cb3dSCy Schubert
35322b15cb3dSCy Schubert if (mm_malloc_fn_) {
35332b15cb3dSCy Schubert size_t ln = strlen(str);
35342b15cb3dSCy Schubert void *p = NULL;
35352b15cb3dSCy Schubert if (ln == EV_SIZE_MAX)
35362b15cb3dSCy Schubert goto error;
35372b15cb3dSCy Schubert p = mm_malloc_fn_(ln+1);
35382b15cb3dSCy Schubert if (p)
35392b15cb3dSCy Schubert return memcpy(p, str, ln+1);
35402b15cb3dSCy Schubert } else
35412b15cb3dSCy Schubert #ifdef _WIN32
35422b15cb3dSCy Schubert return _strdup(str);
35432b15cb3dSCy Schubert #else
35442b15cb3dSCy Schubert return strdup(str);
35452b15cb3dSCy Schubert #endif
35462b15cb3dSCy Schubert
35472b15cb3dSCy Schubert error:
35482b15cb3dSCy Schubert errno = ENOMEM;
35492b15cb3dSCy Schubert return NULL;
35502b15cb3dSCy Schubert }
35512b15cb3dSCy Schubert
35522b15cb3dSCy Schubert void *
event_mm_realloc_(void * ptr,size_t sz)35532b15cb3dSCy Schubert event_mm_realloc_(void *ptr, size_t sz)
35542b15cb3dSCy Schubert {
35552b15cb3dSCy Schubert if (mm_realloc_fn_)
35562b15cb3dSCy Schubert return mm_realloc_fn_(ptr, sz);
35572b15cb3dSCy Schubert else
35582b15cb3dSCy Schubert return realloc(ptr, sz);
35592b15cb3dSCy Schubert }
35602b15cb3dSCy Schubert
35612b15cb3dSCy Schubert void
event_mm_free_(void * ptr)35622b15cb3dSCy Schubert event_mm_free_(void *ptr)
35632b15cb3dSCy Schubert {
35642b15cb3dSCy Schubert if (mm_free_fn_)
35652b15cb3dSCy Schubert mm_free_fn_(ptr);
35662b15cb3dSCy Schubert else
35672b15cb3dSCy Schubert free(ptr);
35682b15cb3dSCy Schubert }
35692b15cb3dSCy Schubert
35702b15cb3dSCy Schubert void
event_set_mem_functions(void * (* malloc_fn)(size_t sz),void * (* realloc_fn)(void * ptr,size_t sz),void (* free_fn)(void * ptr))35712b15cb3dSCy Schubert event_set_mem_functions(void *(*malloc_fn)(size_t sz),
35722b15cb3dSCy Schubert void *(*realloc_fn)(void *ptr, size_t sz),
35732b15cb3dSCy Schubert void (*free_fn)(void *ptr))
35742b15cb3dSCy Schubert {
35752b15cb3dSCy Schubert mm_malloc_fn_ = malloc_fn;
35762b15cb3dSCy Schubert mm_realloc_fn_ = realloc_fn;
35772b15cb3dSCy Schubert mm_free_fn_ = free_fn;
35782b15cb3dSCy Schubert }
35792b15cb3dSCy Schubert #endif
35802b15cb3dSCy Schubert
#ifdef EVENT__HAVE_EVENTFD
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t counter;
	ev_ssize_t n;
	struct event_base *base = arg;

	/* Consume the eventfd counter so the fd stops polling readable;
	 * EAGAIN just means another thread already drained it. */
	n = read(fd, (void*) &counter, sizeof(counter));
	if (n < 0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif
35982b15cb3dSCy Schubert
static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	/* Read and discard every pending byte on the notification
	 * pipe/socketpair, then clear the "notification pending" flag. */
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
36162b15cb3dSCy Schubert
36172b15cb3dSCy Schubert int
evthread_make_base_notifiable(struct event_base * base)36182b15cb3dSCy Schubert evthread_make_base_notifiable(struct event_base *base)
36192b15cb3dSCy Schubert {
36202b15cb3dSCy Schubert int r;
36212b15cb3dSCy Schubert if (!base)
36222b15cb3dSCy Schubert return -1;
36232b15cb3dSCy Schubert
36242b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
36252b15cb3dSCy Schubert r = evthread_make_base_notifiable_nolock_(base);
36262b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
36272b15cb3dSCy Schubert return r;
36282b15cb3dSCy Schubert }
36292b15cb3dSCy Schubert
/* Set up the cross-thread wakeup mechanism for 'base', preferring (in
 * order) kqueue self-notification, eventfd, then an internal pipe.
 * Caller must hold th_base_lock.  Returns 0 on success, -1 on failure. */
static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	/* Try an eventfd first: one fd instead of a pipe pair. */
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
36782b15cb3dSCy Schubert
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	/* Invoke 'fn' exactly once on every event known to 'base',
	 * stopping early (and returning fn's value) if fn returns nonzero.
	 * Caller must hold the base lock.  Returns 0 when all events were
	 * visited. */
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues rather than
	 * the min-heap. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't touched
	 * yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear), or
				 * we already processed it (inserted or
				 * timeout set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
37382b15cb3dSCy Schubert
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events, one per line, with their event flags and
 * (if scheduled) their timeout adjusted by the base's clock difference. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	/* 'arg' is the FILE* handed in by event_base_dump_events. */
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	/* Skip events that are neither inserted for their fd/signal nor
	 * scheduled on a timeout; active ones are printed elsewhere. */
	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_events&EV_ET)?" ET":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		/* NOTE(review): MICROSECONDS_MASK presumably strips the
		 * common-timeout tag bits stored in tv_usec -- confirm. */
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		/* Shift the deadline by the base's recorded clock offset so
		 * the printed time is meaningful to the reader. */
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
37722b15cb3dSCy Schubert
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active (or active-later) events, one per line, with the
 * result flags (ev_res) they became active with. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	/* 'arg' is the FILE* handed in by event_base_dump_events; 'base'
	 * is unused here but required by the callback signature. */
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	/* Skip events that are not queued to run now or on the next
	 * iteration; inserted ones are printed elsewhere. */
	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
37972b15cb3dSCy Schubert
37982b15cb3dSCy Schubert int
event_base_foreach_event(struct event_base * base,event_base_foreach_event_cb fn,void * arg)37992b15cb3dSCy Schubert event_base_foreach_event(struct event_base *base,
38002b15cb3dSCy Schubert event_base_foreach_event_cb fn, void *arg)
38012b15cb3dSCy Schubert {
38022b15cb3dSCy Schubert int r;
38032b15cb3dSCy Schubert if ((!fn) || (!base)) {
38042b15cb3dSCy Schubert return -1;
38052b15cb3dSCy Schubert }
38062b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
38072b15cb3dSCy Schubert r = event_base_foreach_event_nolock_(base, fn, arg);
38082b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
38092b15cb3dSCy Schubert return r;
38102b15cb3dSCy Schubert }
38112b15cb3dSCy Schubert
38122b15cb3dSCy Schubert
38132b15cb3dSCy Schubert void
event_base_dump_events(struct event_base * base,FILE * output)38142b15cb3dSCy Schubert event_base_dump_events(struct event_base *base, FILE *output)
38152b15cb3dSCy Schubert {
38162b15cb3dSCy Schubert EVBASE_ACQUIRE_LOCK(base, th_base_lock);
38172b15cb3dSCy Schubert fprintf(output, "Inserted events:\n");
38182b15cb3dSCy Schubert event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
38192b15cb3dSCy Schubert
38202b15cb3dSCy Schubert fprintf(output, "Active events:\n");
38212b15cb3dSCy Schubert event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
38222b15cb3dSCy Schubert EVBASE_RELEASE_LOCK(base, th_base_lock);
38232b15cb3dSCy Schubert }
38242b15cb3dSCy Schubert
/* Make events watching 'fd' on 'base' become active.  Without EV_TIMEOUT
 * in 'events', activates the fd's I/O events for the requested
 * read/write/closed bits; with EV_TIMEOUT, instead activates (with
 * EV_TIMEOUT, count 1) every pending timer event whose ev_fd matches. */
void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Activate any non timer events */
	if (!(events & EV_TIMEOUT)) {
		evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	} else {
		/* If we want to activate timer events, loop and activate each event with
		 * the same fd in both the timeheap and common timeouts list */
		int i;
		unsigned u;
		struct event *ev;

		/* Scan the min-heap of pending timeouts. */
		for (u = 0; u < base->timeheap.n; ++u) {
			ev = base->timeheap.p[u];
			if (ev->ev_fd == fd) {
				event_active_nolock_(ev, EV_TIMEOUT, 1);
			}
		}

		/* Scan every common-timeout queue as well. */
		for (i = 0; i < base->n_common_timeouts; ++i) {
			struct common_timeout_list *ctl = base->common_timeout_queues[i];
			TAILQ_FOREACH(ev, &ctl->events,
			    ev_timeout_pos.ev_next_with_common_timeout) {
				if (ev->ev_fd == fd) {
					event_active_nolock_(ev, EV_TIMEOUT, 1);
				}
			}
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
38602b15cb3dSCy Schubert
/* Make every event watching signal 'sig' on 'base' become active, with a
 * delivery count of 1, under the base lock. */
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
38682b15cb3dSCy Schubert
38692b15cb3dSCy Schubert
/* Increment 'base's count of virtual events, under the base lock.
 * (Virtual events have no fd/signal/timeout of their own; see
 * event_base_del_virtual_ for the matching decrement.) */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	/* Keep the recorded maximum in step with the new count. */
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
38782b15cb3dSCy Schubert
/* Decrement 'base's count of virtual events, under the base lock.
 * Asserts the count was positive.  When the count reaches zero and the
 * loop is waiting in another thread, notifies the base so it can notice
 * the change. */
void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
38892b15cb3dSCy Schubert
/* Free the global lock guarding the debug-mode event map, if it was ever
 * allocated, and disable lock debugging in the thread layer.  Compiles to
 * a no-op when thread support or debug mode is disabled at build time. */
static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		/* Clear the pointer so a later call is a harmless no-op. */
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}
39042b15cb3dSCy Schubert
/* Free all global state owned by debug mode (currently just its locks). */
static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}
39102b15cb3dSCy Schubert
/* Free the global state owned by the signal-handling subsystem. */
static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}
39162b15cb3dSCy Schubert
/* Free the global state owned by the evutil helpers. */
static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}
39222b15cb3dSCy Schubert
/* Free every piece of library-global state, in dependency order:
 * debug tracking first, then signal handling, then the evutil helpers. */
static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}
39302b15cb3dSCy Schubert
/* Release all global state held by libevent: turns off debug mode and
 * frees the library-wide structures.  Intended to be called once, when no
 * further libevent calls will be made. */
void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
39372b15cb3dSCy Schubert
39382b15cb3dSCy Schubert #ifndef EVENT__DISABLE_THREAD_SUPPORT
39392b15cb3dSCy Schubert int
event_global_setup_locks_(const int enable_locks)39402b15cb3dSCy Schubert event_global_setup_locks_(const int enable_locks)
39412b15cb3dSCy Schubert {
39422b15cb3dSCy Schubert #ifndef EVENT__DISABLE_DEBUG_MODE
39432b15cb3dSCy Schubert EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
39442b15cb3dSCy Schubert #endif
39452b15cb3dSCy Schubert if (evsig_global_setup_locks_(enable_locks) < 0)
39462b15cb3dSCy Schubert return -1;
39472b15cb3dSCy Schubert if (evutil_global_setup_locks_(enable_locks) < 0)
39482b15cb3dSCy Schubert return -1;
39492b15cb3dSCy Schubert if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
39502b15cb3dSCy Schubert return -1;
39512b15cb3dSCy Schubert return 0;
39522b15cb3dSCy Schubert }
39532b15cb3dSCy Schubert #endif
39542b15cb3dSCy Schubert
/* Locked wrapper for event_base_assert_ok_nolock_(): sanity-check all of
 * 'base's internal data structures, asserting on any inconsistency. */
void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
39622b15cb3dSCy Schubert
/* Check the internal consistency of 'base', EVUTIL_ASSERT()ing on any
 * violation.  Caller must hold the base lock. */
void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property: every entry is flagged as a timeout
	 * event, sorts no earlier than its parent, and records its own
	 * position in the heap. */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine: each queue is a valid
	 * tail queue, sorted by deadline, and each member is tagged as a
	 * common timeout belonging to this queue's index. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues: each entry must be flagged ACTIVE (not
	 * ACTIVE_LATER) and queued at the priority matching its index. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		/* Entries deferred to the next iteration must carry only the
		 * ACTIVE_LATER flag; they count toward the active total. */
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	/* The cached active-event count must agree with what we walked. */
	EVUTIL_ASSERT(count == base->event_count_active);
}
4021