Lines Matching +full:tv +full:- +full:set

2  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
62 #include "event-internal.h"
63 #include "defer-internal.h"
64 #include "evthread-internal.h"
67 #include "log-internal.h"
68 #include "evmap-internal.h"
69 #include "iocp-internal.h"
70 #include "changelist-internal.h"
72 #include "ht-internal.h"
73 #include "util-internal.h"
77 #include "kqueue-internal.h"
188 * honestly mean to cast e->ptr to an integer, and discard any in hash_debug_entry()
191 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr); in hash_debug_entry()
202 return a->ptr == b->ptr; in eq_debug_entry()
210 * @brief debug mode variable which is set for any function/structure that needs
214 * and if set to something other than zero, this means the evthread setup
222 /* Set if it's too late to enable event_debug_mode. */
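
The debug-mode flags above are driven by the public event_enable_debug_mode(), which (per the "too late" flag) must run before any event_base exists. A minimal sketch, assuming a program that opts in at startup:

    #include <event2/event.h>

    int main(void)
    {
        /* Must be called before the first event_base is created;
         * afterwards the "too late" flag above makes it an error. */
        event_enable_debug_mode();

        struct event_base *base = event_base_new();
        /* ... misuse such as re-assigning an added event now trips
         * the debug checks recorded in this file ... */
        event_base_free(base);
        return 0;
    }
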
247 dent->added = 0; in HT_PROTOTYPE()
253 dent->ptr = ev; in HT_PROTOTYPE()
254 dent->added = 0; in HT_PROTOTYPE()
292 dent->added = 1; in event_debug_note_add_()
295 "%s: noting an add on a non-setup event %p" in event_debug_note_add_()
298 __func__, ev, ev->ev_events, in event_debug_note_add_()
299 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); in event_debug_note_add_()
318 dent->added = 0; in event_debug_note_del_()
321 "%s: noting a del on a non-setup event %p" in event_debug_note_del_()
324 __func__, ev, ev->ev_events, in event_debug_note_del_()
325 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); in event_debug_note_del_()
345 "%s called on a non-initialized event %p" in event_debug_assert_is_setup_()
348 __func__, ev, ev->ev_events, in event_debug_assert_is_setup_()
349 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); in event_debug_assert_is_setup_()
353 /* assert that ev is not added (i.e., okay to tear down or set up again) */
364 if (dent && dent->added) { in event_debug_assert_not_added_()
369 __func__, ev, ev->ev_events, in event_debug_assert_not_added_()
370 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); in event_debug_assert_not_added_()
401 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
404 * to monotonic time? Set this to -1 for 'never.' */
407 /** Set 'tp' to the current time according to 'base'. We must hold the lock
410 * Return 0 on success, -1 on failure.
417 if (base->tv_cache.tv_sec) { in gettime()
418 *tp = base->tv_cache; in gettime()
422 if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) { in gettime()
423 return -1; in gettime()
426 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL in gettime()
427 < tp->tv_sec) { in gettime()
428 struct timeval tv; in gettime() local
429 evutil_gettimeofday(&tv,NULL); in gettime()
430 evutil_timersub(&tv, tp, &base->tv_clock_diff); in gettime()
431 base->last_updated_clock_diff = tp->tv_sec; in gettime()
438 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv) in event_base_gettimeofday_cached() argument
444 return evutil_gettimeofday(tv, NULL); in event_base_gettimeofday_cached()
448 if (base->tv_cache.tv_sec == 0) { in event_base_gettimeofday_cached()
449 r = evutil_gettimeofday(tv, NULL); in event_base_gettimeofday_cached()
451 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv); in event_base_gettimeofday_cached()
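
A usage sketch for the caching shown above: inside a running callback, event_base_gettimeofday_cached() serves the wall-clock time from tv_cache plus tv_clock_diff instead of issuing a syscall; outside the loop it falls back to evutil_gettimeofday(). Assuming a base and a callback registered on it:

    #include <stdio.h>
    #include <event2/event.h>

    static void
    read_cb(evutil_socket_t fd, short what, void *arg)
    {
        struct event_base *base = arg;
        struct timeval now;

        /* Cheap here: answered from the cache updated once per loop. */
        if (event_base_gettimeofday_cached(base, &now) == 0)
            printf("now = %ld.%06ld\n", (long)now.tv_sec, (long)now.tv_usec);
    }
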
462 base->tv_cache.tv_sec = 0; in clear_time_cache()
469 base->tv_cache.tv_sec = 0; in update_time_cache()
470 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME)) in update_time_cache()
471 gettime(base, &base->tv_cache); in update_time_cache()
481 return -1; in event_base_update_cache_time()
485 if (base->running_loop) in event_base_update_cache_time()
494 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT)); in event_callback_to_event()
501 return &ev->ev_evcallback; in event_to_event_callback()
539 TAILQ_FOREACH(entry, &cfg->entries, next) { in event_config_is_avoided_method()
540 if (entry->avoid_method != NULL && in event_config_is_avoided_method()
541 strcmp(entry->avoid_method, method) == 0) in event_config_is_avoided_method()
566 return base->evsel->features; in event_base_get_features()
621 base->flags = cfg->flags; in event_base_new_with_config()
624 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV)); in event_base_new_with_config()
629 cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER); in event_base_new_with_config()
634 base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER; in event_base_new_with_config()
638 evutil_configure_monotonic_time_(&base->monotonic_timer, flags); in event_base_new_with_config()
643 min_heap_ctor_(&base->timeheap); in event_base_new_with_config()
645 base->sig.ev_signal_pair[0] = -1; in event_base_new_with_config()
646 base->sig.ev_signal_pair[1] = -1; in event_base_new_with_config()
647 base->th_notify_fd[0] = -1; in event_base_new_with_config()
648 base->th_notify_fd[1] = -1; in event_base_new_with_config()
650 TAILQ_INIT(&base->active_later_queue); in event_base_new_with_config()
652 evmap_io_initmap_(&base->io); in event_base_new_with_config()
653 evmap_signal_initmap_(&base->sigmap); in event_base_new_with_config()
654 event_changelist_init_(&base->changelist); in event_base_new_with_config()
656 base->evbase = NULL; in event_base_new_with_config()
659 memcpy(&base->max_dispatch_time, in event_base_new_with_config()
660 &cfg->max_dispatch_interval, sizeof(struct timeval)); in event_base_new_with_config()
661 base->limit_callbacks_after_prio = in event_base_new_with_config()
662 cfg->limit_callbacks_after_prio; in event_base_new_with_config()
664 base->max_dispatch_time.tv_sec = -1; in event_base_new_with_config()
665 base->limit_callbacks_after_prio = 1; in event_base_new_with_config()
667 if (cfg && cfg->max_dispatch_callbacks >= 0) { in event_base_new_with_config()
668 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks; in event_base_new_with_config()
670 base->max_dispatch_callbacks = INT_MAX; in event_base_new_with_config()
672 if (base->max_dispatch_callbacks == INT_MAX && in event_base_new_with_config()
673 base->max_dispatch_time.tv_sec == -1) in event_base_new_with_config()
674 base->limit_callbacks_after_prio = INT_MAX; in event_base_new_with_config()
676 for (i = 0; eventops[i] && !base->evbase; i++) { in event_base_new_with_config()
680 eventops[i]->name)) in event_base_new_with_config()
682 if ((eventops[i]->features & cfg->require_features) in event_base_new_with_config()
683 != cfg->require_features) in event_base_new_with_config()
689 event_is_method_disabled(eventops[i]->name)) in event_base_new_with_config()
692 base->evsel = eventops[i]; in event_base_new_with_config()
694 base->evbase = base->evsel->init(base); in event_base_new_with_config()
697 if (base->evbase == NULL) { in event_base_new_with_config()
700 base->evsel = NULL; in event_base_new_with_config()
706 event_msgx("libevent using: %s", base->evsel->name); in event_base_new_with_config()
722 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) { in event_base_new_with_config()
724 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0); in event_base_new_with_config()
725 EVTHREAD_ALLOC_COND(base->current_event_cond); in event_base_new_with_config()
736 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP)) in event_base_new_with_config()
737 event_base_start_iocp_(base, cfg->n_cpus_hint); in event_base_new_with_config()
747 if (base->iocp) in event_base_start_iocp_()
749 base->iocp = event_iocp_port_launch_(n_cpus); in event_base_start_iocp_()
750 if (!base->iocp) { in event_base_start_iocp_()
752 return -1; in event_base_start_iocp_()
756 return -1; in event_base_start_iocp_()
766 if (!base->iocp) in event_base_stop_iocp_()
768 rv = event_iocp_shutdown_(base->iocp, -1); in event_base_stop_iocp_()
770 base->iocp = NULL; in event_base_stop_iocp_()
781 if (evcb->evcb_flags & EVLIST_INIT) { in event_base_cancel_single_callback_()
783 if (!(ev->ev_flags & EVLIST_INTERNAL)) { in event_base_cancel_single_callback_()
794 if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) { in event_base_cancel_single_callback_()
795 switch (evcb->evcb_closure) { in event_base_cancel_single_callback_()
799 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg); in event_base_cancel_single_callback_()
800 if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) in event_base_cancel_single_callback_()
805 evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg); in event_base_cancel_single_callback_()
818 for (i = 0; i < base->nactivequeues; ++i) { in event_base_free_queues_()
820 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) { in event_base_free_queues_()
829 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { in event_base_free_queues_()
854 /* XXX(niels) - check for internal events first */ in event_base_free_()
861 if (base->th_notify_fd[0] != -1) { in event_base_free_()
862 event_del(&base->th_notify); in event_base_free_()
863 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); in event_base_free_()
864 if (base->th_notify_fd[1] != -1) in event_base_free_()
865 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]); in event_base_free_()
866 base->th_notify_fd[0] = -1; in event_base_free_()
867 base->th_notify_fd[1] = -1; in event_base_free_()
868 event_debug_unassign(&base->th_notify); in event_base_free_()
871 /* Delete all non-internal events. */ in event_base_free_()
874 while ((ev = min_heap_top_(&base->timeheap)) != NULL) { in event_base_free_()
878 for (i = 0; i < base->n_common_timeouts; ++i) { in event_base_free_()
880 base->common_timeout_queues[i]; in event_base_free_()
881 event_del(&ctl->timeout_event); /* Internal; doesn't count */ in event_base_free_()
882 event_debug_unassign(&ctl->timeout_event); in event_base_free_()
883 for (ev = TAILQ_FIRST(&ctl->events); ev; ) { in event_base_free_()
886 if (!(ev->ev_flags & EVLIST_INTERNAL)) { in event_base_free_()
894 if (base->common_timeout_queues) in event_base_free_()
895 mm_free(base->common_timeout_queues); in event_base_free_()
915 event_debug(("%s: %d events were still set in base", in event_base_free_()
918 while (LIST_FIRST(&base->once_events)) { in event_base_free_()
919 struct event_once *eonce = LIST_FIRST(&base->once_events); in event_base_free_()
924 if (base->evsel != NULL && base->evsel->dealloc != NULL) in event_base_free_()
925 base->evsel->dealloc(base); in event_base_free_()
927 for (i = 0; i < base->nactivequeues; ++i) in event_base_free_()
928 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i])); in event_base_free_()
930 EVUTIL_ASSERT(min_heap_empty_(&base->timeheap)); in event_base_free_()
931 min_heap_dtor_(&base->timeheap); in event_base_free_()
933 mm_free(base->activequeues); in event_base_free_()
935 evmap_io_clear_(&base->io); in event_base_free_()
936 evmap_signal_clear_(&base->sigmap); in event_base_free_()
937 event_changelist_freemem_(&base->changelist); in event_base_free_()
939 EVTHREAD_FREE_LOCK(base->th_base_lock, 0); in event_base_free_()
940 EVTHREAD_FREE_COND(base->current_event_cond); in event_base_free_()
990 evsel = base->evsel; in event_reinit()
993 if (evsel->need_reinit) { in event_reinit()
1002 base->evsel = &nil_eventop; in event_reinit()
1005 /* We need to re-create a new signal-notification fd and a new in event_reinit()
1006 * thread-notification fd. Otherwise, we'll still share those with in event_reinit()
1011 if (base->sig.ev_signal_added) { in event_reinit()
1012 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK); in event_reinit()
1013 event_debug_unassign(&base->sig.ev_signal); in event_reinit()
1014 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal)); in event_reinit()
1016 base->sig.ev_signal_added = 0; in event_reinit()
1018 if (base->sig.ev_signal_pair[0] != -1) in event_reinit()
1019 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]); in event_reinit()
1020 if (base->sig.ev_signal_pair[1] != -1) in event_reinit()
1021 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]); in event_reinit()
1022 if (base->th_notify_fn != NULL) { in event_reinit()
1024 base->th_notify_fn = NULL; in event_reinit()
1026 if (base->th_notify_fd[0] != -1) { in event_reinit()
1027 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK); in event_reinit()
1028 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); in event_reinit()
1029 if (base->th_notify_fd[1] != -1) in event_reinit()
1030 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]); in event_reinit()
1031 base->th_notify_fd[0] = -1; in event_reinit()
1032 base->th_notify_fd[1] = -1; in event_reinit()
1033 event_debug_unassign(&base->th_notify); in event_reinit()
1037 base->evsel = evsel; in event_reinit()
1039 if (evsel->need_reinit) { in event_reinit()
1040 /* Reconstruct the backend through brute-force, so that we do in event_reinit()
1049 if (base->evsel->dealloc != NULL) in event_reinit()
1050 base->evsel->dealloc(base); in event_reinit()
1051 base->evbase = evsel->init(base); in event_reinit()
1052 if (base->evbase == NULL) { in event_reinit()
1056 res = -1; in event_reinit()
1062 event_changelist_freemem_(&base->changelist); in event_reinit()
1064 /* Tell the event maps to re-inform the backend about all in event_reinit()
1066 * event get re-created if necessary. */ in event_reinit()
1068 res = -1; in event_reinit()
1072 res = event_add_nolock_(&base->sig.ev_signal, NULL, 0); in event_reinit()
1074 base->sig.ev_signal_added = 1; in event_reinit()
1090 event_gettime_monotonic(struct event_base *base, struct timeval *tv) in event_gettime_monotonic() argument
1092 int rv = -1; in event_gettime_monotonic()
1094 if (base && tv) { in event_gettime_monotonic()
1096 rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv); in event_gettime_monotonic()
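
event_gettime_monotonic() (public since libevent 2.1) reads the same monotonic timer the base uses internally, so intervals measured with it are immune to wall-clock adjustments. A sketch, assuming an already-created base:

    struct timeval t0, t1, elapsed;

    event_gettime_monotonic(base, &t0);
    /* ... work ... */
    event_gettime_monotonic(base, &t1);
    evutil_timersub(&t1, &t0, &elapsed);   /* macro from event2/util.h */
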
1123 tmp[i++] = eventops[k]->name; in event_get_supported_methods()
1143 TAILQ_INIT(&cfg->entries); in event_config_new()
1144 cfg->max_dispatch_interval.tv_sec = -1; in event_config_new()
1145 cfg->max_dispatch_callbacks = INT_MAX; in event_config_new()
1146 cfg->limit_callbacks_after_prio = 1; in event_config_new()
1154 if (entry->avoid_method != NULL) in event_config_entry_free()
1155 mm_free((char *)entry->avoid_method); in event_config_entry_free()
1164 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) { in event_config_free()
1165 TAILQ_REMOVE(&cfg->entries, entry, next); in event_config_free()
1175 return -1; in event_config_set_flag()
1176 cfg->flags |= flag; in event_config_set_flag()
1185 return (-1); in event_config_avoid_method()
1187 if ((entry->avoid_method = mm_strdup(method)) == NULL) { in event_config_avoid_method()
1189 return (-1); in event_config_avoid_method()
1192 TAILQ_INSERT_TAIL(&cfg->entries, entry, next); in event_config_avoid_method()
1202 return (-1); in event_config_require_features()
1203 cfg->require_features = features; in event_config_require_features()
1211 return (-1); in event_config_set_num_cpus_hint()
1212 cfg->n_cpus_hint = cpus; in event_config_set_num_cpus_hint()
1221 memcpy(&cfg->max_dispatch_interval, max_interval, in event_config_set_max_dispatch_interval()
1224 cfg->max_dispatch_interval.tv_sec = -1; in event_config_set_max_dispatch_interval()
1225 cfg->max_dispatch_callbacks = in event_config_set_max_dispatch_interval()
1229 cfg->limit_callbacks_after_prio = min_priority; in event_config_set_max_dispatch_interval()
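
Tying the configuration setters above together, a typical construction sequence looks roughly like this (a sketch with error handling elided):

    #include <limits.h>
    #include <event2/event.h>

    struct event_config *cfg = event_config_new();
    struct timeval max_dispatch = { 0, 5000 };           /* 5 ms */

    event_config_avoid_method(cfg, "select");            /* skip one backend */
    event_config_require_features(cfg, EV_FEATURE_O1);   /* O(1) add/del */
    /* Check the clock after at most 5 ms of callbacks, but only throttle
     * priorities >= 1, mirroring limit_callbacks_after_prio above. */
    event_config_set_max_dispatch_interval(cfg, &max_dispatch, INT_MAX, 1);

    struct event_base *base = event_base_new_with_config(cfg);
    event_config_free(cfg);   /* safe once the base has been created */
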
1243 r = -1; in event_base_priority_init()
1251 if (npriorities == base->nactivequeues) in event_base_priority_init()
1254 if (base->nactivequeues) { in event_base_priority_init()
1255 mm_free(base->activequeues); in event_base_priority_init()
1256 base->nactivequeues = 0; in event_base_priority_init()
1260 base->activequeues = (struct evcallback_list *) in event_base_priority_init()
1262 if (base->activequeues == NULL) { in event_base_priority_init()
1266 base->nactivequeues = npriorities; in event_base_priority_init()
1268 for (i = 0; i < base->nactivequeues; ++i) { in event_base_priority_init()
1269 TAILQ_INIT(&base->activequeues[i]); in event_base_priority_init()
1288 n = base->nactivequeues; in event_base_get_npriorities()
1301 r += base->event_count_active; in event_base_get_num_events()
1304 r += base->virtual_event_count; in event_base_get_num_events()
1307 r += base->event_count; in event_base_get_num_events()
1322 r += base->event_count_active_max; in event_base_get_max_events()
1324 base->event_count_active_max = 0; in event_base_get_max_events()
1328 r += base->virtual_event_count_max; in event_base_get_max_events()
1330 base->virtual_event_count_max = 0; in event_base_get_max_events()
1334 r += base->event_count_max; in event_base_get_max_events()
1336 base->event_count_max = 0; in event_base_get_max_events()
1349 return (base->virtual_event_count > 0 || base->event_count > 0); in event_haveevents()
1360 ncalls = ev->ev_ncalls; in event_signal_closure()
1362 ev->ev_pncalls = &ncalls; in event_signal_closure()
1365 ncalls--; in event_signal_closure()
1366 ev->ev_ncalls = ncalls; in event_signal_closure()
1368 ev->ev_pncalls = NULL; in event_signal_closure()
1369 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg); in event_signal_closure()
1372 should_break = base->event_break; in event_signal_closure()
1377 ev->ev_pncalls = NULL; in event_signal_closure()
1403 #define COMMON_TIMEOUT_IDX(tv) \ argument
1404 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1406 /** Return true iff 'tv' is a common timeout in 'base' */
1408 is_common_timeout(const struct timeval *tv, in is_common_timeout() argument
1412 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC) in is_common_timeout()
1414 idx = COMMON_TIMEOUT_IDX(tv); in is_common_timeout()
1415 return idx < base->n_common_timeouts; in is_common_timeout()
1418 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1423 return (tv1->tv_usec & ~MICROSECONDS_MASK) == in is_same_common_timeout()
1424 (tv2->tv_usec & ~MICROSECONDS_MASK); in is_same_common_timeout()
1427 /** Requires that 'tv' is a common timeout. Return the corresponding
1430 get_common_timeout_list(struct event_base *base, const struct timeval *tv) in get_common_timeout_list() argument
1432 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)]; in get_common_timeout_list()
1437 common_timeout_ok(const struct timeval *tv,
1441 &get_common_timeout_list(base, tv)->duration;
1442 return tv->tv_sec == expect->tv_sec &&
1443 tv->tv_usec == expect->tv_usec;
1453 struct timeval timeout = head->ev_timeout; in common_timeout_schedule()
1455 event_add_nolock_(&ctl->timeout_event, &timeout, 1); in common_timeout_schedule()
1466 struct event_base *base = ctl->base; in common_timeout_callback()
1471 ev = TAILQ_FIRST(&ctl->events); in common_timeout_callback()
1472 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec || in common_timeout_callback()
1473 (ev->ev_timeout.tv_sec == now.tv_sec && in common_timeout_callback()
1474 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec)) in common_timeout_callback()
1491 struct timeval tv; in event_base_init_common_timeout() local
1496 if (duration->tv_usec > 1000000) { in event_base_init_common_timeout()
1497 memcpy(&tv, duration, sizeof(struct timeval)); in event_base_init_common_timeout()
1499 tv.tv_usec &= MICROSECONDS_MASK; in event_base_init_common_timeout()
1500 tv.tv_sec += tv.tv_usec / 1000000; in event_base_init_common_timeout()
1501 tv.tv_usec %= 1000000; in event_base_init_common_timeout()
1502 duration = &tv; in event_base_init_common_timeout()
1504 for (i = 0; i < base->n_common_timeouts; ++i) { in event_base_init_common_timeout()
1506 base->common_timeout_queues[i]; in event_base_init_common_timeout()
1507 if (duration->tv_sec == ctl->duration.tv_sec && in event_base_init_common_timeout()
1508 duration->tv_usec == in event_base_init_common_timeout()
1509 (ctl->duration.tv_usec & MICROSECONDS_MASK)) { in event_base_init_common_timeout()
1510 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base)); in event_base_init_common_timeout()
1511 result = &ctl->duration; in event_base_init_common_timeout()
1515 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) { in event_base_init_common_timeout()
1521 if (base->n_common_timeouts_allocated == base->n_common_timeouts) { in event_base_init_common_timeout()
1522 int n = base->n_common_timeouts < 16 ? 16 : in event_base_init_common_timeout()
1523 base->n_common_timeouts*2; in event_base_init_common_timeout()
1525 mm_realloc(base->common_timeout_queues, in event_base_init_common_timeout()
1531 base->n_common_timeouts_allocated = n; in event_base_init_common_timeout()
1532 base->common_timeout_queues = newqueues; in event_base_init_common_timeout()
1539 TAILQ_INIT(&new_ctl->events); in event_base_init_common_timeout()
1540 new_ctl->duration.tv_sec = duration->tv_sec; in event_base_init_common_timeout()
1541 new_ctl->duration.tv_usec = in event_base_init_common_timeout()
1542 duration->tv_usec | COMMON_TIMEOUT_MAGIC | in event_base_init_common_timeout()
1543 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT); in event_base_init_common_timeout()
1544 evtimer_assign(&new_ctl->timeout_event, base, in event_base_init_common_timeout()
1546 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL; in event_base_init_common_timeout()
1547 event_priority_set(&new_ctl->timeout_event, 0); in event_base_init_common_timeout()
1548 new_ctl->base = base; in event_base_init_common_timeout()
1549 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl; in event_base_init_common_timeout()
1550 result = &new_ctl->duration; in event_base_init_common_timeout()
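
The point of the machinery above is O(1) timeout scheduling when many events share one duration. The documented usage of event_base_init_common_timeout() is roughly (timeout_ev is an assumed timer event):

    struct timeval five_seconds = { 5, 0 };
    const struct timeval *common;

    /* Returns a "magic" timeval whose tv_usec carries COMMON_TIMEOUT_MAGIC
     * plus the queue index; pass it to event_add() like any timeout. */
    common = event_base_init_common_timeout(base, &five_seconds);
    event_add(timeout_ev, common);
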
1572 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) { in event_persist_closure()
1579 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout, in event_persist_closure()
1580 &ev->ev_io_timeout)); in event_persist_closure()
1582 if (is_common_timeout(&ev->ev_timeout, base)) { in event_persist_closure()
1583 delay = ev->ev_io_timeout; in event_persist_closure()
1586 if (ev->ev_res & EV_TIMEOUT) { in event_persist_closure()
1587 relative_to = ev->ev_timeout; in event_persist_closure()
1593 delay = ev->ev_io_timeout; in event_persist_closure()
1594 if (ev->ev_res & EV_TIMEOUT) { in event_persist_closure()
1595 relative_to = ev->ev_timeout; in event_persist_closure()
1614 evcb_callback = ev->ev_callback; in event_persist_closure()
1615 evcb_fd = ev->ev_fd; in event_persist_closure()
1616 evcb_res = ev->ev_res; in event_persist_closure()
1617 evcb_arg = ev->ev_arg; in event_persist_closure()
1629 when it's invoked. Returns -1 if we get a signal or an event_break that
1631 the number of non-internal event_callbacks that we processed.
1645 if (evcb->evcb_flags & EVLIST_INIT) { in event_process_active_single_queue()
1648 if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING) in event_process_active_single_queue()
1655 ev->ev_res & EV_READ ? "EV_READ " : " ", in event_process_active_single_queue()
1656 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ", in event_process_active_single_queue()
1657 ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ", in event_process_active_single_queue()
1658 ev->ev_callback)); in event_process_active_single_queue()
1663 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); in event_process_active_single_queue()
1666 if (!(evcb->evcb_flags & EVLIST_INTERNAL)) in event_process_active_single_queue()
1670 base->current_event = evcb; in event_process_active_single_queue()
1672 base->current_event_waiters = 0; in event_process_active_single_queue()
1675 switch (evcb->evcb_closure) { in event_process_active_single_queue()
1688 evcb_callback = *ev->ev_callback; in event_process_active_single_queue()
1689 res = ev->ev_res; in event_process_active_single_queue()
1691 evcb_callback(ev->ev_fd, res, ev->ev_arg); in event_process_active_single_queue()
1695 void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; in event_process_active_single_queue()
1697 evcb_selfcb(evcb, evcb->evcb_arg); in event_process_active_single_queue()
1703 int evcb_closure = evcb->evcb_closure; in event_process_active_single_queue()
1705 base->current_event = NULL; in event_process_active_single_queue()
1706 evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; in event_process_active_single_queue()
1707 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); in event_process_active_single_queue()
1710 evcb_evfinalize(ev, ev->ev_arg); in event_process_active_single_queue()
1716 void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; in event_process_active_single_queue()
1717 base->current_event = NULL; in event_process_active_single_queue()
1718 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); in event_process_active_single_queue()
1720 evcb_cbfinalize(evcb, evcb->evcb_arg); in event_process_active_single_queue()
1728 base->current_event = NULL; in event_process_active_single_queue()
1730 if (base->current_event_waiters) { in event_process_active_single_queue()
1731 base->current_event_waiters = 0; in event_process_active_single_queue()
1732 EVTHREAD_COND_BROADCAST(base->current_event_cond); in event_process_active_single_queue()
1736 if (base->event_break) in event_process_active_single_queue()
1737 return -1; in event_process_active_single_queue()
1747 if (base->event_continue) in event_process_active_single_queue()
1766 struct timeval tv; in event_process_active() local
1767 const int maxcb = base->max_dispatch_callbacks; in event_process_active()
1768 const int limit_after_prio = base->limit_callbacks_after_prio; in event_process_active()
1769 if (base->max_dispatch_time.tv_sec >= 0) { in event_process_active()
1771 gettime(base, &tv); in event_process_active()
1772 evutil_timeradd(&base->max_dispatch_time, &tv, &tv); in event_process_active()
1773 endtime = &tv; in event_process_active()
1778 for (i = 0; i < base->nactivequeues; ++i) { in event_process_active()
1779 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) { in event_process_active()
1780 base->event_running_priority = i; in event_process_active()
1781 activeq = &base->activequeues[i]; in event_process_active()
1792 * consider lower-priority events */ in event_process_active()
1799 base->event_running_priority = -1; in event_process_active()
1824 return (base->evsel->name); in event_base_get_method()
1833 base->event_gotterm = 1; in event_loopexit_cb()
1837 event_loopexit(const struct timeval *tv) in event_loopexit() argument
1839 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb, in event_loopexit()
1840 current_base, tv)); in event_loopexit()
1844 event_base_loopexit(struct event_base *event_base, const struct timeval *tv) in event_base_loopexit() argument
1846 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb, in event_base_loopexit()
1847 event_base, tv)); in event_base_loopexit()
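
For reference, the exit paths here (event_base_loopexit() above, event_base_loopbreak() just below) are driven from user code roughly as follows; a NULL timeval makes loopexit take effect after the current iteration's callbacks:

    struct timeval ten_seconds = { 10, 0 };

    /* Leave the loop after ~10 seconds of further dispatching... */
    event_base_loopexit(base, &ten_seconds);

    /* ...or stop as soon as the current callback returns, leaving
     * any remaining active callbacks unprocessed: */
    event_base_loopbreak(base);
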
1861 return (-1); in event_base_loopbreak()
1864 event_base->event_break = 1; in event_base_loopbreak()
1880 return (-1); in event_base_loopcontinue()
1883 event_base->event_continue = 1; in event_base_loopcontinue()
1899 res = event_base->event_break; in event_base_got_break()
1909 res = event_base->event_gotterm; in event_base_got_exit()
1925 const struct eventop *evsel = base->evsel; in event_base_loop()
1926 struct timeval tv; in event_base_loop() local
1934 if (base->running_loop) { in event_base_loop()
1938 return -1; in event_base_loop()
1941 base->running_loop = 1; in event_base_loop()
1945 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added) in event_base_loop()
1951 base->th_owner_id = EVTHREAD_GET_ID(); in event_base_loop()
1954 base->event_gotterm = base->event_break = 0; in event_base_loop()
1957 base->event_continue = 0; in event_base_loop()
1958 base->n_deferreds_queued = 0; in event_base_loop()
1961 if (base->event_gotterm) { in event_base_loop()
1965 if (base->event_break) { in event_base_loop()
1969 tv_p = &tv; in event_base_loop()
1977 evutil_timerclear(&tv); in event_base_loop()
1992 res = evsel->dispatch(base, tv_p); in event_base_loop()
1994 if (res == -1) { in event_base_loop()
1997 retval = -1; in event_base_loop()
2018 base->running_loop = 0; in event_base_loop()
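
The loop above is normally entered via event_base_dispatch() or, with flags, event_base_loop(). A minimal driver sketch:

    /* Same as event_base_dispatch(base): run until no events remain or
     * event_base_loopexit()/event_base_loopbreak() is invoked. */
    int rc = event_base_loop(base, 0);

    /* Poll once without blocking: */
    rc = event_base_loop(base, EVLOOP_NONBLOCK);

    /* Block until one batch of events fires, then return: */
    rc = event_base_loop(base, EVLOOP_ONCE);
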
2025 /* One-time callback to implement event_base_once: invokes the user callback,
2032 (*eonce->cb)(fd, events, eonce->arg); in event_once_cb()
2033 EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock); in event_once_cb()
2035 EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock); in event_once_cb()
2036 event_debug_unassign(&eonce->ev); in event_once_cb()
2044 void *arg, const struct timeval *tv) in event_once() argument
2046 return event_base_once(current_base, fd, events, callback, arg, tv); in event_once()
2053 void *arg, const struct timeval *tv) in event_base_once() argument
2060 return (-1); in event_base_once()
2065 return (-1); in event_base_once()
2068 return (-1); in event_base_once()
2070 eonce->cb = callback; in event_base_once()
2071 eonce->arg = arg; in event_base_once()
2074 evtimer_assign(&eonce->ev, base, event_once_cb, eonce); in event_base_once()
2076 if (tv == NULL || ! evutil_timerisset(tv)) { in event_base_once()
2080 * it fast (and order-preserving). */ in event_base_once()
2086 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce); in event_base_once()
2090 return (-1); in event_base_once()
2096 event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1); in event_base_once()
2098 res = event_add_nolock_(&eonce->ev, tv, 0); in event_base_once()
2104 LIST_INSERT_HEAD(&base->once_events, eonce, next_once); in event_base_once()
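
A usage sketch for event_base_once(): the library owns the event_once wrapper, fires the callback exactly once, then frees it, which is why no handle is returned for cancellation:

    static void
    once_cb(evutil_socket_t fd, short what, void *arg)
    {
        /* runs exactly once */
    }

    struct timeval half_second = { 0, 500000 };
    event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &half_second);
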
2124 ev->ev_base = base; in event_assign()
2126 ev->ev_callback = callback; in event_assign()
2127 ev->ev_arg = arg; in event_assign()
2128 ev->ev_fd = fd; in event_assign()
2129 ev->ev_events = events; in event_assign()
2130 ev->ev_res = 0; in event_assign()
2131 ev->ev_flags = EVLIST_INIT; in event_assign()
2132 ev->ev_ncalls = 0; in event_assign()
2133 ev->ev_pncalls = NULL; in event_assign()
2139 return -1; in event_assign()
2141 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL; in event_assign()
2144 evutil_timerclear(&ev->ev_io_timeout); in event_assign()
2145 ev->ev_closure = EV_CLOSURE_EVENT_PERSIST; in event_assign()
2147 ev->ev_closure = EV_CLOSURE_EVENT; in event_assign()
2155 ev->ev_pri = base->nactivequeues / 2; in event_assign()
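
event_assign() initializes caller-owned storage, unlike event_new(), which allocates. A sketch of the documented pattern, assuming a readable socket 'sock' and a callback 'read_cb':

    struct event ev;   /* caller-owned; must outlive its registration */

    if (event_assign(&ev, base, sock, EV_READ | EV_PERSIST, read_cb, &ev) == 0)
        event_add(&ev, NULL);
    /* Never call event_assign() again while the event is added;
     * event_del(&ev) first. */
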
2167 if (ev->ev_flags != EVLIST_INIT) in event_base_set()
2168 return (-1); in event_base_set()
2172 ev->ev_base = base; in event_base_set()
2173 ev->ev_pri = base->nactivequeues/2; in event_base_set()
2199 struct event_callback *evcb = base->current_event; in event_base_get_running_event()
2200 if (evcb->evcb_flags & EVLIST_INIT) in event_base_get_running_event()
2242 ev->ev_flags &= ~EVLIST_INIT; in event_debug_unassign()
2253 ev->ev_closure = closure; in event_finalize_nolock_()
2254 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb; in event_finalize_nolock_()
2256 ev->ev_flags |= EVLIST_FINALIZING; in event_finalize_nolock_()
2264 struct event_base *base = ev->ev_base; in event_finalize_impl_()
2266 event_warnx("%s: event has no event_base set.", __func__); in event_finalize_impl_()
2267 return -1; in event_finalize_impl_()
2292 if (evcb->evcb_flags & EVLIST_INIT) { in event_callback_finalize_nolock_()
2299 evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE; in event_callback_finalize_nolock_()
2300 evcb->evcb_cb_union.evcb_cbfinalize = cb; in event_callback_finalize_nolock_()
2302 evcb->evcb_flags |= EVLIST_FINALIZING; in event_callback_finalize_nolock_()
2333 if (evcb == base->current_event) { in event_callback_finalize_many_()
2351 * Sets the priority of an event - if an event is already scheduled in event_priority_set()
2360 if (ev->ev_flags & EVLIST_ACTIVE) in event_priority_set()
2361 return (-1); in event_priority_set()
2362 if (pri < 0 || pri >= ev->ev_base->nactivequeues) in event_priority_set()
2363 return (-1); in event_priority_set()
2365 ev->ev_pri = pri; in event_priority_set()
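
Connecting the two: event_base_priority_init() sizes the active queues, after which event_priority_set() picks a queue per event, conventionally before event_add() (the EVLIST_ACTIVE check above rejects changes while a callback is pending to run). A sketch, assuming fd and cb exist:

    event_base_priority_init(base, 3);   /* queues 0 (highest) .. 2 */

    struct event *urgent = event_new(base, fd, EV_READ | EV_PERSIST, cb, NULL);
    event_priority_set(urgent, 0);
    event_add(urgent, NULL);
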
2375 event_pending(const struct event *ev, short event, struct timeval *tv) in event_pending() argument
2379 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) { in event_pending()
2380 event_warnx("%s: event has no event_base set.", __func__); in event_pending()
2384 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); in event_pending()
2387 if (ev->ev_flags & EVLIST_INSERTED) in event_pending()
2388 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)); in event_pending()
2389 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) in event_pending()
2390 flags |= ev->ev_res; in event_pending()
2391 if (ev->ev_flags & EVLIST_TIMEOUT) in event_pending()
2397 if (tv != NULL && (flags & event & EV_TIMEOUT)) { in event_pending()
2398 struct timeval tmp = ev->ev_timeout; in event_pending()
2401 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv); in event_pending()
2404 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); in event_pending()
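
Usage matching the flag logic above; when EV_TIMEOUT is both requested and pending, tv receives the absolute wall-clock expiry (note the tv_clock_diff adjustment):

    struct timeval expiry;

    if (event_pending(ev, EV_READ | EV_TIMEOUT, &expiry))
        printf("pending; timer fires at %ld.%06ld\n",
            (long)expiry.tv_sec, (long)expiry.tv_usec);
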
2412 if (!(ev->ev_flags & EVLIST_INIT)) in event_initialized()
2424 *base_out = event->ev_base; in event_get_assignment()
2426 *fd_out = event->ev_fd; in event_get_assignment()
2428 *events_out = event->ev_events; in event_get_assignment()
2430 *callback_out = event->ev_callback; in event_get_assignment()
2432 *arg_out = event->ev_arg; in event_get_assignment()
2445 return ev->ev_fd; in event_get_fd()
2452 return ev->ev_base; in event_get_base()
2459 return ev->ev_events; in event_get_events()
2466 return ev->ev_callback; in event_get_callback()
2473 return ev->ev_arg; in event_get_callback_arg()
2480 return ev->ev_pri; in event_get_priority()
2484 event_add(struct event *ev, const struct timeval *tv) in event_add() argument
2488 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { in event_add()
2489 event_warnx("%s: event has no event_base set.", __func__); in event_add()
2490 return -1; in event_add()
2493 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); in event_add()
2495 res = event_add_nolock_(ev, tv, 0); in event_add()
2497 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); in event_add()
2513 r = send(base->th_notify_fd[1], buf, 1, 0); in evthread_notify_base_default()
2515 r = write(base->th_notify_fd[1], buf, 1); in evthread_notify_base_default()
2517 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0; in evthread_notify_base_default()
2529 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg)); in evthread_notify_base_eventfd()
2532 return (r < 0) ? -1 : 0; in evthread_notify_base_eventfd()
2544 if (!base->th_notify_fn) in evthread_notify_base()
2545 return -1; in evthread_notify_base()
2546 if (base->is_notify_pending) in evthread_notify_base()
2548 base->is_notify_pending = 1; in evthread_notify_base()
2549 return base->th_notify_fn(base); in evthread_notify_base()
2557 struct event_base *base = ev->ev_base; in event_remove_timer_nolock_()
2565 if (ev->ev_flags & EVLIST_TIMEOUT) { in event_remove_timer_nolock_()
2567 evutil_timerclear(&ev->ev_.ev_io.ev_timeout); in event_remove_timer_nolock_()
2578 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { in event_remove_timer()
2579 event_warnx("%s: event has no event_base set.", __func__); in event_remove_timer()
2580 return -1; in event_remove_timer()
2583 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); in event_remove_timer()
2587 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); in event_remove_timer()
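
A sketch of event_remove_timer() (available in libevent 2.1+): it strips only the timeout, leaving any EV_READ/EV_WRITE registration intact, per the EVLIST_TIMEOUT handling above (conn_ev is an assumed fd event):

    struct timeval thirty_seconds = { 30, 0 };

    event_add(conn_ev, &thirty_seconds);   /* fd watch + inactivity deadline */
    /* ... later: keep watching the fd, but cancel the deadline ... */
    event_remove_timer(conn_ev);
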
2593 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2594 * we treat tv as an absolute time, not as an interval to add to the current
2597 event_add_nolock_(struct event *ev, const struct timeval *tv, in event_add_nolock_() argument
2600 struct event_base *base = ev->ev_base; in event_add_nolock_()
2610 EV_SOCK_ARG(ev->ev_fd), in event_add_nolock_()
2611 ev->ev_events & EV_READ ? "EV_READ " : " ", in event_add_nolock_()
2612 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ", in event_add_nolock_()
2613 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ", in event_add_nolock_()
2614 tv ? "EV_TIMEOUT " : " ", in event_add_nolock_()
2615 ev->ev_callback)); in event_add_nolock_()
2617 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); in event_add_nolock_()
2619 if (ev->ev_flags & EVLIST_FINALIZING) { in event_add_nolock_()
2621 return (-1); in event_add_nolock_()
2628 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) { in event_add_nolock_()
2629 if (min_heap_reserve_(&base->timeheap, in event_add_nolock_()
2630 1 + min_heap_size_(&base->timeheap)) == -1) in event_add_nolock_()
2631 return (-1); /* ENOMEM == errno */ in event_add_nolock_()
2639 if (base->current_event == event_to_event_callback(ev) && in event_add_nolock_()
2640 (ev->ev_events & EV_SIGNAL) in event_add_nolock_()
2642 ++base->current_event_waiters; in event_add_nolock_()
2643 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); in event_add_nolock_()
2647 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) && in event_add_nolock_()
2648 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { in event_add_nolock_()
2649 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) in event_add_nolock_()
2650 res = evmap_io_add_(base, ev->ev_fd, ev); in event_add_nolock_()
2651 else if (ev->ev_events & EV_SIGNAL) in event_add_nolock_()
2652 res = evmap_signal_add_(base, (int)ev->ev_fd, ev); in event_add_nolock_()
2653 if (res != -1) in event_add_nolock_()
2666 if (res != -1 && tv != NULL) { in event_add_nolock_()
2676 * timeout value and re-add the event. in event_add_nolock_()
2678 * If tv_is_absolute, this was already set. in event_add_nolock_()
2680 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute) in event_add_nolock_()
2681 ev->ev_io_timeout = *tv; in event_add_nolock_()
2684 if (ev->ev_flags & EVLIST_TIMEOUT) { in event_add_nolock_()
2692 if ((ev->ev_flags & EVLIST_ACTIVE) && in event_add_nolock_()
2693 (ev->ev_res & EV_TIMEOUT)) { in event_add_nolock_()
2694 if (ev->ev_events & EV_SIGNAL) { in event_add_nolock_()
2698 if (ev->ev_ncalls && ev->ev_pncalls) { in event_add_nolock_()
2700 *ev->ev_pncalls = 0; in event_add_nolock_()
2709 common_timeout = is_common_timeout(tv, base); in event_add_nolock_()
2711 was_common = is_common_timeout(&ev->ev_timeout, base); in event_add_nolock_()
2712 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout); in event_add_nolock_()
2716 ev->ev_timeout = *tv; in event_add_nolock_()
2718 struct timeval tmp = *tv; in event_add_nolock_()
2720 evutil_timeradd(&now, &tmp, &ev->ev_timeout); in event_add_nolock_()
2721 ev->ev_timeout.tv_usec |= in event_add_nolock_()
2722 (tv->tv_usec & ~MICROSECONDS_MASK); in event_add_nolock_()
2724 evutil_timeradd(&now, tv, &ev->ev_timeout); in event_add_nolock_()
2729 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback)); in event_add_nolock_()
2739 get_common_timeout_list(base, &ev->ev_timeout); in event_add_nolock_()
2740 if (ev == TAILQ_FIRST(&ctl->events)) { in event_add_nolock_()
2753 else if ((top = min_heap_top_(&base->timeheap)) != NULL && in event_add_nolock_()
2754 evutil_timercmp(&top->ev_timeout, &now, <)) in event_add_nolock_()
2760 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) in event_add_nolock_()
2772 struct event_base *base = ev->ev_base; in event_del_()
2775 event_warnx("%s: event has no event_base set.", __func__); in event_del_()
2776 return -1; in event_del_()
2816 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback)); in event_del_nolock_()
2819 if (ev->ev_base == NULL) in event_del_nolock_()
2820 return (-1); in event_del_nolock_()
2822 EVENT_BASE_ASSERT_LOCKED(ev->ev_base); in event_del_nolock_()
2825 if (ev->ev_flags & EVLIST_FINALIZING) { in event_del_nolock_()
2831 base = ev->ev_base; in event_del_nolock_()
2833 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); in event_del_nolock_()
2836 if (ev->ev_events & EV_SIGNAL) { in event_del_nolock_()
2837 if (ev->ev_ncalls && ev->ev_pncalls) { in event_del_nolock_()
2839 *ev->ev_pncalls = 0; in event_del_nolock_()
2843 if (ev->ev_flags & EVLIST_TIMEOUT) { in event_del_nolock_()
2854 if (ev->ev_flags & EVLIST_ACTIVE) in event_del_nolock_()
2856 else if (ev->ev_flags & EVLIST_ACTIVE_LATER) in event_del_nolock_()
2859 if (ev->ev_flags & EVLIST_INSERTED) { in event_del_nolock_()
2861 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) in event_del_nolock_()
2862 res = evmap_io_del_(base, ev->ev_fd, ev); in event_del_nolock_()
2864 res = evmap_signal_del_(base, (int)ev->ev_fd, ev); in event_del_nolock_()
2877 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) in event_del_nolock_()
2885 * returns, it will be safe to free the user-supplied argument. in event_del_nolock_()
2889 base->current_event == event_to_event_callback(ev) && in event_del_nolock_()
2891 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) { in event_del_nolock_()
2892 ++base->current_event_waiters; in event_del_nolock_()
2893 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); in event_del_nolock_()
2903 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { in event_active()
2904 event_warnx("%s: event has no event_base set.", __func__); in event_active()
2908 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); in event_active()
2914 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); in event_active()
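
event_active() is the manual entry into the activation path below; a common use is waking an event from code that detected readiness out of band:

    /* Mark 'ev' ready with EV_READ; its callback runs on the next loop
     * iteration as if the fd had become readable. The last argument is
     * an ncalls count, meaningful only for EV_SIGNAL events. */
    event_active(ev, EV_READ, 0);
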
2924 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback)); in event_active_nolock_()
2926 base = ev->ev_base; in event_active_nolock_()
2929 if (ev->ev_flags & EVLIST_FINALIZING) { in event_active_nolock_()
2934 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { in event_active_nolock_()
2941 ev->ev_res |= res; in event_active_nolock_()
2944 ev->ev_res |= res; in event_active_nolock_()
2947 ev->ev_res = res; in event_active_nolock_()
2951 if (ev->ev_pri < base->event_running_priority) in event_active_nolock_()
2952 base->event_continue = 1; in event_active_nolock_()
2954 if (ev->ev_events & EV_SIGNAL) { in event_active_nolock_()
2956 if (base->current_event == event_to_event_callback(ev) && in event_active_nolock_()
2958 ++base->current_event_waiters; in event_active_nolock_()
2959 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); in event_active_nolock_()
2962 ev->ev_ncalls = ncalls; in event_active_nolock_()
2963 ev->ev_pncalls = NULL; in event_active_nolock_()
2972 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); in event_active_later_()
2974 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); in event_active_later_()
2980 struct event_base *base = ev->ev_base; in event_active_later_nolock_()
2983 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { in event_active_later_nolock_()
2985 ev->ev_res |= res; in event_active_later_nolock_()
2989 ev->ev_res = res; in event_active_later_nolock_()
3011 if (evcb->evcb_flags & EVLIST_FINALIZING) in event_callback_activate_nolock_()
3014 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { in event_callback_activate_nolock_()
3040 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) in event_callback_activate_later_nolock_()
3054 cb->evcb_pri = base->nactivequeues - 1; in event_callback_init_()
3072 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing) in event_callback_cancel_nolock_()
3075 if (evcb->evcb_flags & EVLIST_INIT) in event_callback_cancel_nolock_()
3079 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { in event_callback_cancel_nolock_()
3102 cb->evcb_cb_union.evcb_selfcb = fn; in event_deferred_cb_init_()
3103 cb->evcb_arg = arg; in event_deferred_cb_init_()
3104 cb->evcb_pri = priority; in event_deferred_cb_init_()
3105 cb->evcb_closure = EV_CLOSURE_CB_SELF; in event_deferred_cb_init_()
3111 cb->evcb_pri = priority; in event_deferred_cb_set_priority_()
3130 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) { in event_deferred_cb_schedule_()
3135 ++base->n_deferreds_queued; in event_deferred_cb_schedule_()
3148 struct timeval *tv = *tv_p; in timeout_next() local
3151 ev = min_heap_top_(&base->timeheap); in timeout_next()
3154 /* if no time-based events are active wait for I/O */ in timeout_next()
3159 if (gettime(base, &now) == -1) { in timeout_next()
3160 res = -1; in timeout_next()
3164 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) { in timeout_next()
3165 evutil_timerclear(tv); in timeout_next()
3169 evutil_timersub(&ev->ev_timeout, &now, tv); in timeout_next()
3171 EVUTIL_ASSERT(tv->tv_sec >= 0); in timeout_next()
3172 EVUTIL_ASSERT(tv->tv_usec >= 0); in timeout_next()
3173 …event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->… in timeout_next()
3187 if (min_heap_empty_(&base->timeheap)) { in timeout_process()
3193 while ((ev = min_heap_top_(&base->timeheap))) { in timeout_process()
3194 if (evutil_timercmp(&ev->ev_timeout, &now, >)) in timeout_process()
3201 ev, ev->ev_callback)); in timeout_process()
3214 base->event_count--/++;
3217 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3219 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3220 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3227 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) { in event_queue_remove_inserted()
3229 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED); in event_queue_remove_inserted()
3232 DECR_EVENT_COUNT(base, ev->ev_flags); in event_queue_remove_inserted()
3233 ev->ev_flags &= ~EVLIST_INSERTED; in event_queue_remove_inserted()
3239 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) { in event_queue_remove_active()
3244 DECR_EVENT_COUNT(base, evcb->evcb_flags); in event_queue_remove_active()
3245 evcb->evcb_flags &= ~EVLIST_ACTIVE; in event_queue_remove_active()
3246 base->event_count_active--; in event_queue_remove_active()
3248 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri], in event_queue_remove_active()
3255 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) { in event_queue_remove_active_later()
3260 DECR_EVENT_COUNT(base, evcb->evcb_flags); in event_queue_remove_active_later()
3261 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER; in event_queue_remove_active_later()
3262 base->event_count_active--; in event_queue_remove_active_later()
3264 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); in event_queue_remove_active_later()
3270 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) { in event_queue_remove_timeout()
3272 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT); in event_queue_remove_timeout()
3275 DECR_EVENT_COUNT(base, ev->ev_flags); in event_queue_remove_timeout()
3276 ev->ev_flags &= ~EVLIST_TIMEOUT; in event_queue_remove_timeout()
3278 if (is_common_timeout(&ev->ev_timeout, base)) { in event_queue_remove_timeout()
3280 get_common_timeout_list(base, &ev->ev_timeout); in event_queue_remove_timeout()
3281 TAILQ_REMOVE(&ctl->events, ev, in event_queue_remove_timeout()
3284 min_heap_erase_(&base->timeheap, ev); in event_queue_remove_timeout()
3295 if (!(ev->ev_flags & EVLIST_TIMEOUT)) { in event_queue_reinsert_timeout()
3302 ctl = base->common_timeout_queues[old_timeout_idx]; in event_queue_reinsert_timeout()
3303 TAILQ_REMOVE(&ctl->events, ev, in event_queue_reinsert_timeout()
3305 ctl = get_common_timeout_list(base, &ev->ev_timeout); in event_queue_reinsert_timeout()
3309 ctl = base->common_timeout_queues[old_timeout_idx]; in event_queue_reinsert_timeout()
3310 TAILQ_REMOVE(&ctl->events, ev, in event_queue_reinsert_timeout()
3312 min_heap_push_(&base->timeheap, ev); in event_queue_reinsert_timeout()
3315 min_heap_erase_(&base->timeheap, ev); in event_queue_reinsert_timeout()
3316 ctl = get_common_timeout_list(base, &ev->ev_timeout); in event_queue_reinsert_timeout()
3320 min_heap_adjust_(&base->timeheap, ev); in event_queue_reinsert_timeout()
3336 * ctl->events, since the timeout on each 'ev' is set to {the common in insert_common_timeout_inorder()
3342 TAILQ_FOREACH_REVERSE(e, &ctl->events, in insert_common_timeout_inorder()
3349 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout)); in insert_common_timeout_inorder()
3350 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) { in insert_common_timeout_inorder()
3351 TAILQ_INSERT_AFTER(&ctl->events, e, ev, in insert_common_timeout_inorder()
3356 TAILQ_INSERT_HEAD(&ctl->events, ev, in insert_common_timeout_inorder()
3365 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) { in event_queue_insert_inserted()
3367 ev, EV_SOCK_ARG(ev->ev_fd)); in event_queue_insert_inserted()
3371 INCR_EVENT_COUNT(base, ev->ev_flags); in event_queue_insert_inserted()
3373 ev->ev_flags |= EVLIST_INSERTED; in event_queue_insert_inserted()
3381 if (evcb->evcb_flags & EVLIST_ACTIVE) { in event_queue_insert_active()
3386 INCR_EVENT_COUNT(base, evcb->evcb_flags); in event_queue_insert_active()
3388 evcb->evcb_flags |= EVLIST_ACTIVE; in event_queue_insert_active()
3390 base->event_count_active++; in event_queue_insert_active()
3391 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); in event_queue_insert_active()
3392 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); in event_queue_insert_active()
3393 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], in event_queue_insert_active()
3401 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) { in event_queue_insert_active_later()
3406 INCR_EVENT_COUNT(base, evcb->evcb_flags); in event_queue_insert_active_later()
3407 evcb->evcb_flags |= EVLIST_ACTIVE_LATER; in event_queue_insert_active_later()
3408 base->event_count_active++; in event_queue_insert_active_later()
3409 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); in event_queue_insert_active_later()
3410 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); in event_queue_insert_active_later()
3411 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next); in event_queue_insert_active_later()
3419 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) { in event_queue_insert_timeout()
3421 ev, EV_SOCK_ARG(ev->ev_fd)); in event_queue_insert_timeout()
3425 INCR_EVENT_COUNT(base, ev->ev_flags); in event_queue_insert_timeout()
3427 ev->ev_flags |= EVLIST_TIMEOUT; in event_queue_insert_timeout()
3429 if (is_common_timeout(&ev->ev_timeout, base)) { in event_queue_insert_timeout()
3431 get_common_timeout_list(base, &ev->ev_timeout); in event_queue_insert_timeout()
3434 min_heap_push_(&base->timeheap, ev); in event_queue_insert_timeout()
3444 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { in event_queue_make_later_events_active()
3445 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); in event_queue_make_later_events_active()
3446 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE; in event_queue_make_later_events_active()
3447 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); in event_queue_make_later_events_active()
3448 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next); in event_queue_make_later_events_active()
3449 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF); in event_queue_make_later_events_active()
3468 * No thread-safe interface needed - the information should be the same
3475 return (current_base->evsel->name); in event_get_method()
3512 /* Windows calloc doesn't reliably set ENOMEM */ in event_mm_calloc_()
3594 base->is_notify_pending = 0; in evthread_notify_drain_eventfd()
3613 base->is_notify_pending = 0; in evthread_notify_drain_default()
3622 return -1; in evthread_make_base_notifiable()
3636 if (base->th_notify_fn != NULL) { in evthread_make_base_notifiable_nolock_()
3642 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) { in evthread_make_base_notifiable_nolock_()
3643 base->th_notify_fn = event_kq_notify_base_; in evthread_make_base_notifiable_nolock_()
3651 base->th_notify_fd[0] = evutil_eventfd_(0, in evthread_make_base_notifiable_nolock_()
3653 if (base->th_notify_fd[0] >= 0) { in evthread_make_base_notifiable_nolock_()
3654 base->th_notify_fd[1] = -1; in evthread_make_base_notifiable_nolock_()
3659 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) { in evthread_make_base_notifiable_nolock_()
3663 return -1; in evthread_make_base_notifiable_nolock_()
3666 base->th_notify_fn = notify; in evthread_make_base_notifiable_nolock_()
3669 event_assign(&base->th_notify, base, base->th_notify_fd[0], in evthread_make_base_notifiable_nolock_()
3673 base->th_notify.ev_flags |= EVLIST_INTERNAL; in evthread_make_base_notifiable_nolock_()
3674 event_priority_set(&base->th_notify, 0); in evthread_make_base_notifiable_nolock_()
3676 return event_add_nolock_(&base->th_notify, NULL, 0); in evthread_make_base_notifiable_nolock_()
3692 * the min-heap. */ in event_base_foreach_event_nolock_()
3693 for (u = 0; u < base->timeheap.n; ++u) { in event_base_foreach_event_nolock_()
3694 ev = base->timeheap.p[u]; in event_base_foreach_event_nolock_()
3695 if (ev->ev_flags & EVLIST_INSERTED) { in event_base_foreach_event_nolock_()
3704 * the min-heap. */ in event_base_foreach_event_nolock_()
3705 for (i = 0; i < base->n_common_timeouts; ++i) { in event_base_foreach_event_nolock_()
3707 base->common_timeout_queues[i]; in event_base_foreach_event_nolock_()
3708 TAILQ_FOREACH(ev, &ctl->events, in event_base_foreach_event_nolock_()
3710 if (ev->ev_flags & EVLIST_INSERTED) { in event_base_foreach_event_nolock_()
3721 for (i = 0; i < base->nactivequeues; ++i) { in event_base_foreach_event_nolock_()
3723 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) { in event_base_foreach_event_nolock_()
3724 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) { in event_base_foreach_event_nolock_()
3727 * timeout set */ in event_base_foreach_event_nolock_()
3745 const char *gloss = (e->ev_events & EV_SIGNAL) ? in dump_inserted_event_fn()
3748 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT))) in dump_inserted_event_fn()
3752 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), in dump_inserted_event_fn()
3753 (e->ev_events&EV_READ)?" Read":"", in dump_inserted_event_fn()
3754 (e->ev_events&EV_WRITE)?" Write":"", in dump_inserted_event_fn()
3755 (e->ev_events&EV_CLOSED)?" EOF":"", in dump_inserted_event_fn()
3756 (e->ev_events&EV_SIGNAL)?" Signal":"", in dump_inserted_event_fn()
3757 (e->ev_events&EV_PERSIST)?" Persist":"", in dump_inserted_event_fn()
3758 (e->ev_events&EV_ET)?" ET":"", in dump_inserted_event_fn()
3759 (e->ev_flags&EVLIST_INTERNAL)?" Internal":""); in dump_inserted_event_fn()
3760 if (e->ev_flags & EVLIST_TIMEOUT) { in dump_inserted_event_fn()
3761 struct timeval tv; in dump_inserted_event_fn() local
3762 tv.tv_sec = e->ev_timeout.tv_sec; in dump_inserted_event_fn()
3763 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK; in dump_inserted_event_fn()
3764 evutil_timeradd(&tv, &base->tv_clock_diff, &tv); in dump_inserted_event_fn()
3766 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK)); in dump_inserted_event_fn()
3779 const char *gloss = (e->ev_events & EV_SIGNAL) ? in dump_active_event_fn()
3782 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) in dump_active_event_fn()
3786 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri, in dump_active_event_fn()
3787 (e->ev_res&EV_READ)?" Read":"", in dump_active_event_fn()
3788 (e->ev_res&EV_WRITE)?" Write":"", in dump_active_event_fn()
3789 (e->ev_res&EV_CLOSED)?" EOF":"", in dump_active_event_fn()
3790 (e->ev_res&EV_SIGNAL)?" Signal":"", in dump_active_event_fn()
3791 (e->ev_res&EV_TIMEOUT)?" Timeout":"", in dump_active_event_fn()
3792 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"", in dump_active_event_fn()
3793 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":""); in dump_active_event_fn()
3804 return -1; in event_base_foreach_event()
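
event_base_foreach_event() visits the same three sources enumerated in event_base_foreach_event_nolock_() above (timeout min-heap, common-timeout queues, active queues) while holding the base lock, so the callback must not add, delete, or activate events. A counting sketch:

    static int
    count_cb(const struct event_base *base, const struct event *ev, void *arg)
    {
        int *count = arg;
        ++*count;
        return 0;   /* nonzero stops the walk */
    }

    int n = 0;
    event_base_foreach_event(base, count_cb, &n);
    printf("%d events installed\n", n);
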
3840 for (u = 0; u < base->timeheap.n; ++u) { in event_base_active_by_fd()
3841 ev = base->timeheap.p[u]; in event_base_active_by_fd()
3842 if (ev->ev_fd == fd) { in event_base_active_by_fd()
3847 for (i = 0; i < base->n_common_timeouts; ++i) { in event_base_active_by_fd()
3848 struct common_timeout_list *ctl = base->common_timeout_queues[i]; in event_base_active_by_fd()
3849 TAILQ_FOREACH(ev, &ctl->events, in event_base_active_by_fd()
3851 if (ev->ev_fd == fd) { in event_base_active_by_fd()
3874 base->virtual_event_count++; in event_base_add_virtual_()
3875 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count); in event_base_add_virtual_()
3883 EVUTIL_ASSERT(base->virtual_event_count > 0); in event_base_del_virtual_()
3884 base->virtual_event_count--; in event_base_del_virtual_()
3885 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base)) in event_base_del_virtual_()
3946 return -1; in event_global_setup_locks_()
3948 return -1; in event_global_setup_locks_()
3950 return -1; in event_global_setup_locks_()
3969 /* First do checks on the per-fd and per-signal lists */ in event_base_assert_ok_nolock_()
3973 for (i = 1; i < (int)base->timeheap.n; ++i) { in event_base_assert_ok_nolock_()
3974 int parent = (i - 1) / 2; in event_base_assert_ok_nolock_()
3976 ev = base->timeheap.p[i]; in event_base_assert_ok_nolock_()
3977 p_ev = base->timeheap.p[parent]; in event_base_assert_ok_nolock_()
3978 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT); in event_base_assert_ok_nolock_()
3979 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=)); in event_base_assert_ok_nolock_()
3980 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i); in event_base_assert_ok_nolock_()
3984 for (i = 0; i < base->n_common_timeouts; ++i) { in event_base_assert_ok_nolock_()
3985 struct common_timeout_list *ctl = base->common_timeout_queues[i]; in event_base_assert_ok_nolock_()
3988 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout); in event_base_assert_ok_nolock_()
3990 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) { in event_base_assert_ok_nolock_()
3992 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=)); in event_base_assert_ok_nolock_()
3993 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT); in event_base_assert_ok_nolock_()
3994 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base)); in event_base_assert_ok_nolock_()
3995 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i); in event_base_assert_ok_nolock_()
4002 for (i = 0; i < base->nactivequeues; ++i) { in event_base_assert_ok_nolock_()
4004 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next); in event_base_assert_ok_nolock_()
4005 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) { in event_base_assert_ok_nolock_()
4006 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE); in event_base_assert_ok_nolock_()
4007 EVUTIL_ASSERT(evcb->evcb_pri == i); in event_base_assert_ok_nolock_()
4014 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) { in event_base_assert_ok_nolock_()
4015 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER); in event_base_assert_ok_nolock_()
4019 EVUTIL_ASSERT(count == base->event_count_active); in event_base_assert_ok_nolock_()