/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif
#include <sys/types.h>
#include <sys/tree.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_RTSIG
extern const struct eventop rtsigops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_RTSIG
	&rtsigops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
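
/*
 * event_init() below walks this array in order and uses the first
 * backend whose init() succeeds, so the ordering above determines
 * which mechanism a given build actually runs with.
 */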

/* Global state */
struct event_list signalqueue;

struct event_base *current_base = NULL;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval *);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);

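/*
 * Ordering function for the timeout tree: order events by absolute
 * timeout, breaking ties by pointer identity so the red-black tree
 * always sees a strict total order, even for equal timeouts.
 */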
static int
compare(struct event *a, struct event *b)
{
	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
		return (-1);
	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
		return (1);
	if (a < b)
		return (-1);
	else if (a > b)
		return (1);
	return (0);
}

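/*
 * Fetch the current time, preferring a monotonic clock when available
 * so that pending timeouts are not disturbed by wall-clock adjustments.
 */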
static int
gettime(struct timeval *tp)
{
#ifdef HAVE_CLOCK_GETTIME
	struct timespec ts;

#ifdef HAVE_CLOCK_MONOTONIC
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
#else
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
#endif
		return (-1);
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;
#else
	gettimeofday(tp, NULL);
#endif

	return (0);
}

RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);

RB_GENERATE(event_tree, event, ev_timeout_node, compare);

void *
event_init(void)
{
	int i;

	if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;
	gettime(&current_base->event_tv);

	RB_INIT(&current_base->timetree);
	TAILQ_INIT(&current_base->eventqueue);
	TAILQ_INIT(&signalqueue);

	current_base->evbase = NULL;
	for (i = 0; eventops[i] && !current_base->evbase; i++) {
		current_base->evsel = eventops[i];

		current_base->evbase = current_base->evsel->init();
	}

	if (current_base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    current_base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(current_base, 1);

	return (current_base);
}

void
event_base_free(struct event_base *base)
{
	int i;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	assert(base);
	assert(TAILQ_EMPTY(&base->eventqueue));
	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base->evbase);

	free(base);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
	    sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Numerically lower
 * priorities are always processed before higher ones, so events with a
 * low priority number can starve those with higher numbers.
 */
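
/*
 * For example, after event_base_priority_init(base, 3) the valid
 * priorities are 0 through 2, and queue 0 is always drained first by
 * event_process_active() below.
 */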

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */

int
event_loopexit(struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

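/*
 * One pass of the loop: recalculate backend state, honor termination
 * requests and the deprecated signal callback, compensate for clocks
 * that jump backwards, dispatch, then run expired timeouts and active
 * events.  Returns 1 if no events remain, 0 after a requested exit,
 * and -1 on a backend failure.
 */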
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	int res, done;

	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		/* Check if time is running backwards */
		gettime(&tv);
		if (timercmp(&tv, &base->event_tv, <)) {
			struct timeval off;
			event_debug(("%s: time is running backwards, corrected",
				    __func__));
			timersub(&base->event_tv, &tv, &off);
			timeout_correct(base, &off);
		}
		base->event_tv = tv;

		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
			timeout_next(base, &tv);
		else
			timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, &tv);

		if (res == -1)
			return (-1);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback; it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* Schedules an event once */

int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
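
/*
 * Example (a sketch, not part of the library): schedule a callback to
 * fire once, two seconds from now.  my_cb and my_arg are placeholders
 * for the caller's own callback and argument.
 *
 *	struct timeval tv;
 *	timerclear(&tv);
 *	tv.tv_sec = 2;
 *	event_once(-1, EV_TIMEOUT, my_cb, my_arg, &tv);
 *
 * The event_once structure frees itself after the callback has run.
 */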

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* by default, we put new events into the middle priority */
	ev->ev_pri = current_base->nactivequeues / 2;
}
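
/*
 * The usual pattern (a sketch): initialize with event_set(), move the
 * event to a specific base with event_base_set() if the default base
 * is not wanted, then schedule it with event_add().
 */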

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}

/*
 * Sets the priority of an event - if the event is already active,
 * changing its priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}

int
event_add(struct event *ev, struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	if (tv != NULL) {
		struct timeval now;

		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(&now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 (long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		event_queue_insert(base, ev, EVLIST_INSERTED);

		return (evsel->add(evbase, ev));
	} else if ((ev->ev_events & EV_SIGNAL) &&
	    !(ev->ev_flags & EVLIST_SIGNAL)) {
		event_queue_insert(base, ev, EVLIST_SIGNAL);

		return (evsel->add(evbase, ev));
	}

	return (0);
}

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
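
/*
 * Mark an event active: if it is already on the active queue, the
 * result codes are simply OR'd together; otherwise it is queued with
 * the given call count.
 */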

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

int
timeout_next(struct event_base *base, struct timeval *tv)
{
	struct timeval dflt = TIMEOUT_DEFAULT;

	struct timeval now;
	struct event *ev;

	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
		*tv = dflt;
		return (0);
	}

	if (gettime(&now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", (long)tv->tv_sec));
	return (0);
}

static void
timeout_correct(struct event_base *base, struct timeval *off)
{
	struct event *ev;

	/*
	 * We can modify the key element of each node without destroying
	 * the tree's ordering, because we apply the same offset to all
	 * nodes in order.
	 */
	RB_FOREACH(ev, event_tree, &base->timetree)
		timersub(&ev->ev_timeout, off, &ev->ev_timeout);
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev, *next;

	gettime(&now);

	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;
		next = RB_NEXT(event_tree, &base->timetree, ev);

		event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT:
		RB_REMOVE(event_tree, &base->timetree, ev);
		break;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT: {
		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
		assert(tmp == NULL);
		break;
	}
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}