/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef EVENT__HAVE_KQUEUE

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef EVENT__HAVE_INTTYPES_H
#include <inttypes.h>
#endif

/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__)
#define PTR_TO_UDATA(x)	((intptr_t)(x))
#define INT_TO_UDATA(x)	((intptr_t)(x))
#else
#define PTR_TO_UDATA(x)	(x)
#define INT_TO_UDATA(x)	((void*)(x))
#endif
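/* Either way, callers write e.g. "kev.udata = INT_TO_UDATA(ADD_UDATA);" and
 * the macro supplies whichever cast (intptr_t or void*) the platform's
 * struct kevent expects. */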
#include "event-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "changelist-internal.h"

#include "kqueue-internal.h"

#define NEVENT	64

struct kqop {
	struct kevent *changes;		/* changes to hand to kevent() */
	int changes_size;

	struct kevent *events;		/* scratch space for returned events */
	int events_size;
	int kq;				/* the kqueue descriptor itself */
	int notify_event_added;		/* true once the EVFILT_USER wakeup
					 * event has been registered */
	pid_t pid;			/* pid of the process that created kq */
};

static void kqop_free(struct kqop *kqop);

static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);

const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add_,
	event_changelist_del_,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};

static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};
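/* Note on the two tables above: kqops is the main backend and drives I/O
 * through the changelist, while kqsigops only installs and removes
 * EVFILT_SIGNAL filters; kq_init() below hooks it up as base->evsigsel so
 * signals are delivered through the same kqueue rather than the generic
 * signal machinery. */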
static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

	/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    !(kqueueop->events[0].flags & EV_ERROR)) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}

#define ADD_UDATA 0x30303

static void
kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
{
	memset(out, 0, sizeof(struct kevent));
	out->ident = fd;
	out->filter = filter;

	if (change & EV_CHANGE_ADD) {
		out->flags = EV_ADD;
		/* We set a magic number here so that we can tell 'add'
		 * errors from 'del' errors. */
		out->udata = INT_TO_UDATA(ADD_UDATA);
		if (change & EV_ET)
			out->flags |= EV_CLEAR;
#ifdef NOTE_EOF
		/* Make it behave like select() and poll() */
		if (filter == EVFILT_READ)
			out->fflags = NOTE_EOF;
#endif
	} else {
		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
		out->flags = EV_DELETE;
	}
}
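/* As a sketch of what the translation above produces: an
 * EV_CHANGE_ADD|EV_ET read change on fd 5 becomes roughly
 *
 *	{ .ident = 5, .filter = EVFILT_READ, .flags = EV_ADD|EV_CLEAR,
 *	  .fflags = NOTE_EOF, .udata = INT_TO_UDATA(ADD_UDATA) }
 *
 * (NOTE_EOF only on platforms that define it), while a delete becomes just
 * { .ident = 5, .filter = EVFILT_READ, .flags = EV_DELETE }. */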
static int
kq_build_changes_list(const struct event_changelist *changelist,
    struct kqop *kqop)
{
	int i;
	int n_changes = 0;

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *in_ch = &changelist->changes[i];
		struct kevent *out_ch;
		if (n_changes >= kqop->changes_size - 1) {
			int newsize = kqop->changes_size * 2;
			struct kevent *newchanges;

			newchanges = mm_realloc(kqop->changes,
			    newsize * sizeof(struct kevent));
			if (newchanges == NULL) {
				event_warn("%s: realloc", __func__);
				return (-1);
			}
			kqop->changes = newchanges;
			kqop->changes_size = newsize;
		}
		if (in_ch->read_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
			    in_ch->read_change);
		}
		if (in_ch->write_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
			    in_ch->write_change);
		}
	}
	return n_changes;
}
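/* Note that a single event_change can expand into two kevents (one for the
 * read filter and one for the write filter), which is why the growth check
 * above already triggers when fewer than two free slots remain. */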
static int
kq_grow_events(struct kqop *kqop, size_t new_size)
{
	struct kevent *newresult;

	newresult = mm_realloc(kqop->events,
	    new_size * sizeof(struct kevent));

	if (newresult) {
		kqop->events = newresult;
		kqop->events_size = new_size;
		return 0;
	} else {
		return -1;
	}
}

static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* Drop the lock while we sleep in kevent(); other threads can then
	 * modify the base and wake us via event_kq_notify_base_(). */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif
			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active_(base, events[i].ident, 1);
		} else {
			evmap_io_active_(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have.  Maybe we should
		 * make it bigger.  (If the realloc fails we simply keep the
		 * old size and try again on a later call.) */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
static void
kqop_free(struct kqop *kqop)
{
	if (kqop->changes)
		mm_free(kqop->changes);
	if (kqop->events)
		mm_free(kqop->events);
	/* A kqueue is not inherited across fork(), so in a child the stored
	 * descriptor number may already refer to something unrelated; only
	 * close it in the process that created it. */
	if (kqop->kq >= 0 && kqop->pid == getpid())
		close(kqop->kq);
	memset(kqop, 0, sizeof(struct kqop));
	mm_free(kqop);
}

static void
kq_dealloc(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
	evsig_dealloc_(base);
	kqop_free(kqop);
}

/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (evsig_set_handler_(base, nsignal,
		nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
		return (-1);

	return (0);
}
static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;

	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_DELETE;

	/* Because we insert signal events
	 * immediately, we need to delete them
	 * immediately, too */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	if (evsig_restore_handler_(base, nsignal) == -1)
		return (-1);

	return (0);
}


/* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use
 * to wake up the event loop from another thread. */

/* Magic number we use for our filter ID. */
#define NOTIFY_IDENT 42
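/* The pattern, in brief: event_kq_add_notify_event_() registers a single
 * user-triggered event once (EV_ADD|EV_CLEAR under the NOTIFY_IDENT ident),
 * and event_kq_notify_base_() later fires it with NOTE_TRIGGER, which makes
 * a kevent() call sleeping in kq_dispatch() return immediately. */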
int
event_kq_add_notify_event_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif

	if (kqop->notify_event_added)
		return 0;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	kev.flags = EV_ADD | EV_CLEAR;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: adding EVFILT_USER event");
		return -1;
	}

	kqop->notify_event_added = 1;

	return 0;
#else
	return -1;
#endif
}

int
event_kq_notify_base_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif
	if (!kqop->notify_event_added)
		return -1;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	kev.fflags = NOTE_TRIGGER;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: triggering EVFILT_USER event");
		return -1;
	}

	return 0;
#else
	return -1;
#endif
}

#endif /* EVENT__HAVE_KQUEUE */