1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_ktrace.h" 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/lock.h> 37 #include <sys/mutex.h> 38 #include <sys/proc.h> 39 #include <sys/malloc.h> 40 #include <sys/unistd.h> 41 #include <sys/file.h> 42 #include <sys/filedesc.h> 43 #include <sys/filio.h> 44 #include <sys/fcntl.h> 45 #include <sys/kthread.h> 46 #include <sys/selinfo.h> 47 #include <sys/queue.h> 48 #include <sys/event.h> 49 #include <sys/eventvar.h> 50 #include <sys/poll.h> 51 #include <sys/protosw.h> 52 #include <sys/sigio.h> 53 #include <sys/signalvar.h> 54 #include <sys/socket.h> 55 #include <sys/socketvar.h> 56 #include <sys/stat.h> 57 #include <sys/sysctl.h> 58 #include <sys/sysproto.h> 59 #include <sys/syscallsubr.h> 60 #include <sys/taskqueue.h> 61 #include <sys/uio.h> 62 #ifdef KTRACE 63 #include <sys/ktrace.h> 64 #endif 65 66 #include <vm/uma.h> 67 68 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 69 70 /* 71 * This lock is used if multiple kq locks are required. This possibly 72 * should be made into a per proc lock. 
73 */ 74 static struct mtx kq_global; 75 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 76 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 77 if (!haslck) \ 78 mtx_lock(lck); \ 79 haslck = 1; \ 80 } while (0) 81 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 82 if (haslck) \ 83 mtx_unlock(lck); \ 84 haslck = 0; \ 85 } while (0) 86 87 TASKQUEUE_DEFINE_THREAD(kqueue); 88 89 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 90 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 91 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 92 struct thread *td, int waitok); 93 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 94 static void kqueue_release(struct kqueue *kq, int locked); 95 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 96 uintptr_t ident, int waitok); 97 static void kqueue_task(void *arg, int pending); 98 static int kqueue_scan(struct kqueue *kq, int maxevents, 99 struct kevent_copyops *k_ops, 100 const struct timespec *timeout, 101 struct kevent *keva, struct thread *td); 102 static void kqueue_wakeup(struct kqueue *kq); 103 static struct filterops *kqueue_fo_find(int filt); 104 static void kqueue_fo_release(int filt); 105 106 static fo_rdwr_t kqueue_read; 107 static fo_rdwr_t kqueue_write; 108 static fo_truncate_t kqueue_truncate; 109 static fo_ioctl_t kqueue_ioctl; 110 static fo_poll_t kqueue_poll; 111 static fo_kqfilter_t kqueue_kqfilter; 112 static fo_stat_t kqueue_stat; 113 static fo_close_t kqueue_close; 114 115 static struct fileops kqueueops = { 116 .fo_read = kqueue_read, 117 .fo_write = kqueue_write, 118 .fo_truncate = kqueue_truncate, 119 .fo_ioctl = kqueue_ioctl, 120 .fo_poll = kqueue_poll, 121 .fo_kqfilter = kqueue_kqfilter, 122 .fo_stat = kqueue_stat, 123 .fo_close = kqueue_close, 124 }; 125 126 static int knote_attach(struct knote *kn, struct kqueue *kq); 127 static void knote_drop(struct knote *kn, struct thread *td); 128 static void knote_enqueue(struct knote *kn); 129 static void knote_dequeue(struct knote *kn); 130 static void knote_init(void); 131 static struct knote *knote_alloc(int waitok); 132 static void knote_free(struct knote *kn); 133 134 static void filt_kqdetach(struct knote *kn); 135 static int filt_kqueue(struct knote *kn, long hint); 136 static int filt_procattach(struct knote *kn); 137 static void filt_procdetach(struct knote *kn); 138 static int filt_proc(struct knote *kn, long hint); 139 static int filt_fileattach(struct knote *kn); 140 static void filt_timerexpire(void *knx); 141 static int filt_timerattach(struct knote *kn); 142 static void filt_timerdetach(struct knote *kn); 143 static int filt_timer(struct knote *kn, long hint); 144 145 static struct filterops file_filtops = 146 { 1, filt_fileattach, NULL, NULL }; 147 static struct filterops kqread_filtops = 148 { 1, NULL, filt_kqdetach, filt_kqueue }; 149 /* XXX - move to kern_proc.c? */ 150 static struct filterops proc_filtops = 151 { 0, filt_procattach, filt_procdetach, filt_proc }; 152 static struct filterops timer_filtops = 153 { 0, filt_timerattach, filt_timerdetach, filt_timer }; 154 155 static uma_zone_t knote_zone; 156 static int kq_ncallouts = 0; 157 static int kq_calloutmax = (4 * 1024); 158 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 159 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 160 161 /* XXX - ensure not KN_INFLUX?? 
*/ 162 #define KNOTE_ACTIVATE(kn, islock) do { \ 163 if ((islock)) \ 164 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 165 else \ 166 KQ_LOCK((kn)->kn_kq); \ 167 (kn)->kn_status |= KN_ACTIVE; \ 168 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 169 knote_enqueue((kn)); \ 170 if (!(islock)) \ 171 KQ_UNLOCK((kn)->kn_kq); \ 172 } while(0) 173 #define KQ_LOCK(kq) do { \ 174 mtx_lock(&(kq)->kq_lock); \ 175 } while (0) 176 #define KQ_FLUX_WAKEUP(kq) do { \ 177 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 178 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 179 wakeup((kq)); \ 180 } \ 181 } while (0) 182 #define KQ_UNLOCK_FLUX(kq) do { \ 183 KQ_FLUX_WAKEUP(kq); \ 184 mtx_unlock(&(kq)->kq_lock); \ 185 } while (0) 186 #define KQ_UNLOCK(kq) do { \ 187 mtx_unlock(&(kq)->kq_lock); \ 188 } while (0) 189 #define KQ_OWNED(kq) do { \ 190 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 191 } while (0) 192 #define KQ_NOTOWNED(kq) do { \ 193 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 194 } while (0) 195 #define KN_LIST_LOCK(kn) do { \ 196 if (kn->kn_knlist != NULL) \ 197 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg); \ 198 } while (0) 199 #define KN_LIST_UNLOCK(kn) do { \ 200 if (kn->kn_knlist != NULL) \ 201 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg); \ 202 } while (0) 203 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 204 if (islocked) \ 205 KNL_ASSERT_LOCKED(knl); \ 206 else \ 207 KNL_ASSERT_UNLOCKED(knl); \ 208 } while (0) 209 #ifdef INVARIANTS 210 #define KNL_ASSERT_LOCKED(knl) do { \ 211 if (!knl->kl_locked((knl)->kl_lockarg)) \ 212 panic("knlist not locked, but should be"); \ 213 } while (0) 214 #define KNL_ASSERT_UNLOCKED(knl) do { \ 215 if (knl->kl_locked((knl)->kl_lockarg)) \ 216 panic("knlist locked, but should not be"); \ 217 } while (0) 218 #else /* !INVARIANTS */ 219 #define KNL_ASSERT_LOCKED(knl) do {} while(0) 220 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 221 #endif /* INVARIANTS */ 222 223 #define KN_HASHSIZE 64 /* XXX should be tunable */ 224 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 225 226 static int 227 filt_nullattach(struct knote *kn) 228 { 229 230 return (ENXIO); 231 }; 232 233 struct filterops null_filtops = 234 { 0, filt_nullattach, NULL, NULL }; 235 236 /* XXX - make SYSINIT to add these, and move into respective modules. */ 237 extern struct filterops sig_filtops; 238 extern struct filterops fs_filtops; 239 240 /* 241 * Table for for all system-defined filters. 242 */ 243 static struct mtx filterops_lock; 244 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 245 MTX_DEF); 246 static struct { 247 struct filterops *for_fop; 248 int for_refcnt; 249 } sysfilt_ops[EVFILT_SYSCOUNT] = { 250 { &file_filtops }, /* EVFILT_READ */ 251 { &file_filtops }, /* EVFILT_WRITE */ 252 { &null_filtops }, /* EVFILT_AIO */ 253 { &file_filtops }, /* EVFILT_VNODE */ 254 { &proc_filtops }, /* EVFILT_PROC */ 255 { &sig_filtops }, /* EVFILT_SIGNAL */ 256 { &timer_filtops }, /* EVFILT_TIMER */ 257 { &file_filtops }, /* EVFILT_NETDEV */ 258 { &fs_filtops }, /* EVFILT_FS */ 259 { &null_filtops }, /* EVFILT_LIO */ 260 }; 261 262 /* 263 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 264 * method. 
265 */ 266 static int 267 filt_fileattach(struct knote *kn) 268 { 269 270 return (fo_kqfilter(kn->kn_fp, kn)); 271 } 272 273 /*ARGSUSED*/ 274 static int 275 kqueue_kqfilter(struct file *fp, struct knote *kn) 276 { 277 struct kqueue *kq = kn->kn_fp->f_data; 278 279 if (kn->kn_filter != EVFILT_READ) 280 return (EINVAL); 281 282 kn->kn_status |= KN_KQUEUE; 283 kn->kn_fop = &kqread_filtops; 284 knlist_add(&kq->kq_sel.si_note, kn, 0); 285 286 return (0); 287 } 288 289 static void 290 filt_kqdetach(struct knote *kn) 291 { 292 struct kqueue *kq = kn->kn_fp->f_data; 293 294 knlist_remove(&kq->kq_sel.si_note, kn, 0); 295 } 296 297 /*ARGSUSED*/ 298 static int 299 filt_kqueue(struct knote *kn, long hint) 300 { 301 struct kqueue *kq = kn->kn_fp->f_data; 302 303 kn->kn_data = kq->kq_count; 304 return (kn->kn_data > 0); 305 } 306 307 /* XXX - move to kern_proc.c? */ 308 static int 309 filt_procattach(struct knote *kn) 310 { 311 struct proc *p; 312 int immediate; 313 int error; 314 315 immediate = 0; 316 p = pfind(kn->kn_id); 317 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 318 p = zpfind(kn->kn_id); 319 immediate = 1; 320 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 321 immediate = 1; 322 } 323 324 if (p == NULL) 325 return (ESRCH); 326 if ((error = p_cansee(curthread, p))) 327 return (error); 328 329 kn->kn_ptr.p_proc = p; 330 kn->kn_flags |= EV_CLEAR; /* automatically set */ 331 332 /* 333 * internal flag indicating registration done by kernel 334 */ 335 if (kn->kn_flags & EV_FLAG1) { 336 kn->kn_data = kn->kn_sdata; /* ppid */ 337 kn->kn_fflags = NOTE_CHILD; 338 kn->kn_flags &= ~EV_FLAG1; 339 } 340 341 if (immediate == 0) 342 knlist_add(&p->p_klist, kn, 1); 343 344 /* 345 * Immediately activate any exit notes if the target process is a 346 * zombie. This is necessary to handle the case where the target 347 * process, e.g. a child, dies before the kevent is registered. 348 */ 349 if (immediate && filt_proc(kn, NOTE_EXIT)) 350 KNOTE_ACTIVATE(kn, 0); 351 352 PROC_UNLOCK(p); 353 354 return (0); 355 } 356 357 /* 358 * The knote may be attached to a different process, which may exit, 359 * leaving nothing for the knote to be attached to. So when the process 360 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 361 * it will be deleted when read out. However, as part of the knote deletion, 362 * this routine is called, so a check is needed to avoid actually performing 363 * a detach, because the original process does not exist any more. 364 */ 365 /* XXX - move to kern_proc.c? */ 366 static void 367 filt_procdetach(struct knote *kn) 368 { 369 struct proc *p; 370 371 p = kn->kn_ptr.p_proc; 372 knlist_remove(&p->p_klist, kn, 0); 373 kn->kn_ptr.p_proc = NULL; 374 } 375 376 /* XXX - move to kern_proc.c? */ 377 static int 378 filt_proc(struct knote *kn, long hint) 379 { 380 struct proc *p = kn->kn_ptr.p_proc; 381 u_int event; 382 383 /* 384 * mask off extra data 385 */ 386 event = (u_int)hint & NOTE_PCTRLMASK; 387 388 /* 389 * if the user is interested in this event, record it. 390 */ 391 if (kn->kn_sfflags & event) 392 kn->kn_fflags |= event; 393 394 /* 395 * process is gone, so flag the event as finished. 396 */ 397 if (event == NOTE_EXIT) { 398 if (!(kn->kn_status & KN_DETACHED)) 399 knlist_remove_inevent(&p->p_klist, kn); 400 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 401 kn->kn_data = p->p_xstat; 402 kn->kn_ptr.p_proc = NULL; 403 return (1); 404 } 405 406 return (kn->kn_fflags != 0); 407 } 408 409 /* 410 * Called when the process forked. 
It mostly does the same as the 411 * knote(), activating all knotes registered to be activated when the 412 * process forked. Additionally, for each knote attached to the 413 * parent, check whether user wants to track the new process. If so 414 * attach a new knote to it, and immediately report an event with the 415 * child's pid. 416 */ 417 void 418 knote_fork(struct knlist *list, int pid) 419 { 420 struct kqueue *kq; 421 struct knote *kn; 422 struct kevent kev; 423 int error; 424 425 if (list == NULL) 426 return; 427 list->kl_lock(list->kl_lockarg); 428 429 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 430 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 431 continue; 432 kq = kn->kn_kq; 433 KQ_LOCK(kq); 434 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 435 KQ_UNLOCK(kq); 436 continue; 437 } 438 439 /* 440 * The same as knote(), activate the event. 441 */ 442 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 443 kn->kn_status |= KN_HASKQLOCK; 444 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid)) 445 KNOTE_ACTIVATE(kn, 1); 446 kn->kn_status &= ~KN_HASKQLOCK; 447 KQ_UNLOCK(kq); 448 continue; 449 } 450 451 /* 452 * The NOTE_TRACK case. In addition to the activation 453 * of the event, we need to register new event to 454 * track the child. Drop the locks in preparation for 455 * the call to kqueue_register(). 456 */ 457 kn->kn_status |= KN_INFLUX; 458 KQ_UNLOCK(kq); 459 list->kl_unlock(list->kl_lockarg); 460 461 /* 462 * Activate existing knote and register a knote with 463 * new process. 464 */ 465 kev.ident = pid; 466 kev.filter = kn->kn_filter; 467 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 468 kev.fflags = kn->kn_sfflags; 469 kev.data = kn->kn_id; /* parent */ 470 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 471 error = kqueue_register(kq, &kev, NULL, 0); 472 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid)) 473 KNOTE_ACTIVATE(kn, 0); 474 if (error) 475 kn->kn_fflags |= NOTE_TRACKERR; 476 KQ_LOCK(kq); 477 kn->kn_status &= ~KN_INFLUX; 478 KQ_UNLOCK_FLUX(kq); 479 list->kl_lock(list->kl_lockarg); 480 } 481 list->kl_unlock(list->kl_lockarg); 482 } 483 484 static int 485 timertoticks(intptr_t data) 486 { 487 struct timeval tv; 488 int tticks; 489 490 tv.tv_sec = data / 1000; 491 tv.tv_usec = (data % 1000) * 1000; 492 tticks = tvtohz(&tv); 493 494 return tticks; 495 } 496 497 /* XXX - move to kern_timeout.c? */ 498 static void 499 filt_timerexpire(void *knx) 500 { 501 struct knote *kn = knx; 502 struct callout *calloutp; 503 504 kn->kn_data++; 505 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 506 507 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) { 508 calloutp = (struct callout *)kn->kn_hook; 509 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata), 510 filt_timerexpire, kn); 511 } 512 } 513 514 /* 515 * data contains amount of time to sleep, in milliseconds 516 */ 517 /* XXX - move to kern_timeout.c? */ 518 static int 519 filt_timerattach(struct knote *kn) 520 { 521 struct callout *calloutp; 522 523 atomic_add_int(&kq_ncallouts, 1); 524 525 if (kq_ncallouts >= kq_calloutmax) { 526 atomic_add_int(&kq_ncallouts, -1); 527 return (ENOMEM); 528 } 529 530 kn->kn_flags |= EV_CLEAR; /* automatically set */ 531 kn->kn_status &= ~KN_DETACHED; /* knlist_add usually sets it */ 532 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 533 callout_init(calloutp, CALLOUT_MPSAFE); 534 kn->kn_hook = calloutp; 535 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata), 536 filt_timerexpire, kn); 537 538 return (0); 539 } 540 541 /* XXX - move to kern_timeout.c? 
*/ 542 static void 543 filt_timerdetach(struct knote *kn) 544 { 545 struct callout *calloutp; 546 547 calloutp = (struct callout *)kn->kn_hook; 548 callout_drain(calloutp); 549 free(calloutp, M_KQUEUE); 550 atomic_add_int(&kq_ncallouts, -1); 551 kn->kn_status |= KN_DETACHED; /* knlist_remove usually clears it */ 552 } 553 554 /* XXX - move to kern_timeout.c? */ 555 static int 556 filt_timer(struct knote *kn, long hint) 557 { 558 559 return (kn->kn_data != 0); 560 } 561 562 int 563 kqueue(struct thread *td, struct kqueue_args *uap) 564 { 565 struct filedesc *fdp; 566 struct kqueue *kq; 567 struct file *fp; 568 int fd, error; 569 570 fdp = td->td_proc->p_fd; 571 error = falloc(td, &fp, &fd); 572 if (error) 573 goto done2; 574 575 /* An extra reference on `nfp' has been held for us by falloc(). */ 576 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 577 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK); 578 TAILQ_INIT(&kq->kq_head); 579 kq->kq_fdp = fdp; 580 knlist_init(&kq->kq_sel.si_note, &kq->kq_lock, NULL, NULL, NULL); 581 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 582 583 FILEDESC_XLOCK(fdp); 584 SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 585 FILEDESC_XUNLOCK(fdp); 586 587 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 588 fdrop(fp, td); 589 590 td->td_retval[0] = fd; 591 done2: 592 return (error); 593 } 594 595 #ifndef _SYS_SYSPROTO_H_ 596 struct kevent_args { 597 int fd; 598 const struct kevent *changelist; 599 int nchanges; 600 struct kevent *eventlist; 601 int nevents; 602 const struct timespec *timeout; 603 }; 604 #endif 605 int 606 kevent(struct thread *td, struct kevent_args *uap) 607 { 608 struct timespec ts, *tsp; 609 struct kevent_copyops k_ops = { uap, 610 kevent_copyout, 611 kevent_copyin}; 612 int error; 613 #ifdef KTRACE 614 struct uio ktruio; 615 struct iovec ktriov; 616 struct uio *ktruioin = NULL; 617 struct uio *ktruioout = NULL; 618 #endif 619 620 if (uap->timeout != NULL) { 621 error = copyin(uap->timeout, &ts, sizeof(ts)); 622 if (error) 623 return (error); 624 tsp = &ts; 625 } else 626 tsp = NULL; 627 628 #ifdef KTRACE 629 if (KTRPOINT(td, KTR_GENIO)) { 630 ktriov.iov_base = uap->changelist; 631 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 632 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 633 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 634 .uio_td = td }; 635 ktruioin = cloneuio(&ktruio); 636 ktriov.iov_base = uap->eventlist; 637 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 638 ktruioout = cloneuio(&ktruio); 639 } 640 #endif 641 642 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 643 &k_ops, tsp); 644 645 #ifdef KTRACE 646 if (ktruioin != NULL) { 647 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 648 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 649 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 650 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 651 } 652 #endif 653 654 return (error); 655 } 656 657 /* 658 * Copy 'count' items into the destination list pointed to by uap->eventlist. 659 */ 660 static int 661 kevent_copyout(void *arg, struct kevent *kevp, int count) 662 { 663 struct kevent_args *uap; 664 int error; 665 666 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 667 uap = (struct kevent_args *)arg; 668 669 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 670 if (error == 0) 671 uap->eventlist += count; 672 return (error); 673 } 674 675 /* 676 * Copy 'count' items from the list pointed to by uap->changelist. 
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	struct kqueue *kq;
	struct file *fp;
	int i, n, nerrors, error;

	if ((error = fget(td, fd, &fp)) != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto done_norel;

	nerrors = 0;

	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			goto done;
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) k_ops->k_copyout(k_ops->arg,
					    kevp, 1);
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
done:
	kqueue_release(kq, 0);
done_norel:
	fdrop(fp, td);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

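/*
 * Illustrative sketch (not part of this file, not compiled): how a driver
 * or module might plug a private filter into the sysfilt_ops table via
 * kqueue_add_filteropts()/kqueue_del_filteropts().  EVFILT_EXAMPLE, the
 * example_knlist, and the filt_example* callbacks are hypothetical names
 * used only to show the shape of the registration API.
 */
#if 0
static struct knlist example_knlist;

static int
filt_exampleattach(struct knote *kn)
{
	/* Hook the knote onto a knlist owned by the driver. */
	knlist_add(&example_knlist, kn, 0);
	return (0);
}

static void
filt_exampledetach(struct knote *kn)
{
	knlist_remove(&example_knlist, kn, 0);
}

static int
filt_example(struct knote *kn, long hint)
{
	/* Report readiness; kn_data carries filter-specific state. */
	kn->kn_data = hint;
	return (kn->kn_data != 0);
}

/* { f_isfd, f_attach, f_detach, f_event }, as for proc_filtops above. */
static struct filterops example_filtops =
	{ 0, filt_exampleattach, filt_exampledetach, filt_example };

static void
example_load(void)
{
	/* Claim the (hypothetical) EVFILT_EXAMPLE slot; EEXIST if taken. */
	if (kqueue_add_filteropts(EVFILT_EXAMPLE, &example_filtops) != 0)
		printf("example: filter slot already in use\n");
}

static void
example_unload(void)
{
	/* Fails with EBUSY while any knote still references the filter. */
	(void)kqueue_del_filteropts(EVFILT_EXAMPLE);
}
#endif
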
/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok determines
 * whether memory allocation may sleep.  Make sure it is 0 if you hold any
 * mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	int error, filt, event;
	int haskqglobal;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);	/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident, &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * if we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		goto findkn;
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		KQ_UNLOCK(kq);
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
948 */ 949 fops = NULL; 950 fp = NULL; 951 952 kn->kn_sfflags = kev->fflags; 953 kn->kn_sdata = kev->data; 954 kev->fflags = 0; 955 kev->data = 0; 956 kn->kn_kevent = *kev; 957 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 958 EV_ENABLE | EV_DISABLE); 959 kn->kn_status = KN_INFLUX|KN_DETACHED; 960 961 error = knote_attach(kn, kq); 962 KQ_UNLOCK(kq); 963 if (error != 0) { 964 tkn = kn; 965 goto done; 966 } 967 968 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 969 knote_drop(kn, td); 970 goto done; 971 } 972 KN_LIST_LOCK(kn); 973 } else { 974 /* 975 * The user may change some filter values after the 976 * initial EV_ADD, but doing so will not reset any 977 * filter which has already been triggered. 978 */ 979 kn->kn_status |= KN_INFLUX; 980 KQ_UNLOCK(kq); 981 KN_LIST_LOCK(kn); 982 kn->kn_sfflags = kev->fflags; 983 kn->kn_sdata = kev->data; 984 kn->kn_kevent.udata = kev->udata; 985 } 986 987 /* 988 * We can get here with kn->kn_knlist == NULL. 989 * This can happen when the initial attach event decides that 990 * the event is "completed" already. i.e. filt_procattach 991 * is called on a zombie process. It will call filt_proc 992 * which will remove it from the list, and NULL kn_knlist. 993 */ 994 event = kn->kn_fop->f_event(kn, 0); 995 KQ_LOCK(kq); 996 if (event) 997 KNOTE_ACTIVATE(kn, 1); 998 kn->kn_status &= ~KN_INFLUX; 999 KN_LIST_UNLOCK(kn); 1000 } else if (kev->flags & EV_DELETE) { 1001 kn->kn_status |= KN_INFLUX; 1002 KQ_UNLOCK(kq); 1003 if (!(kn->kn_status & KN_DETACHED)) 1004 kn->kn_fop->f_detach(kn); 1005 knote_drop(kn, td); 1006 goto done; 1007 } 1008 1009 if ((kev->flags & EV_DISABLE) && 1010 ((kn->kn_status & KN_DISABLED) == 0)) { 1011 kn->kn_status |= KN_DISABLED; 1012 } 1013 1014 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1015 kn->kn_status &= ~KN_DISABLED; 1016 if ((kn->kn_status & KN_ACTIVE) && 1017 ((kn->kn_status & KN_QUEUED) == 0)) 1018 knote_enqueue(kn); 1019 } 1020 KQ_UNLOCK_FLUX(kq); 1021 1022 done: 1023 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1024 if (fp != NULL) 1025 fdrop(fp, td); 1026 if (tkn != NULL) 1027 knote_free(tkn); 1028 if (fops != NULL) 1029 kqueue_fo_release(filt); 1030 return (error); 1031 } 1032 1033 static int 1034 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1035 { 1036 int error; 1037 struct kqueue *kq; 1038 1039 error = 0; 1040 1041 kq = fp->f_data; 1042 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1043 return (EBADF); 1044 *kqp = kq; 1045 KQ_LOCK(kq); 1046 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1047 KQ_UNLOCK(kq); 1048 return (EBADF); 1049 } 1050 kq->kq_refcnt++; 1051 KQ_UNLOCK(kq); 1052 1053 return error; 1054 } 1055 1056 static void 1057 kqueue_release(struct kqueue *kq, int locked) 1058 { 1059 if (locked) 1060 KQ_OWNED(kq); 1061 else 1062 KQ_LOCK(kq); 1063 kq->kq_refcnt--; 1064 if (kq->kq_refcnt == 1) 1065 wakeup(&kq->kq_refcnt); 1066 if (!locked) 1067 KQ_UNLOCK(kq); 1068 } 1069 1070 static void 1071 kqueue_schedtask(struct kqueue *kq) 1072 { 1073 1074 KQ_OWNED(kq); 1075 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1076 ("scheduling kqueue task while draining")); 1077 1078 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1079 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1080 kq->kq_state |= KQ_TASKSCHED; 1081 } 1082 } 1083 1084 /* 1085 * Expand the kq to make sure we have storage for fops/ident pair. 1086 * 1087 * Return 0 on success (or no work necessary), return errno on failure. 1088 * 1089 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 
1090 * If kqueue_register is called from a non-fd context, there usually/should 1091 * be no locks held. 1092 */ 1093 static int 1094 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1095 int waitok) 1096 { 1097 struct klist *list, *tmp_knhash; 1098 u_long tmp_knhashmask; 1099 int size; 1100 int fd; 1101 int mflag = waitok ? M_WAITOK : M_NOWAIT; 1102 1103 KQ_NOTOWNED(kq); 1104 1105 if (fops->f_isfd) { 1106 fd = ident; 1107 if (kq->kq_knlistsize <= fd) { 1108 size = kq->kq_knlistsize; 1109 while (size <= fd) 1110 size += KQEXTENT; 1111 list = malloc(size * sizeof list, M_KQUEUE, mflag); 1112 if (list == NULL) 1113 return ENOMEM; 1114 KQ_LOCK(kq); 1115 if (kq->kq_knlistsize > fd) { 1116 free(list, M_KQUEUE); 1117 list = NULL; 1118 } else { 1119 if (kq->kq_knlist != NULL) { 1120 bcopy(kq->kq_knlist, list, 1121 kq->kq_knlistsize * sizeof list); 1122 free(kq->kq_knlist, M_KQUEUE); 1123 kq->kq_knlist = NULL; 1124 } 1125 bzero((caddr_t)list + 1126 kq->kq_knlistsize * sizeof list, 1127 (size - kq->kq_knlistsize) * sizeof list); 1128 kq->kq_knlistsize = size; 1129 kq->kq_knlist = list; 1130 } 1131 KQ_UNLOCK(kq); 1132 } 1133 } else { 1134 if (kq->kq_knhashmask == 0) { 1135 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1136 &tmp_knhashmask); 1137 if (tmp_knhash == NULL) 1138 return ENOMEM; 1139 KQ_LOCK(kq); 1140 if (kq->kq_knhashmask == 0) { 1141 kq->kq_knhash = tmp_knhash; 1142 kq->kq_knhashmask = tmp_knhashmask; 1143 } else { 1144 free(tmp_knhash, M_KQUEUE); 1145 } 1146 KQ_UNLOCK(kq); 1147 } 1148 } 1149 1150 KQ_NOTOWNED(kq); 1151 return 0; 1152 } 1153 1154 static void 1155 kqueue_task(void *arg, int pending) 1156 { 1157 struct kqueue *kq; 1158 int haskqglobal; 1159 1160 haskqglobal = 0; 1161 kq = arg; 1162 1163 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1164 KQ_LOCK(kq); 1165 1166 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1167 1168 kq->kq_state &= ~KQ_TASKSCHED; 1169 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1170 wakeup(&kq->kq_state); 1171 } 1172 KQ_UNLOCK(kq); 1173 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1174 } 1175 1176 /* 1177 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1178 * We treat KN_MARKER knotes as if they are INFLUX. 1179 */ 1180 static int 1181 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1182 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1183 { 1184 struct kevent *kevp; 1185 struct timeval atv, rtv, ttv; 1186 struct knote *kn, *marker; 1187 int count, timeout, nkev, error, influx; 1188 int haskqglobal; 1189 1190 count = maxevents; 1191 nkev = 0; 1192 error = 0; 1193 haskqglobal = 0; 1194 1195 if (maxevents == 0) 1196 goto done_nl; 1197 1198 if (tsp != NULL) { 1199 TIMESPEC_TO_TIMEVAL(&atv, tsp); 1200 if (itimerfix(&atv)) { 1201 error = EINVAL; 1202 goto done_nl; 1203 } 1204 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) 1205 timeout = -1; 1206 else 1207 timeout = atv.tv_sec > 24 * 60 * 60 ? 1208 24 * 60 * 60 * hz : tvtohz(&atv); 1209 getmicrouptime(&rtv); 1210 timevaladd(&atv, &rtv); 1211 } else { 1212 atv.tv_sec = 0; 1213 atv.tv_usec = 0; 1214 timeout = 0; 1215 } 1216 marker = knote_alloc(1); 1217 if (marker == NULL) { 1218 error = ENOMEM; 1219 goto done_nl; 1220 } 1221 marker->kn_status = KN_MARKER; 1222 KQ_LOCK(kq); 1223 goto start; 1224 1225 retry: 1226 if (atv.tv_sec || atv.tv_usec) { 1227 getmicrouptime(&rtv); 1228 if (timevalcmp(&rtv, &atv, >=)) 1229 goto done; 1230 ttv = atv; 1231 timevalsub(&ttv, &rtv); 1232 timeout = ttv.tv_sec > 24 * 60 * 60 ? 
1233 24 * 60 * 60 * hz : tvtohz(&ttv); 1234 } 1235 1236 start: 1237 kevp = keva; 1238 if (kq->kq_count == 0) { 1239 if (timeout < 0) { 1240 error = EWOULDBLOCK; 1241 } else { 1242 kq->kq_state |= KQ_SLEEP; 1243 error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH, 1244 "kqread", timeout); 1245 } 1246 if (error == 0) 1247 goto retry; 1248 /* don't restart after signals... */ 1249 if (error == ERESTART) 1250 error = EINTR; 1251 else if (error == EWOULDBLOCK) 1252 error = 0; 1253 goto done; 1254 } 1255 1256 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1257 influx = 0; 1258 while (count) { 1259 KQ_OWNED(kq); 1260 kn = TAILQ_FIRST(&kq->kq_head); 1261 1262 if ((kn->kn_status == KN_MARKER && kn != marker) || 1263 (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1264 if (influx) { 1265 influx = 0; 1266 KQ_FLUX_WAKEUP(kq); 1267 } 1268 kq->kq_state |= KQ_FLUXWAIT; 1269 error = msleep(kq, &kq->kq_lock, PSOCK, 1270 "kqflxwt", 0); 1271 continue; 1272 } 1273 1274 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1275 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1276 kn->kn_status &= ~KN_QUEUED; 1277 kq->kq_count--; 1278 continue; 1279 } 1280 if (kn == marker) { 1281 KQ_FLUX_WAKEUP(kq); 1282 if (count == maxevents) 1283 goto retry; 1284 goto done; 1285 } 1286 KASSERT((kn->kn_status & KN_INFLUX) == 0, 1287 ("KN_INFLUX set when not suppose to be")); 1288 1289 if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1290 kn->kn_status &= ~KN_QUEUED; 1291 kn->kn_status |= KN_INFLUX; 1292 kq->kq_count--; 1293 KQ_UNLOCK(kq); 1294 /* 1295 * We don't need to lock the list since we've marked 1296 * it _INFLUX. 1297 */ 1298 *kevp = kn->kn_kevent; 1299 if (!(kn->kn_status & KN_DETACHED)) 1300 kn->kn_fop->f_detach(kn); 1301 knote_drop(kn, td); 1302 KQ_LOCK(kq); 1303 kn = NULL; 1304 } else { 1305 kn->kn_status |= KN_INFLUX; 1306 KQ_UNLOCK(kq); 1307 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 1308 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1309 KN_LIST_LOCK(kn); 1310 if (kn->kn_fop->f_event(kn, 0) == 0) { 1311 KQ_LOCK(kq); 1312 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1313 kn->kn_status &= 1314 ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX); 1315 kq->kq_count--; 1316 KN_LIST_UNLOCK(kn); 1317 influx = 1; 1318 continue; 1319 } 1320 *kevp = kn->kn_kevent; 1321 KQ_LOCK(kq); 1322 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1323 if (kn->kn_flags & EV_CLEAR) { 1324 kn->kn_data = 0; 1325 kn->kn_fflags = 0; 1326 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1327 kq->kq_count--; 1328 } else 1329 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1330 1331 kn->kn_status &= ~(KN_INFLUX); 1332 KN_LIST_UNLOCK(kn); 1333 influx = 1; 1334 } 1335 1336 /* we are returning a copy to the user */ 1337 kevp++; 1338 nkev++; 1339 count--; 1340 1341 if (nkev == KQ_NEVENTS) { 1342 influx = 0; 1343 KQ_UNLOCK_FLUX(kq); 1344 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1345 nkev = 0; 1346 kevp = keva; 1347 KQ_LOCK(kq); 1348 if (error) 1349 break; 1350 } 1351 } 1352 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 1353 done: 1354 KQ_OWNED(kq); 1355 KQ_UNLOCK_FLUX(kq); 1356 knote_free(marker); 1357 done_nl: 1358 KQ_NOTOWNED(kq); 1359 if (nkev != 0) 1360 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1361 td->td_retval[0] = maxevents - count; 1362 return (error); 1363 } 1364 1365 /* 1366 * XXX 1367 * This could be expanded to call kqueue_scan, if desired. 
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	struct thread *td)
{

	return (EINVAL);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
1482 */ 1483 st->st_mode = S_IFIFO; 1484 return (0); 1485 } 1486 1487 /*ARGSUSED*/ 1488 static int 1489 kqueue_close(struct file *fp, struct thread *td) 1490 { 1491 struct kqueue *kq = fp->f_data; 1492 struct filedesc *fdp; 1493 struct knote *kn; 1494 int i; 1495 int error; 1496 1497 if ((error = kqueue_acquire(fp, &kq))) 1498 return error; 1499 1500 KQ_LOCK(kq); 1501 1502 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1503 ("kqueue already closing")); 1504 kq->kq_state |= KQ_CLOSING; 1505 if (kq->kq_refcnt > 1) 1506 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1507 1508 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1509 fdp = kq->kq_fdp; 1510 1511 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1512 ("kqueue's knlist not empty")); 1513 1514 for (i = 0; i < kq->kq_knlistsize; i++) { 1515 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1516 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1517 kq->kq_state |= KQ_FLUXWAIT; 1518 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1519 continue; 1520 } 1521 kn->kn_status |= KN_INFLUX; 1522 KQ_UNLOCK(kq); 1523 if (!(kn->kn_status & KN_DETACHED)) 1524 kn->kn_fop->f_detach(kn); 1525 knote_drop(kn, td); 1526 KQ_LOCK(kq); 1527 } 1528 } 1529 if (kq->kq_knhashmask != 0) { 1530 for (i = 0; i <= kq->kq_knhashmask; i++) { 1531 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1532 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1533 kq->kq_state |= KQ_FLUXWAIT; 1534 msleep(kq, &kq->kq_lock, PSOCK, 1535 "kqclo2", 0); 1536 continue; 1537 } 1538 kn->kn_status |= KN_INFLUX; 1539 KQ_UNLOCK(kq); 1540 if (!(kn->kn_status & KN_DETACHED)) 1541 kn->kn_fop->f_detach(kn); 1542 knote_drop(kn, td); 1543 KQ_LOCK(kq); 1544 } 1545 } 1546 } 1547 1548 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1549 kq->kq_state |= KQ_TASKDRAIN; 1550 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1551 } 1552 1553 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1554 selwakeuppri(&kq->kq_sel, PSOCK); 1555 if (!SEL_WAITING(&kq->kq_sel)) 1556 kq->kq_state &= ~KQ_SEL; 1557 } 1558 1559 KQ_UNLOCK(kq); 1560 1561 FILEDESC_XLOCK(fdp); 1562 SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list); 1563 FILEDESC_XUNLOCK(fdp); 1564 1565 knlist_destroy(&kq->kq_sel.si_note); 1566 mtx_destroy(&kq->kq_lock); 1567 kq->kq_fdp = NULL; 1568 1569 if (kq->kq_knhash != NULL) 1570 free(kq->kq_knhash, M_KQUEUE); 1571 if (kq->kq_knlist != NULL) 1572 free(kq->kq_knlist, M_KQUEUE); 1573 1574 funsetown(&kq->kq_sigio); 1575 free(kq, M_KQUEUE); 1576 fp->f_data = NULL; 1577 1578 return (0); 1579 } 1580 1581 static void 1582 kqueue_wakeup(struct kqueue *kq) 1583 { 1584 KQ_OWNED(kq); 1585 1586 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1587 kq->kq_state &= ~KQ_SLEEP; 1588 wakeup(kq); 1589 } 1590 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1591 selwakeuppri(&kq->kq_sel, PSOCK); 1592 if (!SEL_WAITING(&kq->kq_sel)) 1593 kq->kq_state &= ~KQ_SEL; 1594 } 1595 if (!knlist_empty(&kq->kq_sel.si_note)) 1596 kqueue_schedtask(kq); 1597 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1598 pgsigio(&kq->kq_sigio, SIGIO, 0); 1599 } 1600 } 1601 1602 /* 1603 * Walk down a list of knotes, activating them if their event has triggered. 1604 * 1605 * There is a possibility to optimize in the case of one kq watching another. 1606 * Instead of scheduling a task to wake it up, you could pass enough state 1607 * down the chain to make up the parent kqueue. Make this code functional 1608 * first. 
 */
void
knote(struct knlist *list, long hint, int islocked)
{
	struct kqueue *kq;
	struct knote *kn;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, islocked);

	if (!islocked)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
	 * the kqueue scheduling, but this will introduce four
	 * lock/unlock's for each knote to test.  If we do, continue to use
	 * SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, as it
	 * is only safe if you want to remove the current item, which we are
	 * not doing.
	 */
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
			KQ_LOCK(kq);
			if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
				kn->kn_status |= KN_HASKQLOCK;
				if (kn->kn_fop->f_event(kn, hint))
					KNOTE_ACTIVATE(kn, 1);
				kn->kn_status &= ~KN_HASKQLOCK;
			}
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}
	if (!islocked)
		list->kl_unlock(list->kl_lockarg);
}

/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{
	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
{
	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	if (!kqislocked)
		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
		    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		knl->kl_unlock(knl->kl_lockarg);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * remove a knote from a specified knlist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

/*
 * remove a knote from a specified knlist while in an f_event handler.
1705 */ 1706 void 1707 knlist_remove_inevent(struct knlist *knl, struct knote *kn) 1708 { 1709 1710 knlist_remove_kq(knl, kn, 1, 1711 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 1712 } 1713 1714 int 1715 knlist_empty(struct knlist *knl) 1716 { 1717 KNL_ASSERT_LOCKED(knl); 1718 return SLIST_EMPTY(&knl->kl_list); 1719 } 1720 1721 static struct mtx knlist_lock; 1722 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 1723 MTX_DEF); 1724 static void knlist_mtx_lock(void *arg); 1725 static void knlist_mtx_unlock(void *arg); 1726 static int knlist_mtx_locked(void *arg); 1727 1728 static void 1729 knlist_mtx_lock(void *arg) 1730 { 1731 mtx_lock((struct mtx *)arg); 1732 } 1733 1734 static void 1735 knlist_mtx_unlock(void *arg) 1736 { 1737 mtx_unlock((struct mtx *)arg); 1738 } 1739 1740 static int 1741 knlist_mtx_locked(void *arg) 1742 { 1743 return (mtx_owned((struct mtx *)arg)); 1744 } 1745 1746 void 1747 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 1748 void (*kl_unlock)(void *), int (*kl_locked)(void *)) 1749 { 1750 1751 if (lock == NULL) 1752 knl->kl_lockarg = &knlist_lock; 1753 else 1754 knl->kl_lockarg = lock; 1755 1756 if (kl_lock == NULL) 1757 knl->kl_lock = knlist_mtx_lock; 1758 else 1759 knl->kl_lock = kl_lock; 1760 if (kl_unlock == NULL) 1761 knl->kl_unlock = knlist_mtx_unlock; 1762 else 1763 knl->kl_unlock = kl_unlock; 1764 if (kl_locked == NULL) 1765 knl->kl_locked = knlist_mtx_locked; 1766 else 1767 knl->kl_locked = kl_locked; 1768 1769 SLIST_INIT(&knl->kl_list); 1770 } 1771 1772 void 1773 knlist_destroy(struct knlist *knl) 1774 { 1775 1776 #ifdef INVARIANTS 1777 /* 1778 * if we run across this error, we need to find the offending 1779 * driver and have it call knlist_clear. 1780 */ 1781 if (!SLIST_EMPTY(&knl->kl_list)) 1782 printf("WARNING: destroying knlist w/ knotes on it!\n"); 1783 #endif 1784 1785 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 1786 SLIST_INIT(&knl->kl_list); 1787 } 1788 1789 /* 1790 * Even if we are locked, we may need to drop the lock to allow any influx 1791 * knotes time to "settle". 
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd.  This must be called with the
 * FILEDESC lock held, which prevents a race where a new fd comes along and
 * occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}

/*
 * knote must already have been detached using the f_detach method.
 * no lock needs to be held; it is assumed that the KN_INFLUX flag is set
 * to prevent other removal.
1922 */ 1923 static void 1924 knote_drop(struct knote *kn, struct thread *td) 1925 { 1926 struct kqueue *kq; 1927 struct klist *list; 1928 1929 kq = kn->kn_kq; 1930 1931 KQ_NOTOWNED(kq); 1932 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX, 1933 ("knote_drop called without KN_INFLUX set in kn_status")); 1934 1935 KQ_LOCK(kq); 1936 if (kn->kn_fop->f_isfd) 1937 list = &kq->kq_knlist[kn->kn_id]; 1938 else 1939 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 1940 1941 if (!SLIST_EMPTY(list)) 1942 SLIST_REMOVE(list, kn, knote, kn_link); 1943 if (kn->kn_status & KN_QUEUED) 1944 knote_dequeue(kn); 1945 KQ_UNLOCK_FLUX(kq); 1946 1947 if (kn->kn_fop->f_isfd) { 1948 fdrop(kn->kn_fp, td); 1949 kn->kn_fp = NULL; 1950 } 1951 kqueue_fo_release(kn->kn_kevent.filter); 1952 kn->kn_fop = NULL; 1953 knote_free(kn); 1954 } 1955 1956 static void 1957 knote_enqueue(struct knote *kn) 1958 { 1959 struct kqueue *kq = kn->kn_kq; 1960 1961 KQ_OWNED(kn->kn_kq); 1962 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 1963 1964 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1965 kn->kn_status |= KN_QUEUED; 1966 kq->kq_count++; 1967 kqueue_wakeup(kq); 1968 } 1969 1970 static void 1971 knote_dequeue(struct knote *kn) 1972 { 1973 struct kqueue *kq = kn->kn_kq; 1974 1975 KQ_OWNED(kn->kn_kq); 1976 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 1977 1978 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1979 kn->kn_status &= ~KN_QUEUED; 1980 kq->kq_count--; 1981 } 1982 1983 static void 1984 knote_init(void) 1985 { 1986 1987 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 1988 NULL, NULL, UMA_ALIGN_PTR, 0); 1989 } 1990 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 1991 1992 static struct knote * 1993 knote_alloc(int waitok) 1994 { 1995 return ((struct knote *)uma_zalloc(knote_zone, 1996 (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO)); 1997 } 1998 1999 static void 2000 knote_free(struct knote *kn) 2001 { 2002 if (kn != NULL) 2003 uma_zfree(knote_zone, kn); 2004 } 2005 2006 /* 2007 * Register the kev w/ the kq specified by fd. 2008 */ 2009 int 2010 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2011 { 2012 struct kqueue *kq; 2013 struct file *fp; 2014 int error; 2015 2016 if ((error = fget(td, fd, &fp)) != 0) 2017 return (error); 2018 if ((error = kqueue_acquire(fp, &kq)) != 0) 2019 goto noacquire; 2020 2021 error = kqueue_register(kq, kev, td, waitok); 2022 2023 kqueue_release(kq, 0); 2024 2025 noacquire: 2026 fdrop(fp, td); 2027 2028 return error; 2029 } 2030
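
/*
 * Illustrative sketch (not part of this file, not compiled): registering an
 * event on a kqueue identified by a file descriptor from elsewhere in the
 * kernel via kqfd_register(), roughly the way kernel subsystems such as AIO
 * post completions.  The kqfd, ident and cookie values below are hypothetical
 * placeholders used only to show the calling convention.
 */
#if 0
static int
example_post_event(struct thread *td, int kqfd, uintptr_t ident, void *cookie)
{
	struct kevent kev;

	kev.ident = ident;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; /* EV_FLAG1: kernel registration */
	kev.fflags = 0;
	kev.data = 0;
	kev.udata = cookie;

	/* waitok is 1: no locks are held here, so allocation may sleep. */
	return (kqfd_register(kqfd, &kev, td, 1));
}
#endif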