1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 6 * Copyright (c) 2009 Apple, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_ktrace.h" 35 #include "opt_kqueue.h" 36 37 #ifdef COMPAT_FREEBSD11 38 #define _WANT_FREEBSD11_KEVENT 39 #endif 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/capsicum.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/rwlock.h> 48 #include <sys/proc.h> 49 #include <sys/malloc.h> 50 #include <sys/unistd.h> 51 #include <sys/file.h> 52 #include <sys/filedesc.h> 53 #include <sys/filio.h> 54 #include <sys/fcntl.h> 55 #include <sys/kthread.h> 56 #include <sys/selinfo.h> 57 #include <sys/queue.h> 58 #include <sys/event.h> 59 #include <sys/eventvar.h> 60 #include <sys/poll.h> 61 #include <sys/protosw.h> 62 #include <sys/resourcevar.h> 63 #include <sys/sigio.h> 64 #include <sys/signalvar.h> 65 #include <sys/socket.h> 66 #include <sys/socketvar.h> 67 #include <sys/stat.h> 68 #include <sys/sysctl.h> 69 #include <sys/sysproto.h> 70 #include <sys/syscallsubr.h> 71 #include <sys/taskqueue.h> 72 #include <sys/uio.h> 73 #include <sys/user.h> 74 #ifdef KTRACE 75 #include <sys/ktrace.h> 76 #endif 77 #include <machine/atomic.h> 78 79 #include <vm/uma.h> 80 81 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 82 83 /* 84 * This lock is used if multiple kq locks are required. This possibly 85 * should be made into a per proc lock. 86 */ 87 static struct mtx kq_global; 88 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 89 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 90 if (!haslck) \ 91 mtx_lock(lck); \ 92 haslck = 1; \ 93 } while (0) 94 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 95 if (haslck) \ 96 mtx_unlock(lck); \ 97 haslck = 0; \ 98 } while (0) 99 100 TASKQUEUE_DEFINE_THREAD(kqueue_ctx); 101 102 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 103 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 104 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 105 struct thread *td, int waitok); 106 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 107 static void kqueue_release(struct kqueue *kq, int locked); 108 static void kqueue_destroy(struct kqueue *kq); 109 static void kqueue_drain(struct kqueue *kq, struct thread *td); 110 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 111 uintptr_t ident, int waitok); 112 static void kqueue_task(void *arg, int pending); 113 static int kqueue_scan(struct kqueue *kq, int maxevents, 114 struct kevent_copyops *k_ops, 115 const struct timespec *timeout, 116 struct kevent *keva, struct thread *td); 117 static void kqueue_wakeup(struct kqueue *kq); 118 static struct filterops *kqueue_fo_find(int filt); 119 static void kqueue_fo_release(int filt); 120 struct g_kevent_args; 121 static int kern_kevent_generic(struct thread *td, 122 struct g_kevent_args *uap, 123 struct kevent_copyops *k_ops, const char *struct_name); 124 125 static fo_ioctl_t kqueue_ioctl; 126 static fo_poll_t kqueue_poll; 127 static fo_kqfilter_t kqueue_kqfilter; 128 static fo_stat_t kqueue_stat; 129 static fo_close_t kqueue_close; 130 static fo_fill_kinfo_t kqueue_fill_kinfo; 131 132 static struct fileops kqueueops = { 133 .fo_read = invfo_rdwr, 134 .fo_write = invfo_rdwr, 135 .fo_truncate = invfo_truncate, 136 .fo_ioctl = kqueue_ioctl, 137 .fo_poll = kqueue_poll, 138 .fo_kqfilter = kqueue_kqfilter, 139 .fo_stat = kqueue_stat, 140 .fo_close = kqueue_close, 141 .fo_chmod = invfo_chmod, 142 .fo_chown = invfo_chown, 143 .fo_sendfile = invfo_sendfile, 144 .fo_fill_kinfo = kqueue_fill_kinfo, 145 }; 146 147 static int knote_attach(struct knote *kn, struct kqueue *kq); 148 static void knote_drop(struct knote *kn, struct thread *td); 149 static void knote_drop_detached(struct knote *kn, struct thread *td); 150 static void knote_enqueue(struct knote *kn); 151 static void knote_dequeue(struct knote *kn); 152 static void knote_init(void); 153 static struct knote *knote_alloc(int waitok); 154 static void knote_free(struct knote *kn); 155 156 static void filt_kqdetach(struct knote *kn); 157 static int filt_kqueue(struct knote *kn, long hint); 158 static int filt_procattach(struct knote *kn); 159 static void filt_procdetach(struct knote *kn); 160 static int filt_proc(struct knote *kn, long hint); 161 static int filt_fileattach(struct knote *kn); 162 static void filt_timerexpire(void *knx); 163 static int filt_timerattach(struct knote *kn); 164 static void filt_timerdetach(struct knote *kn); 165 static void filt_timerstart(struct knote *kn, sbintime_t to); 166 static void filt_timertouch(struct knote *kn, struct kevent *kev, 167 u_long type); 168 static int filt_timervalidate(struct knote *kn, sbintime_t *to); 169 static int filt_timer(struct knote *kn, long hint); 170 static int filt_userattach(struct knote *kn); 171 static void filt_userdetach(struct knote *kn); 172 static int filt_user(struct knote *kn, long hint); 173 static void filt_usertouch(struct knote *kn, struct kevent *kev, 174 u_long type); 175 176 static struct filterops file_filtops = { 177 .f_isfd = 1, 178 .f_attach = filt_fileattach, 179 }; 180 static struct filterops kqread_filtops = { 181 .f_isfd = 1, 182 .f_detach = filt_kqdetach, 183 .f_event = filt_kqueue, 184 }; 185 /* XXX - move to kern_proc.c? */ 186 static struct filterops proc_filtops = { 187 .f_isfd = 0, 188 .f_attach = filt_procattach, 189 .f_detach = filt_procdetach, 190 .f_event = filt_proc, 191 }; 192 static struct filterops timer_filtops = { 193 .f_isfd = 0, 194 .f_attach = filt_timerattach, 195 .f_detach = filt_timerdetach, 196 .f_event = filt_timer, 197 .f_touch = filt_timertouch, 198 }; 199 static struct filterops user_filtops = { 200 .f_attach = filt_userattach, 201 .f_detach = filt_userdetach, 202 .f_event = filt_user, 203 .f_touch = filt_usertouch, 204 }; 205 206 static uma_zone_t knote_zone; 207 static unsigned int kq_ncallouts = 0; 208 static unsigned int kq_calloutmax = 4 * 1024; 209 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 210 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 211 212 /* XXX - ensure not influx ? */ 213 #define KNOTE_ACTIVATE(kn, islock) do { \ 214 if ((islock)) \ 215 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 216 else \ 217 KQ_LOCK((kn)->kn_kq); \ 218 (kn)->kn_status |= KN_ACTIVE; \ 219 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 220 knote_enqueue((kn)); \ 221 if (!(islock)) \ 222 KQ_UNLOCK((kn)->kn_kq); \ 223 } while(0) 224 #define KQ_LOCK(kq) do { \ 225 mtx_lock(&(kq)->kq_lock); \ 226 } while (0) 227 #define KQ_FLUX_WAKEUP(kq) do { \ 228 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 229 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 230 wakeup((kq)); \ 231 } \ 232 } while (0) 233 #define KQ_UNLOCK_FLUX(kq) do { \ 234 KQ_FLUX_WAKEUP(kq); \ 235 mtx_unlock(&(kq)->kq_lock); \ 236 } while (0) 237 #define KQ_UNLOCK(kq) do { \ 238 mtx_unlock(&(kq)->kq_lock); \ 239 } while (0) 240 #define KQ_OWNED(kq) do { \ 241 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 242 } while (0) 243 #define KQ_NOTOWNED(kq) do { \ 244 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 245 } while (0) 246 247 static struct knlist * 248 kn_list_lock(struct knote *kn) 249 { 250 struct knlist *knl; 251 252 knl = kn->kn_knlist; 253 if (knl != NULL) 254 knl->kl_lock(knl->kl_lockarg); 255 return (knl); 256 } 257 258 static void 259 kn_list_unlock(struct knlist *knl) 260 { 261 bool do_free; 262 263 if (knl == NULL) 264 return; 265 do_free = knl->kl_autodestroy && knlist_empty(knl); 266 knl->kl_unlock(knl->kl_lockarg); 267 if (do_free) { 268 knlist_destroy(knl); 269 free(knl, M_KQUEUE); 270 } 271 } 272 273 static bool 274 kn_in_flux(struct knote *kn) 275 { 276 277 return (kn->kn_influx > 0); 278 } 279 280 static void 281 kn_enter_flux(struct knote *kn) 282 { 283 284 KQ_OWNED(kn->kn_kq); 285 MPASS(kn->kn_influx < INT_MAX); 286 kn->kn_influx++; 287 } 288 289 static bool 290 kn_leave_flux(struct knote *kn) 291 { 292 293 KQ_OWNED(kn->kn_kq); 294 MPASS(kn->kn_influx > 0); 295 kn->kn_influx--; 296 return (kn->kn_influx == 0); 297 } 298 299 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 300 if (islocked) \ 301 KNL_ASSERT_LOCKED(knl); \ 302 else \ 303 KNL_ASSERT_UNLOCKED(knl); \ 304 } while (0) 305 #ifdef INVARIANTS 306 #define KNL_ASSERT_LOCKED(knl) do { \ 307 knl->kl_assert_locked((knl)->kl_lockarg); \ 308 } while (0) 309 #define KNL_ASSERT_UNLOCKED(knl) do { \ 310 knl->kl_assert_unlocked((knl)->kl_lockarg); \ 311 } while (0) 312 #else /* !INVARIANTS */ 313 #define KNL_ASSERT_LOCKED(knl) do {} while(0) 314 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 315 #endif /* INVARIANTS */ 316 317 #ifndef KN_HASHSIZE 318 #define KN_HASHSIZE 64 /* XXX should be tunable */ 319 #endif 320 321 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 322 323 static int 324 filt_nullattach(struct knote *kn) 325 { 326 327 return (ENXIO); 328 }; 329 330 struct filterops null_filtops = { 331 .f_isfd = 0, 332 .f_attach = filt_nullattach, 333 }; 334 335 /* XXX - make SYSINIT to add these, and move into respective modules. */ 336 extern struct filterops sig_filtops; 337 extern struct filterops fs_filtops; 338 339 /* 340 * Table for for all system-defined filters. 341 */ 342 static struct mtx filterops_lock; 343 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 344 MTX_DEF); 345 static struct { 346 struct filterops *for_fop; 347 int for_nolock; 348 int for_refcnt; 349 } sysfilt_ops[EVFILT_SYSCOUNT] = { 350 { &file_filtops, 1 }, /* EVFILT_READ */ 351 { &file_filtops, 1 }, /* EVFILT_WRITE */ 352 { &null_filtops }, /* EVFILT_AIO */ 353 { &file_filtops, 1 }, /* EVFILT_VNODE */ 354 { &proc_filtops, 1 }, /* EVFILT_PROC */ 355 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 356 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 357 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 358 { &fs_filtops, 1 }, /* EVFILT_FS */ 359 { &null_filtops }, /* EVFILT_LIO */ 360 { &user_filtops, 1 }, /* EVFILT_USER */ 361 { &null_filtops }, /* EVFILT_SENDFILE */ 362 { &file_filtops, 1 }, /* EVFILT_EMPTY */ 363 }; 364 365 /* 366 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 367 * method. 368 */ 369 static int 370 filt_fileattach(struct knote *kn) 371 { 372 373 return (fo_kqfilter(kn->kn_fp, kn)); 374 } 375 376 /*ARGSUSED*/ 377 static int 378 kqueue_kqfilter(struct file *fp, struct knote *kn) 379 { 380 struct kqueue *kq = kn->kn_fp->f_data; 381 382 if (kn->kn_filter != EVFILT_READ) 383 return (EINVAL); 384 385 kn->kn_status |= KN_KQUEUE; 386 kn->kn_fop = &kqread_filtops; 387 knlist_add(&kq->kq_sel.si_note, kn, 0); 388 389 return (0); 390 } 391 392 static void 393 filt_kqdetach(struct knote *kn) 394 { 395 struct kqueue *kq = kn->kn_fp->f_data; 396 397 knlist_remove(&kq->kq_sel.si_note, kn, 0); 398 } 399 400 /*ARGSUSED*/ 401 static int 402 filt_kqueue(struct knote *kn, long hint) 403 { 404 struct kqueue *kq = kn->kn_fp->f_data; 405 406 kn->kn_data = kq->kq_count; 407 return (kn->kn_data > 0); 408 } 409 410 /* XXX - move to kern_proc.c? */ 411 static int 412 filt_procattach(struct knote *kn) 413 { 414 struct proc *p; 415 int error; 416 bool exiting, immediate; 417 418 exiting = immediate = false; 419 if (kn->kn_sfflags & NOTE_EXIT) 420 p = pfind_any(kn->kn_id); 421 else 422 p = pfind(kn->kn_id); 423 if (p == NULL) 424 return (ESRCH); 425 if (p->p_flag & P_WEXIT) 426 exiting = true; 427 428 if ((error = p_cansee(curthread, p))) { 429 PROC_UNLOCK(p); 430 return (error); 431 } 432 433 kn->kn_ptr.p_proc = p; 434 kn->kn_flags |= EV_CLEAR; /* automatically set */ 435 436 /* 437 * Internal flag indicating registration done by kernel for the 438 * purposes of getting a NOTE_CHILD notification. 439 */ 440 if (kn->kn_flags & EV_FLAG2) { 441 kn->kn_flags &= ~EV_FLAG2; 442 kn->kn_data = kn->kn_sdata; /* ppid */ 443 kn->kn_fflags = NOTE_CHILD; 444 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); 445 immediate = true; /* Force immediate activation of child note. */ 446 } 447 /* 448 * Internal flag indicating registration done by kernel (for other than 449 * NOTE_CHILD). 450 */ 451 if (kn->kn_flags & EV_FLAG1) { 452 kn->kn_flags &= ~EV_FLAG1; 453 } 454 455 knlist_add(p->p_klist, kn, 1); 456 457 /* 458 * Immediately activate any child notes or, in the case of a zombie 459 * target process, exit notes. The latter is necessary to handle the 460 * case where the target process, e.g. a child, dies before the kevent 461 * is registered. 462 */ 463 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) 464 KNOTE_ACTIVATE(kn, 0); 465 466 PROC_UNLOCK(p); 467 468 return (0); 469 } 470 471 /* 472 * The knote may be attached to a different process, which may exit, 473 * leaving nothing for the knote to be attached to. So when the process 474 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 475 * it will be deleted when read out. However, as part of the knote deletion, 476 * this routine is called, so a check is needed to avoid actually performing 477 * a detach, because the original process does not exist any more. 478 */ 479 /* XXX - move to kern_proc.c? */ 480 static void 481 filt_procdetach(struct knote *kn) 482 { 483 484 knlist_remove(kn->kn_knlist, kn, 0); 485 kn->kn_ptr.p_proc = NULL; 486 } 487 488 /* XXX - move to kern_proc.c? */ 489 static int 490 filt_proc(struct knote *kn, long hint) 491 { 492 struct proc *p; 493 u_int event; 494 495 p = kn->kn_ptr.p_proc; 496 if (p == NULL) /* already activated, from attach filter */ 497 return (0); 498 499 /* Mask off extra data. */ 500 event = (u_int)hint & NOTE_PCTRLMASK; 501 502 /* If the user is interested in this event, record it. */ 503 if (kn->kn_sfflags & event) 504 kn->kn_fflags |= event; 505 506 /* Process is gone, so flag the event as finished. */ 507 if (event == NOTE_EXIT) { 508 kn->kn_flags |= EV_EOF | EV_ONESHOT; 509 kn->kn_ptr.p_proc = NULL; 510 if (kn->kn_fflags & NOTE_EXIT) 511 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 512 if (kn->kn_fflags == 0) 513 kn->kn_flags |= EV_DROP; 514 return (1); 515 } 516 517 return (kn->kn_fflags != 0); 518 } 519 520 /* 521 * Called when the process forked. It mostly does the same as the 522 * knote(), activating all knotes registered to be activated when the 523 * process forked. Additionally, for each knote attached to the 524 * parent, check whether user wants to track the new process. If so 525 * attach a new knote to it, and immediately report an event with the 526 * child's pid. 527 */ 528 void 529 knote_fork(struct knlist *list, int pid) 530 { 531 struct kqueue *kq; 532 struct knote *kn; 533 struct kevent kev; 534 int error; 535 536 if (list == NULL) 537 return; 538 list->kl_lock(list->kl_lockarg); 539 540 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 541 kq = kn->kn_kq; 542 KQ_LOCK(kq); 543 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 544 KQ_UNLOCK(kq); 545 continue; 546 } 547 548 /* 549 * The same as knote(), activate the event. 550 */ 551 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 552 kn->kn_status |= KN_HASKQLOCK; 553 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 554 KNOTE_ACTIVATE(kn, 1); 555 kn->kn_status &= ~KN_HASKQLOCK; 556 KQ_UNLOCK(kq); 557 continue; 558 } 559 560 /* 561 * The NOTE_TRACK case. In addition to the activation 562 * of the event, we need to register new events to 563 * track the child. Drop the locks in preparation for 564 * the call to kqueue_register(). 565 */ 566 kn_enter_flux(kn); 567 KQ_UNLOCK(kq); 568 list->kl_unlock(list->kl_lockarg); 569 570 /* 571 * Activate existing knote and register tracking knotes with 572 * new process. 573 * 574 * First register a knote to get just the child notice. This 575 * must be a separate note from a potential NOTE_EXIT 576 * notification since both NOTE_CHILD and NOTE_EXIT are defined 577 * to use the data field (in conflicting ways). 578 */ 579 kev.ident = pid; 580 kev.filter = kn->kn_filter; 581 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 582 EV_FLAG2; 583 kev.fflags = kn->kn_sfflags; 584 kev.data = kn->kn_id; /* parent */ 585 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 586 error = kqueue_register(kq, &kev, NULL, 0); 587 if (error) 588 kn->kn_fflags |= NOTE_TRACKERR; 589 590 /* 591 * Then register another knote to track other potential events 592 * from the new process. 593 */ 594 kev.ident = pid; 595 kev.filter = kn->kn_filter; 596 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 597 kev.fflags = kn->kn_sfflags; 598 kev.data = kn->kn_id; /* parent */ 599 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 600 error = kqueue_register(kq, &kev, NULL, 0); 601 if (error) 602 kn->kn_fflags |= NOTE_TRACKERR; 603 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 604 KNOTE_ACTIVATE(kn, 0); 605 KQ_LOCK(kq); 606 kn_leave_flux(kn); 607 KQ_UNLOCK_FLUX(kq); 608 list->kl_lock(list->kl_lockarg); 609 } 610 list->kl_unlock(list->kl_lockarg); 611 } 612 613 /* 614 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 615 * interval timer support code. 616 */ 617 618 #define NOTE_TIMER_PRECMASK \ 619 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 620 621 static sbintime_t 622 timer2sbintime(intptr_t data, int flags) 623 { 624 int64_t secs; 625 626 /* 627 * Macros for converting to the fractional second portion of an 628 * sbintime_t using 64bit multiplication to improve precision. 629 */ 630 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) 631 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) 632 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) 633 switch (flags & NOTE_TIMER_PRECMASK) { 634 case NOTE_SECONDS: 635 #ifdef __LP64__ 636 if (data > (SBT_MAX / SBT_1S)) 637 return (SBT_MAX); 638 #endif 639 return ((sbintime_t)data << 32); 640 case NOTE_MSECONDS: /* FALLTHROUGH */ 641 case 0: 642 if (data >= 1000) { 643 secs = data / 1000; 644 #ifdef __LP64__ 645 if (secs > (SBT_MAX / SBT_1S)) 646 return (SBT_MAX); 647 #endif 648 return (secs << 32 | MS_TO_SBT(data % 1000)); 649 } 650 return (MS_TO_SBT(data)); 651 case NOTE_USECONDS: 652 if (data >= 1000000) { 653 secs = data / 1000000; 654 #ifdef __LP64__ 655 if (secs > (SBT_MAX / SBT_1S)) 656 return (SBT_MAX); 657 #endif 658 return (secs << 32 | US_TO_SBT(data % 1000000)); 659 } 660 return (US_TO_SBT(data)); 661 case NOTE_NSECONDS: 662 if (data >= 1000000000) { 663 secs = data / 1000000000; 664 #ifdef __LP64__ 665 if (secs > (SBT_MAX / SBT_1S)) 666 return (SBT_MAX); 667 #endif 668 return (secs << 32 | US_TO_SBT(data % 1000000000)); 669 } 670 return (NS_TO_SBT(data)); 671 default: 672 break; 673 } 674 return (-1); 675 } 676 677 struct kq_timer_cb_data { 678 struct callout c; 679 sbintime_t next; /* next timer event fires at */ 680 sbintime_t to; /* precalculated timer period, 0 for abs */ 681 }; 682 683 static void 684 filt_timerexpire(void *knx) 685 { 686 struct knote *kn; 687 struct kq_timer_cb_data *kc; 688 689 kn = knx; 690 kn->kn_data++; 691 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 692 693 if ((kn->kn_flags & EV_ONESHOT) != 0) 694 return; 695 kc = kn->kn_ptr.p_v; 696 if (kc->to == 0) 697 return; 698 kc->next += kc->to; 699 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn, 700 PCPU_GET(cpuid), C_ABSOLUTE); 701 } 702 703 /* 704 * data contains amount of time to sleep 705 */ 706 static int 707 filt_timervalidate(struct knote *kn, sbintime_t *to) 708 { 709 struct bintime bt; 710 sbintime_t sbt; 711 712 if (kn->kn_sdata < 0) 713 return (EINVAL); 714 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 715 kn->kn_sdata = 1; 716 /* 717 * The only fflags values supported are the timer unit 718 * (precision) and the absolute time indicator. 719 */ 720 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 721 return (EINVAL); 722 723 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 724 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 725 getboottimebin(&bt); 726 sbt = bttosbt(bt); 727 *to -= sbt; 728 } 729 if (*to < 0) 730 return (EINVAL); 731 return (0); 732 } 733 734 static int 735 filt_timerattach(struct knote *kn) 736 { 737 struct kq_timer_cb_data *kc; 738 sbintime_t to; 739 unsigned int ncallouts; 740 int error; 741 742 error = filt_timervalidate(kn, &to); 743 if (error != 0) 744 return (error); 745 746 do { 747 ncallouts = kq_ncallouts; 748 if (ncallouts >= kq_calloutmax) 749 return (ENOMEM); 750 } while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1)); 751 752 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 753 kn->kn_flags |= EV_CLEAR; /* automatically set */ 754 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 755 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 756 callout_init(&kc->c, 1); 757 filt_timerstart(kn, to); 758 759 return (0); 760 } 761 762 static void 763 filt_timerstart(struct knote *kn, sbintime_t to) 764 { 765 struct kq_timer_cb_data *kc; 766 767 kc = kn->kn_ptr.p_v; 768 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 769 kc->next = to; 770 kc->to = 0; 771 } else { 772 kc->next = to + sbinuptime(); 773 kc->to = to; 774 } 775 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn, 776 PCPU_GET(cpuid), C_ABSOLUTE); 777 } 778 779 static void 780 filt_timerdetach(struct knote *kn) 781 { 782 struct kq_timer_cb_data *kc; 783 unsigned int old __unused; 784 785 kc = kn->kn_ptr.p_v; 786 callout_drain(&kc->c); 787 free(kc, M_KQUEUE); 788 old = atomic_fetchadd_int(&kq_ncallouts, -1); 789 KASSERT(old > 0, ("Number of callouts cannot become negative")); 790 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 791 } 792 793 static void 794 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 795 { 796 struct kq_timer_cb_data *kc; 797 struct kqueue *kq; 798 sbintime_t to; 799 int error; 800 801 switch (type) { 802 case EVENT_REGISTER: 803 /* Handle re-added timers that update data/fflags */ 804 if (kev->flags & EV_ADD) { 805 kc = kn->kn_ptr.p_v; 806 807 /* Drain any existing callout. */ 808 callout_drain(&kc->c); 809 810 /* Throw away any existing undelivered record 811 * of the timer expiration. This is done under 812 * the presumption that if a process is 813 * re-adding this timer with new parameters, 814 * it is no longer interested in what may have 815 * happened under the old parameters. If it is 816 * interested, it can wait for the expiration, 817 * delete the old timer definition, and then 818 * add the new one. 819 * 820 * This has to be done while the kq is locked: 821 * - if enqueued, dequeue 822 * - make it no longer active 823 * - clear the count of expiration events 824 */ 825 kq = kn->kn_kq; 826 KQ_LOCK(kq); 827 if (kn->kn_status & KN_QUEUED) 828 knote_dequeue(kn); 829 830 kn->kn_status &= ~KN_ACTIVE; 831 kn->kn_data = 0; 832 KQ_UNLOCK(kq); 833 834 /* Reschedule timer based on new data/fflags */ 835 kn->kn_sfflags = kev->fflags; 836 kn->kn_sdata = kev->data; 837 error = filt_timervalidate(kn, &to); 838 if (error != 0) { 839 kn->kn_flags |= EV_ERROR; 840 kn->kn_data = error; 841 } else 842 filt_timerstart(kn, to); 843 } 844 break; 845 846 case EVENT_PROCESS: 847 *kev = kn->kn_kevent; 848 if (kn->kn_flags & EV_CLEAR) { 849 kn->kn_data = 0; 850 kn->kn_fflags = 0; 851 } 852 break; 853 854 default: 855 panic("filt_timertouch() - invalid type (%ld)", type); 856 break; 857 } 858 } 859 860 static int 861 filt_timer(struct knote *kn, long hint) 862 { 863 864 return (kn->kn_data != 0); 865 } 866 867 static int 868 filt_userattach(struct knote *kn) 869 { 870 871 /* 872 * EVFILT_USER knotes are not attached to anything in the kernel. 873 */ 874 kn->kn_hook = NULL; 875 if (kn->kn_fflags & NOTE_TRIGGER) 876 kn->kn_hookid = 1; 877 else 878 kn->kn_hookid = 0; 879 return (0); 880 } 881 882 static void 883 filt_userdetach(__unused struct knote *kn) 884 { 885 886 /* 887 * EVFILT_USER knotes are not attached to anything in the kernel. 888 */ 889 } 890 891 static int 892 filt_user(struct knote *kn, __unused long hint) 893 { 894 895 return (kn->kn_hookid); 896 } 897 898 static void 899 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 900 { 901 u_int ffctrl; 902 903 switch (type) { 904 case EVENT_REGISTER: 905 if (kev->fflags & NOTE_TRIGGER) 906 kn->kn_hookid = 1; 907 908 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 909 kev->fflags &= NOTE_FFLAGSMASK; 910 switch (ffctrl) { 911 case NOTE_FFNOP: 912 break; 913 914 case NOTE_FFAND: 915 kn->kn_sfflags &= kev->fflags; 916 break; 917 918 case NOTE_FFOR: 919 kn->kn_sfflags |= kev->fflags; 920 break; 921 922 case NOTE_FFCOPY: 923 kn->kn_sfflags = kev->fflags; 924 break; 925 926 default: 927 /* XXX Return error? */ 928 break; 929 } 930 kn->kn_sdata = kev->data; 931 if (kev->flags & EV_CLEAR) { 932 kn->kn_hookid = 0; 933 kn->kn_data = 0; 934 kn->kn_fflags = 0; 935 } 936 break; 937 938 case EVENT_PROCESS: 939 *kev = kn->kn_kevent; 940 kev->fflags = kn->kn_sfflags; 941 kev->data = kn->kn_sdata; 942 if (kn->kn_flags & EV_CLEAR) { 943 kn->kn_hookid = 0; 944 kn->kn_data = 0; 945 kn->kn_fflags = 0; 946 } 947 break; 948 949 default: 950 panic("filt_usertouch() - invalid type (%ld)", type); 951 break; 952 } 953 } 954 955 int 956 sys_kqueue(struct thread *td, struct kqueue_args *uap) 957 { 958 959 return (kern_kqueue(td, 0, NULL)); 960 } 961 962 static void 963 kqueue_init(struct kqueue *kq) 964 { 965 966 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 967 TAILQ_INIT(&kq->kq_head); 968 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 969 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 970 } 971 972 int 973 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 974 { 975 struct filedesc *fdp; 976 struct kqueue *kq; 977 struct file *fp; 978 struct ucred *cred; 979 int fd, error; 980 981 fdp = td->td_proc->p_fd; 982 cred = td->td_ucred; 983 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 984 return (ENOMEM); 985 986 error = falloc_caps(td, &fp, &fd, flags, fcaps); 987 if (error != 0) { 988 chgkqcnt(cred->cr_ruidinfo, -1, 0); 989 return (error); 990 } 991 992 /* An extra reference on `fp' has been held for us by falloc(). */ 993 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 994 kqueue_init(kq); 995 kq->kq_fdp = fdp; 996 kq->kq_cred = crhold(cred); 997 998 FILEDESC_XLOCK(fdp); 999 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 1000 FILEDESC_XUNLOCK(fdp); 1001 1002 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 1003 fdrop(fp, td); 1004 1005 td->td_retval[0] = fd; 1006 return (0); 1007 } 1008 1009 struct g_kevent_args { 1010 int fd; 1011 void *changelist; 1012 int nchanges; 1013 void *eventlist; 1014 int nevents; 1015 const struct timespec *timeout; 1016 }; 1017 1018 int 1019 sys_kevent(struct thread *td, struct kevent_args *uap) 1020 { 1021 struct kevent_copyops k_ops = { 1022 .arg = uap, 1023 .k_copyout = kevent_copyout, 1024 .k_copyin = kevent_copyin, 1025 .kevent_size = sizeof(struct kevent), 1026 }; 1027 struct g_kevent_args gk_args = { 1028 .fd = uap->fd, 1029 .changelist = uap->changelist, 1030 .nchanges = uap->nchanges, 1031 .eventlist = uap->eventlist, 1032 .nevents = uap->nevents, 1033 .timeout = uap->timeout, 1034 }; 1035 1036 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); 1037 } 1038 1039 static int 1040 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, 1041 struct kevent_copyops *k_ops, const char *struct_name) 1042 { 1043 struct timespec ts, *tsp; 1044 #ifdef KTRACE 1045 struct kevent *eventlist = uap->eventlist; 1046 #endif 1047 int error; 1048 1049 if (uap->timeout != NULL) { 1050 error = copyin(uap->timeout, &ts, sizeof(ts)); 1051 if (error) 1052 return (error); 1053 tsp = &ts; 1054 } else 1055 tsp = NULL; 1056 1057 #ifdef KTRACE 1058 if (KTRPOINT(td, KTR_STRUCT_ARRAY)) 1059 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, 1060 uap->nchanges, k_ops->kevent_size); 1061 #endif 1062 1063 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 1064 k_ops, tsp); 1065 1066 #ifdef KTRACE 1067 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) 1068 ktrstructarray(struct_name, UIO_USERSPACE, eventlist, 1069 td->td_retval[0], k_ops->kevent_size); 1070 #endif 1071 1072 return (error); 1073 } 1074 1075 /* 1076 * Copy 'count' items into the destination list pointed to by uap->eventlist. 1077 */ 1078 static int 1079 kevent_copyout(void *arg, struct kevent *kevp, int count) 1080 { 1081 struct kevent_args *uap; 1082 int error; 1083 1084 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1085 uap = (struct kevent_args *)arg; 1086 1087 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 1088 if (error == 0) 1089 uap->eventlist += count; 1090 return (error); 1091 } 1092 1093 /* 1094 * Copy 'count' items from the list pointed to by uap->changelist. 1095 */ 1096 static int 1097 kevent_copyin(void *arg, struct kevent *kevp, int count) 1098 { 1099 struct kevent_args *uap; 1100 int error; 1101 1102 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1103 uap = (struct kevent_args *)arg; 1104 1105 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1106 if (error == 0) 1107 uap->changelist += count; 1108 return (error); 1109 } 1110 1111 #ifdef COMPAT_FREEBSD11 1112 static int 1113 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1114 { 1115 struct freebsd11_kevent_args *uap; 1116 struct kevent_freebsd11 kev11; 1117 int error, i; 1118 1119 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1120 uap = (struct freebsd11_kevent_args *)arg; 1121 1122 for (i = 0; i < count; i++) { 1123 kev11.ident = kevp->ident; 1124 kev11.filter = kevp->filter; 1125 kev11.flags = kevp->flags; 1126 kev11.fflags = kevp->fflags; 1127 kev11.data = kevp->data; 1128 kev11.udata = kevp->udata; 1129 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1130 if (error != 0) 1131 break; 1132 uap->eventlist++; 1133 kevp++; 1134 } 1135 return (error); 1136 } 1137 1138 /* 1139 * Copy 'count' items from the list pointed to by uap->changelist. 1140 */ 1141 static int 1142 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1143 { 1144 struct freebsd11_kevent_args *uap; 1145 struct kevent_freebsd11 kev11; 1146 int error, i; 1147 1148 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1149 uap = (struct freebsd11_kevent_args *)arg; 1150 1151 for (i = 0; i < count; i++) { 1152 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1153 if (error != 0) 1154 break; 1155 kevp->ident = kev11.ident; 1156 kevp->filter = kev11.filter; 1157 kevp->flags = kev11.flags; 1158 kevp->fflags = kev11.fflags; 1159 kevp->data = (uintptr_t)kev11.data; 1160 kevp->udata = kev11.udata; 1161 bzero(&kevp->ext, sizeof(kevp->ext)); 1162 uap->changelist++; 1163 kevp++; 1164 } 1165 return (error); 1166 } 1167 1168 int 1169 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1170 { 1171 struct kevent_copyops k_ops = { 1172 .arg = uap, 1173 .k_copyout = kevent11_copyout, 1174 .k_copyin = kevent11_copyin, 1175 .kevent_size = sizeof(struct kevent_freebsd11), 1176 }; 1177 struct g_kevent_args gk_args = { 1178 .fd = uap->fd, 1179 .changelist = uap->changelist, 1180 .nchanges = uap->nchanges, 1181 .eventlist = uap->eventlist, 1182 .nevents = uap->nevents, 1183 .timeout = uap->timeout, 1184 }; 1185 1186 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11")); 1187 } 1188 #endif 1189 1190 int 1191 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1192 struct kevent_copyops *k_ops, const struct timespec *timeout) 1193 { 1194 cap_rights_t rights; 1195 struct file *fp; 1196 int error; 1197 1198 cap_rights_init(&rights); 1199 if (nchanges > 0) 1200 cap_rights_set(&rights, CAP_KQUEUE_CHANGE); 1201 if (nevents > 0) 1202 cap_rights_set(&rights, CAP_KQUEUE_EVENT); 1203 error = fget(td, fd, &rights, &fp); 1204 if (error != 0) 1205 return (error); 1206 1207 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1208 fdrop(fp, td); 1209 1210 return (error); 1211 } 1212 1213 static int 1214 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1215 struct kevent_copyops *k_ops, const struct timespec *timeout) 1216 { 1217 struct kevent keva[KQ_NEVENTS]; 1218 struct kevent *kevp, *changes; 1219 int i, n, nerrors, error; 1220 1221 nerrors = 0; 1222 while (nchanges > 0) { 1223 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 1224 error = k_ops->k_copyin(k_ops->arg, keva, n); 1225 if (error) 1226 return (error); 1227 changes = keva; 1228 for (i = 0; i < n; i++) { 1229 kevp = &changes[i]; 1230 if (!kevp->filter) 1231 continue; 1232 kevp->flags &= ~EV_SYSFLAGS; 1233 error = kqueue_register(kq, kevp, td, 1); 1234 if (error || (kevp->flags & EV_RECEIPT)) { 1235 if (nevents == 0) 1236 return (error); 1237 kevp->flags = EV_ERROR; 1238 kevp->data = error; 1239 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 1240 nevents--; 1241 nerrors++; 1242 } 1243 } 1244 nchanges -= n; 1245 } 1246 if (nerrors) { 1247 td->td_retval[0] = nerrors; 1248 return (0); 1249 } 1250 1251 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1252 } 1253 1254 int 1255 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1256 struct kevent_copyops *k_ops, const struct timespec *timeout) 1257 { 1258 struct kqueue *kq; 1259 int error; 1260 1261 error = kqueue_acquire(fp, &kq); 1262 if (error != 0) 1263 return (error); 1264 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1265 kqueue_release(kq, 0); 1266 return (error); 1267 } 1268 1269 /* 1270 * Performs a kevent() call on a temporarily created kqueue. This can be 1271 * used to perform one-shot polling, similar to poll() and select(). 1272 */ 1273 int 1274 kern_kevent_anonymous(struct thread *td, int nevents, 1275 struct kevent_copyops *k_ops) 1276 { 1277 struct kqueue kq = {}; 1278 int error; 1279 1280 kqueue_init(&kq); 1281 kq.kq_refcnt = 1; 1282 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1283 kqueue_drain(&kq, td); 1284 kqueue_destroy(&kq); 1285 return (error); 1286 } 1287 1288 int 1289 kqueue_add_filteropts(int filt, struct filterops *filtops) 1290 { 1291 int error; 1292 1293 error = 0; 1294 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1295 printf( 1296 "trying to add a filterop that is out of range: %d is beyond %d\n", 1297 ~filt, EVFILT_SYSCOUNT); 1298 return EINVAL; 1299 } 1300 mtx_lock(&filterops_lock); 1301 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1302 sysfilt_ops[~filt].for_fop != NULL) 1303 error = EEXIST; 1304 else { 1305 sysfilt_ops[~filt].for_fop = filtops; 1306 sysfilt_ops[~filt].for_refcnt = 0; 1307 } 1308 mtx_unlock(&filterops_lock); 1309 1310 return (error); 1311 } 1312 1313 int 1314 kqueue_del_filteropts(int filt) 1315 { 1316 int error; 1317 1318 error = 0; 1319 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1320 return EINVAL; 1321 1322 mtx_lock(&filterops_lock); 1323 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1324 sysfilt_ops[~filt].for_fop == NULL) 1325 error = EINVAL; 1326 else if (sysfilt_ops[~filt].for_refcnt != 0) 1327 error = EBUSY; 1328 else { 1329 sysfilt_ops[~filt].for_fop = &null_filtops; 1330 sysfilt_ops[~filt].for_refcnt = 0; 1331 } 1332 mtx_unlock(&filterops_lock); 1333 1334 return error; 1335 } 1336 1337 static struct filterops * 1338 kqueue_fo_find(int filt) 1339 { 1340 1341 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1342 return NULL; 1343 1344 if (sysfilt_ops[~filt].for_nolock) 1345 return sysfilt_ops[~filt].for_fop; 1346 1347 mtx_lock(&filterops_lock); 1348 sysfilt_ops[~filt].for_refcnt++; 1349 if (sysfilt_ops[~filt].for_fop == NULL) 1350 sysfilt_ops[~filt].for_fop = &null_filtops; 1351 mtx_unlock(&filterops_lock); 1352 1353 return sysfilt_ops[~filt].for_fop; 1354 } 1355 1356 static void 1357 kqueue_fo_release(int filt) 1358 { 1359 1360 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1361 return; 1362 1363 if (sysfilt_ops[~filt].for_nolock) 1364 return; 1365 1366 mtx_lock(&filterops_lock); 1367 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1368 ("filter object refcount not valid on release")); 1369 sysfilt_ops[~filt].for_refcnt--; 1370 mtx_unlock(&filterops_lock); 1371 } 1372 1373 /* 1374 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will 1375 * influence if memory allocation should wait. Make sure it is 0 if you 1376 * hold any mutexes. 1377 */ 1378 static int 1379 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) 1380 { 1381 struct filterops *fops; 1382 struct file *fp; 1383 struct knote *kn, *tkn; 1384 struct knlist *knl; 1385 int error, filt, event; 1386 int haskqglobal, filedesc_unlock; 1387 1388 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1389 return (EINVAL); 1390 1391 fp = NULL; 1392 kn = NULL; 1393 knl = NULL; 1394 error = 0; 1395 haskqglobal = 0; 1396 filedesc_unlock = 0; 1397 1398 filt = kev->filter; 1399 fops = kqueue_fo_find(filt); 1400 if (fops == NULL) 1401 return EINVAL; 1402 1403 if (kev->flags & EV_ADD) { 1404 /* 1405 * Prevent waiting with locks. Non-sleepable 1406 * allocation failures are handled in the loop, only 1407 * if the spare knote appears to be actually required. 1408 */ 1409 tkn = knote_alloc(waitok); 1410 } else { 1411 tkn = NULL; 1412 } 1413 1414 findkn: 1415 if (fops->f_isfd) { 1416 KASSERT(td != NULL, ("td is NULL")); 1417 if (kev->ident > INT_MAX) 1418 error = EBADF; 1419 else 1420 error = fget(td, kev->ident, &cap_event_rights, &fp); 1421 if (error) 1422 goto done; 1423 1424 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1425 kev->ident, 0) != 0) { 1426 /* try again */ 1427 fdrop(fp, td); 1428 fp = NULL; 1429 error = kqueue_expand(kq, fops, kev->ident, waitok); 1430 if (error) 1431 goto done; 1432 goto findkn; 1433 } 1434 1435 if (fp->f_type == DTYPE_KQUEUE) { 1436 /* 1437 * If we add some intelligence about what we are doing, 1438 * we should be able to support events on ourselves. 1439 * We need to know when we are doing this to prevent 1440 * getting both the knlist lock and the kq lock since 1441 * they are the same thing. 1442 */ 1443 if (fp->f_data == kq) { 1444 error = EINVAL; 1445 goto done; 1446 } 1447 1448 /* 1449 * Pre-lock the filedesc before the global 1450 * lock mutex, see the comment in 1451 * kqueue_close(). 1452 */ 1453 FILEDESC_XLOCK(td->td_proc->p_fd); 1454 filedesc_unlock = 1; 1455 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1456 } 1457 1458 KQ_LOCK(kq); 1459 if (kev->ident < kq->kq_knlistsize) { 1460 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1461 if (kev->filter == kn->kn_filter) 1462 break; 1463 } 1464 } else { 1465 if ((kev->flags & EV_ADD) == EV_ADD) 1466 kqueue_expand(kq, fops, kev->ident, waitok); 1467 1468 KQ_LOCK(kq); 1469 1470 /* 1471 * If possible, find an existing knote to use for this kevent. 1472 */ 1473 if (kev->filter == EVFILT_PROC && 1474 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1475 /* This is an internal creation of a process tracking 1476 * note. Don't attempt to coalesce this with an 1477 * existing note. 1478 */ 1479 ; 1480 } else if (kq->kq_knhashmask != 0) { 1481 struct klist *list; 1482 1483 list = &kq->kq_knhash[ 1484 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1485 SLIST_FOREACH(kn, list, kn_link) 1486 if (kev->ident == kn->kn_id && 1487 kev->filter == kn->kn_filter) 1488 break; 1489 } 1490 } 1491 1492 /* knote is in the process of changing, wait for it to stabilize. */ 1493 if (kn != NULL && kn_in_flux(kn)) { 1494 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1495 if (filedesc_unlock) { 1496 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1497 filedesc_unlock = 0; 1498 } 1499 kq->kq_state |= KQ_FLUXWAIT; 1500 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1501 if (fp != NULL) { 1502 fdrop(fp, td); 1503 fp = NULL; 1504 } 1505 goto findkn; 1506 } 1507 1508 /* 1509 * kn now contains the matching knote, or NULL if no match 1510 */ 1511 if (kn == NULL) { 1512 if (kev->flags & EV_ADD) { 1513 kn = tkn; 1514 tkn = NULL; 1515 if (kn == NULL) { 1516 KQ_UNLOCK(kq); 1517 error = ENOMEM; 1518 goto done; 1519 } 1520 kn->kn_fp = fp; 1521 kn->kn_kq = kq; 1522 kn->kn_fop = fops; 1523 /* 1524 * apply reference counts to knote structure, and 1525 * do not release it at the end of this routine. 1526 */ 1527 fops = NULL; 1528 fp = NULL; 1529 1530 kn->kn_sfflags = kev->fflags; 1531 kn->kn_sdata = kev->data; 1532 kev->fflags = 0; 1533 kev->data = 0; 1534 kn->kn_kevent = *kev; 1535 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1536 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1537 kn->kn_status = KN_DETACHED; 1538 kn_enter_flux(kn); 1539 1540 error = knote_attach(kn, kq); 1541 KQ_UNLOCK(kq); 1542 if (error != 0) { 1543 tkn = kn; 1544 goto done; 1545 } 1546 1547 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1548 knote_drop_detached(kn, td); 1549 goto done; 1550 } 1551 knl = kn_list_lock(kn); 1552 goto done_ev_add; 1553 } else { 1554 /* No matching knote and the EV_ADD flag is not set. */ 1555 KQ_UNLOCK(kq); 1556 error = ENOENT; 1557 goto done; 1558 } 1559 } 1560 1561 if (kev->flags & EV_DELETE) { 1562 kn_enter_flux(kn); 1563 KQ_UNLOCK(kq); 1564 knote_drop(kn, td); 1565 goto done; 1566 } 1567 1568 if (kev->flags & EV_FORCEONESHOT) { 1569 kn->kn_flags |= EV_ONESHOT; 1570 KNOTE_ACTIVATE(kn, 1); 1571 } 1572 1573 /* 1574 * The user may change some filter values after the initial EV_ADD, 1575 * but doing so will not reset any filter which has already been 1576 * triggered. 1577 */ 1578 kn->kn_status |= KN_SCAN; 1579 kn_enter_flux(kn); 1580 KQ_UNLOCK(kq); 1581 knl = kn_list_lock(kn); 1582 kn->kn_kevent.udata = kev->udata; 1583 if (!fops->f_isfd && fops->f_touch != NULL) { 1584 fops->f_touch(kn, kev, EVENT_REGISTER); 1585 } else { 1586 kn->kn_sfflags = kev->fflags; 1587 kn->kn_sdata = kev->data; 1588 } 1589 1590 /* 1591 * We can get here with kn->kn_knlist == NULL. This can happen when 1592 * the initial attach event decides that the event is "completed" 1593 * already. i.e. filt_procattach is called on a zombie process. It 1594 * will call filt_proc which will remove it from the list, and NULL 1595 * kn_knlist. 1596 */ 1597 done_ev_add: 1598 if ((kev->flags & EV_ENABLE) != 0) 1599 kn->kn_status &= ~KN_DISABLED; 1600 else if ((kev->flags & EV_DISABLE) != 0) 1601 kn->kn_status |= KN_DISABLED; 1602 1603 if ((kn->kn_status & KN_DISABLED) == 0) 1604 event = kn->kn_fop->f_event(kn, 0); 1605 else 1606 event = 0; 1607 1608 KQ_LOCK(kq); 1609 if (event) 1610 kn->kn_status |= KN_ACTIVE; 1611 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1612 KN_ACTIVE) 1613 knote_enqueue(kn); 1614 kn->kn_status &= ~KN_SCAN; 1615 kn_leave_flux(kn); 1616 kn_list_unlock(knl); 1617 KQ_UNLOCK_FLUX(kq); 1618 1619 done: 1620 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1621 if (filedesc_unlock) 1622 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1623 if (fp != NULL) 1624 fdrop(fp, td); 1625 knote_free(tkn); 1626 if (fops != NULL) 1627 kqueue_fo_release(filt); 1628 return (error); 1629 } 1630 1631 static int 1632 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1633 { 1634 int error; 1635 struct kqueue *kq; 1636 1637 error = 0; 1638 1639 kq = fp->f_data; 1640 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1641 return (EBADF); 1642 *kqp = kq; 1643 KQ_LOCK(kq); 1644 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1645 KQ_UNLOCK(kq); 1646 return (EBADF); 1647 } 1648 kq->kq_refcnt++; 1649 KQ_UNLOCK(kq); 1650 1651 return error; 1652 } 1653 1654 static void 1655 kqueue_release(struct kqueue *kq, int locked) 1656 { 1657 if (locked) 1658 KQ_OWNED(kq); 1659 else 1660 KQ_LOCK(kq); 1661 kq->kq_refcnt--; 1662 if (kq->kq_refcnt == 1) 1663 wakeup(&kq->kq_refcnt); 1664 if (!locked) 1665 KQ_UNLOCK(kq); 1666 } 1667 1668 static void 1669 kqueue_schedtask(struct kqueue *kq) 1670 { 1671 1672 KQ_OWNED(kq); 1673 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1674 ("scheduling kqueue task while draining")); 1675 1676 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1677 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1678 kq->kq_state |= KQ_TASKSCHED; 1679 } 1680 } 1681 1682 /* 1683 * Expand the kq to make sure we have storage for fops/ident pair. 1684 * 1685 * Return 0 on success (or no work necessary), return errno on failure. 1686 * 1687 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 1688 * If kqueue_register is called from a non-fd context, there usually/should 1689 * be no locks held. 1690 */ 1691 static int 1692 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1693 int waitok) 1694 { 1695 struct klist *list, *tmp_knhash, *to_free; 1696 u_long tmp_knhashmask; 1697 int size; 1698 int fd; 1699 int mflag = waitok ? M_WAITOK : M_NOWAIT; 1700 1701 KQ_NOTOWNED(kq); 1702 1703 to_free = NULL; 1704 if (fops->f_isfd) { 1705 fd = ident; 1706 if (kq->kq_knlistsize <= fd) { 1707 size = kq->kq_knlistsize; 1708 while (size <= fd) 1709 size += KQEXTENT; 1710 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1711 if (list == NULL) 1712 return ENOMEM; 1713 KQ_LOCK(kq); 1714 if (kq->kq_knlistsize > fd) { 1715 to_free = list; 1716 list = NULL; 1717 } else { 1718 if (kq->kq_knlist != NULL) { 1719 bcopy(kq->kq_knlist, list, 1720 kq->kq_knlistsize * sizeof(*list)); 1721 to_free = kq->kq_knlist; 1722 kq->kq_knlist = NULL; 1723 } 1724 bzero((caddr_t)list + 1725 kq->kq_knlistsize * sizeof(*list), 1726 (size - kq->kq_knlistsize) * sizeof(*list)); 1727 kq->kq_knlistsize = size; 1728 kq->kq_knlist = list; 1729 } 1730 KQ_UNLOCK(kq); 1731 } 1732 } else { 1733 if (kq->kq_knhashmask == 0) { 1734 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1735 &tmp_knhashmask); 1736 if (tmp_knhash == NULL) 1737 return ENOMEM; 1738 KQ_LOCK(kq); 1739 if (kq->kq_knhashmask == 0) { 1740 kq->kq_knhash = tmp_knhash; 1741 kq->kq_knhashmask = tmp_knhashmask; 1742 } else { 1743 to_free = tmp_knhash; 1744 } 1745 KQ_UNLOCK(kq); 1746 } 1747 } 1748 free(to_free, M_KQUEUE); 1749 1750 KQ_NOTOWNED(kq); 1751 return 0; 1752 } 1753 1754 static void 1755 kqueue_task(void *arg, int pending) 1756 { 1757 struct kqueue *kq; 1758 int haskqglobal; 1759 1760 haskqglobal = 0; 1761 kq = arg; 1762 1763 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1764 KQ_LOCK(kq); 1765 1766 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1767 1768 kq->kq_state &= ~KQ_TASKSCHED; 1769 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1770 wakeup(&kq->kq_state); 1771 } 1772 KQ_UNLOCK(kq); 1773 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1774 } 1775 1776 /* 1777 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1778 * We treat KN_MARKER knotes as if they are in flux. 1779 */ 1780 static int 1781 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1782 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1783 { 1784 struct kevent *kevp; 1785 struct knote *kn, *marker; 1786 struct knlist *knl; 1787 sbintime_t asbt, rsbt; 1788 int count, error, haskqglobal, influx, nkev, touch; 1789 1790 count = maxevents; 1791 nkev = 0; 1792 error = 0; 1793 haskqglobal = 0; 1794 1795 if (maxevents == 0) 1796 goto done_nl; 1797 1798 rsbt = 0; 1799 if (tsp != NULL) { 1800 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1801 tsp->tv_nsec >= 1000000000) { 1802 error = EINVAL; 1803 goto done_nl; 1804 } 1805 if (timespecisset(tsp)) { 1806 if (tsp->tv_sec <= INT32_MAX) { 1807 rsbt = tstosbt(*tsp); 1808 if (TIMESEL(&asbt, rsbt)) 1809 asbt += tc_tick_sbt; 1810 if (asbt <= SBT_MAX - rsbt) 1811 asbt += rsbt; 1812 else 1813 asbt = 0; 1814 rsbt >>= tc_precexp; 1815 } else 1816 asbt = 0; 1817 } else 1818 asbt = -1; 1819 } else 1820 asbt = 0; 1821 marker = knote_alloc(1); 1822 marker->kn_status = KN_MARKER; 1823 KQ_LOCK(kq); 1824 1825 retry: 1826 kevp = keva; 1827 if (kq->kq_count == 0) { 1828 if (asbt == -1) { 1829 error = EWOULDBLOCK; 1830 } else { 1831 kq->kq_state |= KQ_SLEEP; 1832 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1833 "kqread", asbt, rsbt, C_ABSOLUTE); 1834 } 1835 if (error == 0) 1836 goto retry; 1837 /* don't restart after signals... */ 1838 if (error == ERESTART) 1839 error = EINTR; 1840 else if (error == EWOULDBLOCK) 1841 error = 0; 1842 goto done; 1843 } 1844 1845 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1846 influx = 0; 1847 while (count) { 1848 KQ_OWNED(kq); 1849 kn = TAILQ_FIRST(&kq->kq_head); 1850 1851 if ((kn->kn_status == KN_MARKER && kn != marker) || 1852 kn_in_flux(kn)) { 1853 if (influx) { 1854 influx = 0; 1855 KQ_FLUX_WAKEUP(kq); 1856 } 1857 kq->kq_state |= KQ_FLUXWAIT; 1858 error = msleep(kq, &kq->kq_lock, PSOCK, 1859 "kqflxwt", 0); 1860 continue; 1861 } 1862 1863 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1864 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1865 kn->kn_status &= ~KN_QUEUED; 1866 kq->kq_count--; 1867 continue; 1868 } 1869 if (kn == marker) { 1870 KQ_FLUX_WAKEUP(kq); 1871 if (count == maxevents) 1872 goto retry; 1873 goto done; 1874 } 1875 KASSERT(!kn_in_flux(kn), 1876 ("knote %p is unexpectedly in flux", kn)); 1877 1878 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1879 kn->kn_status &= ~KN_QUEUED; 1880 kn_enter_flux(kn); 1881 kq->kq_count--; 1882 KQ_UNLOCK(kq); 1883 /* 1884 * We don't need to lock the list since we've 1885 * marked it as in flux. 1886 */ 1887 knote_drop(kn, td); 1888 KQ_LOCK(kq); 1889 continue; 1890 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1891 kn->kn_status &= ~KN_QUEUED; 1892 kn_enter_flux(kn); 1893 kq->kq_count--; 1894 KQ_UNLOCK(kq); 1895 /* 1896 * We don't need to lock the list since we've 1897 * marked the knote as being in flux. 1898 */ 1899 *kevp = kn->kn_kevent; 1900 knote_drop(kn, td); 1901 KQ_LOCK(kq); 1902 kn = NULL; 1903 } else { 1904 kn->kn_status |= KN_SCAN; 1905 kn_enter_flux(kn); 1906 KQ_UNLOCK(kq); 1907 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 1908 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1909 knl = kn_list_lock(kn); 1910 if (kn->kn_fop->f_event(kn, 0) == 0) { 1911 KQ_LOCK(kq); 1912 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1913 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 1914 KN_SCAN); 1915 kn_leave_flux(kn); 1916 kq->kq_count--; 1917 kn_list_unlock(knl); 1918 influx = 1; 1919 continue; 1920 } 1921 touch = (!kn->kn_fop->f_isfd && 1922 kn->kn_fop->f_touch != NULL); 1923 if (touch) 1924 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 1925 else 1926 *kevp = kn->kn_kevent; 1927 KQ_LOCK(kq); 1928 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1929 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 1930 /* 1931 * Manually clear knotes who weren't 1932 * 'touch'ed. 1933 */ 1934 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 1935 kn->kn_data = 0; 1936 kn->kn_fflags = 0; 1937 } 1938 if (kn->kn_flags & EV_DISPATCH) 1939 kn->kn_status |= KN_DISABLED; 1940 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1941 kq->kq_count--; 1942 } else 1943 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1944 1945 kn->kn_status &= ~KN_SCAN; 1946 kn_leave_flux(kn); 1947 kn_list_unlock(knl); 1948 influx = 1; 1949 } 1950 1951 /* we are returning a copy to the user */ 1952 kevp++; 1953 nkev++; 1954 count--; 1955 1956 if (nkev == KQ_NEVENTS) { 1957 influx = 0; 1958 KQ_UNLOCK_FLUX(kq); 1959 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1960 nkev = 0; 1961 kevp = keva; 1962 KQ_LOCK(kq); 1963 if (error) 1964 break; 1965 } 1966 } 1967 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 1968 done: 1969 KQ_OWNED(kq); 1970 KQ_UNLOCK_FLUX(kq); 1971 knote_free(marker); 1972 done_nl: 1973 KQ_NOTOWNED(kq); 1974 if (nkev != 0) 1975 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1976 td->td_retval[0] = maxevents - count; 1977 return (error); 1978 } 1979 1980 /*ARGSUSED*/ 1981 static int 1982 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 1983 struct ucred *active_cred, struct thread *td) 1984 { 1985 /* 1986 * Enabling sigio causes two major problems: 1987 * 1) infinite recursion: 1988 * Synopsys: kevent is being used to track signals and have FIOASYNC 1989 * set. On receipt of a signal this will cause a kqueue to recurse 1990 * into itself over and over. Sending the sigio causes the kqueue 1991 * to become ready, which in turn posts sigio again, forever. 1992 * Solution: this can be solved by setting a flag in the kqueue that 1993 * we have a SIGIO in progress. 1994 * 2) locking problems: 1995 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 1996 * us above the proc and pgrp locks. 1997 * Solution: Post a signal using an async mechanism, being sure to 1998 * record a generation count in the delivery so that we do not deliver 1999 * a signal to the wrong process. 2000 * 2001 * Note, these two mechanisms are somewhat mutually exclusive! 2002 */ 2003 #if 0 2004 struct kqueue *kq; 2005 2006 kq = fp->f_data; 2007 switch (cmd) { 2008 case FIOASYNC: 2009 if (*(int *)data) { 2010 kq->kq_state |= KQ_ASYNC; 2011 } else { 2012 kq->kq_state &= ~KQ_ASYNC; 2013 } 2014 return (0); 2015 2016 case FIOSETOWN: 2017 return (fsetown(*(int *)data, &kq->kq_sigio)); 2018 2019 case FIOGETOWN: 2020 *(int *)data = fgetown(&kq->kq_sigio); 2021 return (0); 2022 } 2023 #endif 2024 2025 return (ENOTTY); 2026 } 2027 2028 /*ARGSUSED*/ 2029 static int 2030 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 2031 struct thread *td) 2032 { 2033 struct kqueue *kq; 2034 int revents = 0; 2035 int error; 2036 2037 if ((error = kqueue_acquire(fp, &kq))) 2038 return POLLERR; 2039 2040 KQ_LOCK(kq); 2041 if (events & (POLLIN | POLLRDNORM)) { 2042 if (kq->kq_count) { 2043 revents |= events & (POLLIN | POLLRDNORM); 2044 } else { 2045 selrecord(td, &kq->kq_sel); 2046 if (SEL_WAITING(&kq->kq_sel)) 2047 kq->kq_state |= KQ_SEL; 2048 } 2049 } 2050 kqueue_release(kq, 1); 2051 KQ_UNLOCK(kq); 2052 return (revents); 2053 } 2054 2055 /*ARGSUSED*/ 2056 static int 2057 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2058 struct thread *td) 2059 { 2060 2061 bzero((void *)st, sizeof *st); 2062 /* 2063 * We no longer return kq_count because the unlocked value is useless. 2064 * If you spent all this time getting the count, why not spend your 2065 * syscall better by calling kevent? 2066 * 2067 * XXX - This is needed for libc_r. 2068 */ 2069 st->st_mode = S_IFIFO; 2070 return (0); 2071 } 2072 2073 static void 2074 kqueue_drain(struct kqueue *kq, struct thread *td) 2075 { 2076 struct knote *kn; 2077 int i; 2078 2079 KQ_LOCK(kq); 2080 2081 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2082 ("kqueue already closing")); 2083 kq->kq_state |= KQ_CLOSING; 2084 if (kq->kq_refcnt > 1) 2085 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2086 2087 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2088 2089 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2090 ("kqueue's knlist not empty")); 2091 2092 for (i = 0; i < kq->kq_knlistsize; i++) { 2093 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2094 if (kn_in_flux(kn)) { 2095 kq->kq_state |= KQ_FLUXWAIT; 2096 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2097 continue; 2098 } 2099 kn_enter_flux(kn); 2100 KQ_UNLOCK(kq); 2101 knote_drop(kn, td); 2102 KQ_LOCK(kq); 2103 } 2104 } 2105 if (kq->kq_knhashmask != 0) { 2106 for (i = 0; i <= kq->kq_knhashmask; i++) { 2107 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2108 if (kn_in_flux(kn)) { 2109 kq->kq_state |= KQ_FLUXWAIT; 2110 msleep(kq, &kq->kq_lock, PSOCK, 2111 "kqclo2", 0); 2112 continue; 2113 } 2114 kn_enter_flux(kn); 2115 KQ_UNLOCK(kq); 2116 knote_drop(kn, td); 2117 KQ_LOCK(kq); 2118 } 2119 } 2120 } 2121 2122 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2123 kq->kq_state |= KQ_TASKDRAIN; 2124 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2125 } 2126 2127 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2128 selwakeuppri(&kq->kq_sel, PSOCK); 2129 if (!SEL_WAITING(&kq->kq_sel)) 2130 kq->kq_state &= ~KQ_SEL; 2131 } 2132 2133 KQ_UNLOCK(kq); 2134 } 2135 2136 static void 2137 kqueue_destroy(struct kqueue *kq) 2138 { 2139 2140 KASSERT(kq->kq_fdp == NULL, 2141 ("kqueue still attached to a file descriptor")); 2142 seldrain(&kq->kq_sel); 2143 knlist_destroy(&kq->kq_sel.si_note); 2144 mtx_destroy(&kq->kq_lock); 2145 2146 if (kq->kq_knhash != NULL) 2147 free(kq->kq_knhash, M_KQUEUE); 2148 if (kq->kq_knlist != NULL) 2149 free(kq->kq_knlist, M_KQUEUE); 2150 2151 funsetown(&kq->kq_sigio); 2152 } 2153 2154 /*ARGSUSED*/ 2155 static int 2156 kqueue_close(struct file *fp, struct thread *td) 2157 { 2158 struct kqueue *kq = fp->f_data; 2159 struct filedesc *fdp; 2160 int error; 2161 int filedesc_unlock; 2162 2163 if ((error = kqueue_acquire(fp, &kq))) 2164 return error; 2165 kqueue_drain(kq, td); 2166 2167 /* 2168 * We could be called due to the knote_drop() doing fdrop(), 2169 * called from kqueue_register(). In this case the global 2170 * lock is owned, and filedesc sx is locked before, to not 2171 * take the sleepable lock after non-sleepable. 2172 */ 2173 fdp = kq->kq_fdp; 2174 kq->kq_fdp = NULL; 2175 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2176 FILEDESC_XLOCK(fdp); 2177 filedesc_unlock = 1; 2178 } else 2179 filedesc_unlock = 0; 2180 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2181 if (filedesc_unlock) 2182 FILEDESC_XUNLOCK(fdp); 2183 2184 kqueue_destroy(kq); 2185 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2186 crfree(kq->kq_cred); 2187 free(kq, M_KQUEUE); 2188 fp->f_data = NULL; 2189 2190 return (0); 2191 } 2192 2193 static int 2194 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2195 { 2196 2197 kif->kf_type = KF_TYPE_KQUEUE; 2198 return (0); 2199 } 2200 2201 static void 2202 kqueue_wakeup(struct kqueue *kq) 2203 { 2204 KQ_OWNED(kq); 2205 2206 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2207 kq->kq_state &= ~KQ_SLEEP; 2208 wakeup(kq); 2209 } 2210 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2211 selwakeuppri(&kq->kq_sel, PSOCK); 2212 if (!SEL_WAITING(&kq->kq_sel)) 2213 kq->kq_state &= ~KQ_SEL; 2214 } 2215 if (!knlist_empty(&kq->kq_sel.si_note)) 2216 kqueue_schedtask(kq); 2217 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2218 pgsigio(&kq->kq_sigio, SIGIO, 0); 2219 } 2220 } 2221 2222 /* 2223 * Walk down a list of knotes, activating them if their event has triggered. 2224 * 2225 * There is a possibility to optimize in the case of one kq watching another. 2226 * Instead of scheduling a task to wake it up, you could pass enough state 2227 * down the chain to make up the parent kqueue. Make this code functional 2228 * first. 2229 */ 2230 void 2231 knote(struct knlist *list, long hint, int lockflags) 2232 { 2233 struct kqueue *kq; 2234 struct knote *kn, *tkn; 2235 int error; 2236 2237 if (list == NULL) 2238 return; 2239 2240 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2241 2242 if ((lockflags & KNF_LISTLOCKED) == 0) 2243 list->kl_lock(list->kl_lockarg); 2244 2245 /* 2246 * If we unlock the list lock (and enter influx), we can 2247 * eliminate the kqueue scheduling, but this will introduce 2248 * four lock/unlock's for each knote to test. Also, marker 2249 * would be needed to keep iteration position, since filters 2250 * or other threads could remove events. 2251 */ 2252 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2253 kq = kn->kn_kq; 2254 KQ_LOCK(kq); 2255 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2256 /* 2257 * Do not process the influx notes, except for 2258 * the influx coming from the kq unlock in the 2259 * kqueue_scan(). In the later case, we do 2260 * not interfere with the scan, since the code 2261 * fragment in kqueue_scan() locks the knlist, 2262 * and cannot proceed until we finished. 2263 */ 2264 KQ_UNLOCK(kq); 2265 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2266 kn_enter_flux(kn); 2267 KQ_UNLOCK(kq); 2268 error = kn->kn_fop->f_event(kn, hint); 2269 KQ_LOCK(kq); 2270 kn_leave_flux(kn); 2271 if (error) 2272 KNOTE_ACTIVATE(kn, 1); 2273 KQ_UNLOCK_FLUX(kq); 2274 } else { 2275 kn->kn_status |= KN_HASKQLOCK; 2276 if (kn->kn_fop->f_event(kn, hint)) 2277 KNOTE_ACTIVATE(kn, 1); 2278 kn->kn_status &= ~KN_HASKQLOCK; 2279 KQ_UNLOCK(kq); 2280 } 2281 } 2282 if ((lockflags & KNF_LISTLOCKED) == 0) 2283 list->kl_unlock(list->kl_lockarg); 2284 } 2285 2286 /* 2287 * add a knote to a knlist 2288 */ 2289 void 2290 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2291 { 2292 2293 KNL_ASSERT_LOCK(knl, islocked); 2294 KQ_NOTOWNED(kn->kn_kq); 2295 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2296 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2297 ("knote %p was not detached", kn)); 2298 if (!islocked) 2299 knl->kl_lock(knl->kl_lockarg); 2300 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2301 if (!islocked) 2302 knl->kl_unlock(knl->kl_lockarg); 2303 KQ_LOCK(kn->kn_kq); 2304 kn->kn_knlist = knl; 2305 kn->kn_status &= ~KN_DETACHED; 2306 KQ_UNLOCK(kn->kn_kq); 2307 } 2308 2309 static void 2310 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2311 int kqislocked) 2312 { 2313 2314 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2315 KNL_ASSERT_LOCK(knl, knlislocked); 2316 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2317 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2318 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2319 ("knote %p was already detached", kn)); 2320 if (!knlislocked) 2321 knl->kl_lock(knl->kl_lockarg); 2322 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2323 kn->kn_knlist = NULL; 2324 if (!knlislocked) 2325 kn_list_unlock(knl); 2326 if (!kqislocked) 2327 KQ_LOCK(kn->kn_kq); 2328 kn->kn_status |= KN_DETACHED; 2329 if (!kqislocked) 2330 KQ_UNLOCK(kn->kn_kq); 2331 } 2332 2333 /* 2334 * remove knote from the specified knlist 2335 */ 2336 void 2337 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2338 { 2339 2340 knlist_remove_kq(knl, kn, islocked, 0); 2341 } 2342 2343 int 2344 knlist_empty(struct knlist *knl) 2345 { 2346 2347 KNL_ASSERT_LOCKED(knl); 2348 return (SLIST_EMPTY(&knl->kl_list)); 2349 } 2350 2351 static struct mtx knlist_lock; 2352 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2353 MTX_DEF); 2354 static void knlist_mtx_lock(void *arg); 2355 static void knlist_mtx_unlock(void *arg); 2356 2357 static void 2358 knlist_mtx_lock(void *arg) 2359 { 2360 2361 mtx_lock((struct mtx *)arg); 2362 } 2363 2364 static void 2365 knlist_mtx_unlock(void *arg) 2366 { 2367 2368 mtx_unlock((struct mtx *)arg); 2369 } 2370 2371 static void 2372 knlist_mtx_assert_locked(void *arg) 2373 { 2374 2375 mtx_assert((struct mtx *)arg, MA_OWNED); 2376 } 2377 2378 static void 2379 knlist_mtx_assert_unlocked(void *arg) 2380 { 2381 2382 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2383 } 2384 2385 static void 2386 knlist_rw_rlock(void *arg) 2387 { 2388 2389 rw_rlock((struct rwlock *)arg); 2390 } 2391 2392 static void 2393 knlist_rw_runlock(void *arg) 2394 { 2395 2396 rw_runlock((struct rwlock *)arg); 2397 } 2398 2399 static void 2400 knlist_rw_assert_locked(void *arg) 2401 { 2402 2403 rw_assert((struct rwlock *)arg, RA_LOCKED); 2404 } 2405 2406 static void 2407 knlist_rw_assert_unlocked(void *arg) 2408 { 2409 2410 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2411 } 2412 2413 void 2414 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2415 void (*kl_unlock)(void *), 2416 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 2417 { 2418 2419 if (lock == NULL) 2420 knl->kl_lockarg = &knlist_lock; 2421 else 2422 knl->kl_lockarg = lock; 2423 2424 if (kl_lock == NULL) 2425 knl->kl_lock = knlist_mtx_lock; 2426 else 2427 knl->kl_lock = kl_lock; 2428 if (kl_unlock == NULL) 2429 knl->kl_unlock = knlist_mtx_unlock; 2430 else 2431 knl->kl_unlock = kl_unlock; 2432 if (kl_assert_locked == NULL) 2433 knl->kl_assert_locked = knlist_mtx_assert_locked; 2434 else 2435 knl->kl_assert_locked = kl_assert_locked; 2436 if (kl_assert_unlocked == NULL) 2437 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2438 else 2439 knl->kl_assert_unlocked = kl_assert_unlocked; 2440 2441 knl->kl_autodestroy = 0; 2442 SLIST_INIT(&knl->kl_list); 2443 } 2444 2445 void 2446 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2447 { 2448 2449 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2450 } 2451 2452 struct knlist * 2453 knlist_alloc(struct mtx *lock) 2454 { 2455 struct knlist *knl; 2456 2457 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); 2458 knlist_init_mtx(knl, lock); 2459 return (knl); 2460 } 2461 2462 void 2463 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2464 { 2465 2466 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2467 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2468 } 2469 2470 void 2471 knlist_destroy(struct knlist *knl) 2472 { 2473 2474 KASSERT(KNLIST_EMPTY(knl), 2475 ("destroying knlist %p with knotes on it", knl)); 2476 } 2477 2478 void 2479 knlist_detach(struct knlist *knl) 2480 { 2481 2482 KNL_ASSERT_LOCKED(knl); 2483 knl->kl_autodestroy = 1; 2484 if (knlist_empty(knl)) { 2485 knlist_destroy(knl); 2486 free(knl, M_KQUEUE); 2487 } 2488 } 2489 2490 /* 2491 * Even if we are locked, we may need to drop the lock to allow any influx 2492 * knotes time to "settle". 2493 */ 2494 void 2495 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2496 { 2497 struct knote *kn, *kn2; 2498 struct kqueue *kq; 2499 2500 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl)); 2501 if (islocked) 2502 KNL_ASSERT_LOCKED(knl); 2503 else { 2504 KNL_ASSERT_UNLOCKED(knl); 2505 again: /* need to reacquire lock since we have dropped it */ 2506 knl->kl_lock(knl->kl_lockarg); 2507 } 2508 2509 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2510 kq = kn->kn_kq; 2511 KQ_LOCK(kq); 2512 if (kn_in_flux(kn)) { 2513 KQ_UNLOCK(kq); 2514 continue; 2515 } 2516 knlist_remove_kq(knl, kn, 1, 1); 2517 if (killkn) { 2518 kn_enter_flux(kn); 2519 KQ_UNLOCK(kq); 2520 knote_drop_detached(kn, td); 2521 } else { 2522 /* Make sure cleared knotes disappear soon */ 2523 kn->kn_flags |= EV_EOF | EV_ONESHOT; 2524 KQ_UNLOCK(kq); 2525 } 2526 kq = NULL; 2527 } 2528 2529 if (!SLIST_EMPTY(&knl->kl_list)) { 2530 /* there are still in flux knotes remaining */ 2531 kn = SLIST_FIRST(&knl->kl_list); 2532 kq = kn->kn_kq; 2533 KQ_LOCK(kq); 2534 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock")); 2535 knl->kl_unlock(knl->kl_lockarg); 2536 kq->kq_state |= KQ_FLUXWAIT; 2537 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2538 kq = NULL; 2539 goto again; 2540 } 2541 2542 if (islocked) 2543 KNL_ASSERT_LOCKED(knl); 2544 else { 2545 knl->kl_unlock(knl->kl_lockarg); 2546 KNL_ASSERT_UNLOCKED(knl); 2547 } 2548 } 2549 2550 /* 2551 * Remove all knotes referencing a specified fd must be called with FILEDESC 2552 * lock. This prevents a race where a new fd comes along and occupies the 2553 * entry and we attach a knote to the fd. 2554 */ 2555 void 2556 knote_fdclose(struct thread *td, int fd) 2557 { 2558 struct filedesc *fdp = td->td_proc->p_fd; 2559 struct kqueue *kq; 2560 struct knote *kn; 2561 int influx; 2562 2563 FILEDESC_XLOCK_ASSERT(fdp); 2564 2565 /* 2566 * We shouldn't have to worry about new kevents appearing on fd 2567 * since filedesc is locked. 2568 */ 2569 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2570 KQ_LOCK(kq); 2571 2572 again: 2573 influx = 0; 2574 while (kq->kq_knlistsize > fd && 2575 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2576 if (kn_in_flux(kn)) { 2577 /* someone else might be waiting on our knote */ 2578 if (influx) 2579 wakeup(kq); 2580 kq->kq_state |= KQ_FLUXWAIT; 2581 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2582 goto again; 2583 } 2584 kn_enter_flux(kn); 2585 KQ_UNLOCK(kq); 2586 influx = 1; 2587 knote_drop(kn, td); 2588 KQ_LOCK(kq); 2589 } 2590 KQ_UNLOCK_FLUX(kq); 2591 } 2592 } 2593 2594 static int 2595 knote_attach(struct knote *kn, struct kqueue *kq) 2596 { 2597 struct klist *list; 2598 2599 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2600 KQ_OWNED(kq); 2601 2602 if (kn->kn_fop->f_isfd) { 2603 if (kn->kn_id >= kq->kq_knlistsize) 2604 return (ENOMEM); 2605 list = &kq->kq_knlist[kn->kn_id]; 2606 } else { 2607 if (kq->kq_knhash == NULL) 2608 return (ENOMEM); 2609 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2610 } 2611 SLIST_INSERT_HEAD(list, kn, kn_link); 2612 return (0); 2613 } 2614 2615 static void 2616 knote_drop(struct knote *kn, struct thread *td) 2617 { 2618 2619 if ((kn->kn_status & KN_DETACHED) == 0) 2620 kn->kn_fop->f_detach(kn); 2621 knote_drop_detached(kn, td); 2622 } 2623 2624 static void 2625 knote_drop_detached(struct knote *kn, struct thread *td) 2626 { 2627 struct kqueue *kq; 2628 struct klist *list; 2629 2630 kq = kn->kn_kq; 2631 2632 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2633 ("knote %p still attached", kn)); 2634 KQ_NOTOWNED(kq); 2635 2636 KQ_LOCK(kq); 2637 KASSERT(kn->kn_influx == 1, 2638 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2639 2640 if (kn->kn_fop->f_isfd) 2641 list = &kq->kq_knlist[kn->kn_id]; 2642 else 2643 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2644 2645 if (!SLIST_EMPTY(list)) 2646 SLIST_REMOVE(list, kn, knote, kn_link); 2647 if (kn->kn_status & KN_QUEUED) 2648 knote_dequeue(kn); 2649 KQ_UNLOCK_FLUX(kq); 2650 2651 if (kn->kn_fop->f_isfd) { 2652 fdrop(kn->kn_fp, td); 2653 kn->kn_fp = NULL; 2654 } 2655 kqueue_fo_release(kn->kn_kevent.filter); 2656 kn->kn_fop = NULL; 2657 knote_free(kn); 2658 } 2659 2660 static void 2661 knote_enqueue(struct knote *kn) 2662 { 2663 struct kqueue *kq = kn->kn_kq; 2664 2665 KQ_OWNED(kn->kn_kq); 2666 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2667 2668 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2669 kn->kn_status |= KN_QUEUED; 2670 kq->kq_count++; 2671 kqueue_wakeup(kq); 2672 } 2673 2674 static void 2675 knote_dequeue(struct knote *kn) 2676 { 2677 struct kqueue *kq = kn->kn_kq; 2678 2679 KQ_OWNED(kn->kn_kq); 2680 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2681 2682 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2683 kn->kn_status &= ~KN_QUEUED; 2684 kq->kq_count--; 2685 } 2686 2687 static void 2688 knote_init(void) 2689 { 2690 2691 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2692 NULL, NULL, UMA_ALIGN_PTR, 0); 2693 } 2694 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2695 2696 static struct knote * 2697 knote_alloc(int waitok) 2698 { 2699 2700 return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) | 2701 M_ZERO)); 2702 } 2703 2704 static void 2705 knote_free(struct knote *kn) 2706 { 2707 2708 uma_zfree(knote_zone, kn); 2709 } 2710 2711 /* 2712 * Register the kev w/ the kq specified by fd. 2713 */ 2714 int 2715 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2716 { 2717 struct kqueue *kq; 2718 struct file *fp; 2719 cap_rights_t rights; 2720 int error; 2721 2722 error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); 2723 if (error != 0) 2724 return (error); 2725 if ((error = kqueue_acquire(fp, &kq)) != 0) 2726 goto noacquire; 2727 2728 error = kqueue_register(kq, kev, td, waitok); 2729 kqueue_release(kq, 0); 2730 2731 noacquire: 2732 fdrop(fp, td); 2733 return (error); 2734 } 2735