1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 6 * Copyright (c) 2009 Apple, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_ktrace.h" 35 #include "opt_kqueue.h" 36 37 #ifdef COMPAT_FREEBSD11 38 #define _WANT_FREEBSD11_KEVENT 39 #endif 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/capsicum.h> 44 #include <sys/kernel.h> 45 #include <sys/limits.h> 46 #include <sys/lock.h> 47 #include <sys/mutex.h> 48 #include <sys/rwlock.h> 49 #include <sys/proc.h> 50 #include <sys/malloc.h> 51 #include <sys/unistd.h> 52 #include <sys/file.h> 53 #include <sys/filedesc.h> 54 #include <sys/filio.h> 55 #include <sys/fcntl.h> 56 #include <sys/kthread.h> 57 #include <sys/selinfo.h> 58 #include <sys/queue.h> 59 #include <sys/event.h> 60 #include <sys/eventvar.h> 61 #include <sys/poll.h> 62 #include <sys/protosw.h> 63 #include <sys/resourcevar.h> 64 #include <sys/sigio.h> 65 #include <sys/signalvar.h> 66 #include <sys/socket.h> 67 #include <sys/socketvar.h> 68 #include <sys/stat.h> 69 #include <sys/sysctl.h> 70 #include <sys/sysproto.h> 71 #include <sys/syscallsubr.h> 72 #include <sys/taskqueue.h> 73 #include <sys/uio.h> 74 #include <sys/user.h> 75 #ifdef KTRACE 76 #include <sys/ktrace.h> 77 #endif 78 #include <machine/atomic.h> 79 80 #include <vm/uma.h> 81 82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 83 84 /* 85 * This lock is used if multiple kq locks are required. This possibly 86 * should be made into a per proc lock. 87 */ 88 static struct mtx kq_global; 89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 90 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 91 if (!haslck) \ 92 mtx_lock(lck); \ 93 haslck = 1; \ 94 } while (0) 95 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 96 if (haslck) \ 97 mtx_unlock(lck); \ 98 haslck = 0; \ 99 } while (0) 100 101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx); 102 103 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 104 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 105 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 106 struct thread *td, int mflag); 107 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 108 static void kqueue_release(struct kqueue *kq, int locked); 109 static void kqueue_destroy(struct kqueue *kq); 110 static void kqueue_drain(struct kqueue *kq, struct thread *td); 111 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 112 uintptr_t ident, int mflag); 113 static void kqueue_task(void *arg, int pending); 114 static int kqueue_scan(struct kqueue *kq, int maxevents, 115 struct kevent_copyops *k_ops, 116 const struct timespec *timeout, 117 struct kevent *keva, struct thread *td); 118 static void kqueue_wakeup(struct kqueue *kq); 119 static struct filterops *kqueue_fo_find(int filt); 120 static void kqueue_fo_release(int filt); 121 struct g_kevent_args; 122 static int kern_kevent_generic(struct thread *td, 123 struct g_kevent_args *uap, 124 struct kevent_copyops *k_ops, const char *struct_name); 125 126 static fo_ioctl_t kqueue_ioctl; 127 static fo_poll_t kqueue_poll; 128 static fo_kqfilter_t kqueue_kqfilter; 129 static fo_stat_t kqueue_stat; 130 static fo_close_t kqueue_close; 131 static fo_fill_kinfo_t kqueue_fill_kinfo; 132 133 static struct fileops kqueueops = { 134 .fo_read = invfo_rdwr, 135 .fo_write = invfo_rdwr, 136 .fo_truncate = invfo_truncate, 137 .fo_ioctl = kqueue_ioctl, 138 .fo_poll = kqueue_poll, 139 .fo_kqfilter = kqueue_kqfilter, 140 .fo_stat = kqueue_stat, 141 .fo_close = kqueue_close, 142 .fo_chmod = invfo_chmod, 143 .fo_chown = invfo_chown, 144 .fo_sendfile = invfo_sendfile, 145 .fo_fill_kinfo = kqueue_fill_kinfo, 146 }; 147 148 static int knote_attach(struct knote *kn, struct kqueue *kq); 149 static void knote_drop(struct knote *kn, struct thread *td); 150 static void knote_drop_detached(struct knote *kn, struct thread *td); 151 static void knote_enqueue(struct knote *kn); 152 static void knote_dequeue(struct knote *kn); 153 static void knote_init(void); 154 static struct knote *knote_alloc(int mflag); 155 static void knote_free(struct knote *kn); 156 157 static void filt_kqdetach(struct knote *kn); 158 static int filt_kqueue(struct knote *kn, long hint); 159 static int filt_procattach(struct knote *kn); 160 static void filt_procdetach(struct knote *kn); 161 static int filt_proc(struct knote *kn, long hint); 162 static int filt_fileattach(struct knote *kn); 163 static void filt_timerexpire(void *knx); 164 static void filt_timerexpire_l(struct knote *kn, bool proc_locked); 165 static int filt_timerattach(struct knote *kn); 166 static void filt_timerdetach(struct knote *kn); 167 static void filt_timerstart(struct knote *kn, sbintime_t to); 168 static void filt_timertouch(struct knote *kn, struct kevent *kev, 169 u_long type); 170 static int filt_timervalidate(struct knote *kn, sbintime_t *to); 171 static int filt_timer(struct knote *kn, long hint); 172 static int filt_userattach(struct knote *kn); 173 static void filt_userdetach(struct knote *kn); 174 static int filt_user(struct knote *kn, long hint); 175 static void filt_usertouch(struct knote *kn, struct kevent *kev, 176 u_long type); 177 178 static struct filterops file_filtops = { 179 .f_isfd = 1, 180 .f_attach = filt_fileattach, 181 }; 182 static struct filterops kqread_filtops = { 183 .f_isfd = 1, 184 .f_detach = filt_kqdetach, 185 .f_event = filt_kqueue, 186 }; 187 /* XXX - move to kern_proc.c? */ 188 static struct filterops proc_filtops = { 189 .f_isfd = 0, 190 .f_attach = filt_procattach, 191 .f_detach = filt_procdetach, 192 .f_event = filt_proc, 193 }; 194 static struct filterops timer_filtops = { 195 .f_isfd = 0, 196 .f_attach = filt_timerattach, 197 .f_detach = filt_timerdetach, 198 .f_event = filt_timer, 199 .f_touch = filt_timertouch, 200 }; 201 static struct filterops user_filtops = { 202 .f_attach = filt_userattach, 203 .f_detach = filt_userdetach, 204 .f_event = filt_user, 205 .f_touch = filt_usertouch, 206 }; 207 208 static uma_zone_t knote_zone; 209 static unsigned int kq_ncallouts = 0; 210 static unsigned int kq_calloutmax = 4 * 1024; 211 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 212 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 213 214 /* XXX - ensure not influx ? */ 215 #define KNOTE_ACTIVATE(kn, islock) do { \ 216 if ((islock)) \ 217 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 218 else \ 219 KQ_LOCK((kn)->kn_kq); \ 220 (kn)->kn_status |= KN_ACTIVE; \ 221 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 222 knote_enqueue((kn)); \ 223 if (!(islock)) \ 224 KQ_UNLOCK((kn)->kn_kq); \ 225 } while (0) 226 #define KQ_LOCK(kq) do { \ 227 mtx_lock(&(kq)->kq_lock); \ 228 } while (0) 229 #define KQ_FLUX_WAKEUP(kq) do { \ 230 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 231 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 232 wakeup((kq)); \ 233 } \ 234 } while (0) 235 #define KQ_UNLOCK_FLUX(kq) do { \ 236 KQ_FLUX_WAKEUP(kq); \ 237 mtx_unlock(&(kq)->kq_lock); \ 238 } while (0) 239 #define KQ_UNLOCK(kq) do { \ 240 mtx_unlock(&(kq)->kq_lock); \ 241 } while (0) 242 #define KQ_OWNED(kq) do { \ 243 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 244 } while (0) 245 #define KQ_NOTOWNED(kq) do { \ 246 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 247 } while (0) 248 249 static struct knlist * 250 kn_list_lock(struct knote *kn) 251 { 252 struct knlist *knl; 253 254 knl = kn->kn_knlist; 255 if (knl != NULL) 256 knl->kl_lock(knl->kl_lockarg); 257 return (knl); 258 } 259 260 static void 261 kn_list_unlock(struct knlist *knl) 262 { 263 bool do_free; 264 265 if (knl == NULL) 266 return; 267 do_free = knl->kl_autodestroy && knlist_empty(knl); 268 knl->kl_unlock(knl->kl_lockarg); 269 if (do_free) { 270 knlist_destroy(knl); 271 free(knl, M_KQUEUE); 272 } 273 } 274 275 static bool 276 kn_in_flux(struct knote *kn) 277 { 278 279 return (kn->kn_influx > 0); 280 } 281 282 static void 283 kn_enter_flux(struct knote *kn) 284 { 285 286 KQ_OWNED(kn->kn_kq); 287 MPASS(kn->kn_influx < INT_MAX); 288 kn->kn_influx++; 289 } 290 291 static bool 292 kn_leave_flux(struct knote *kn) 293 { 294 295 KQ_OWNED(kn->kn_kq); 296 MPASS(kn->kn_influx > 0); 297 kn->kn_influx--; 298 return (kn->kn_influx == 0); 299 } 300 301 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 302 if (islocked) \ 303 KNL_ASSERT_LOCKED(knl); \ 304 else \ 305 KNL_ASSERT_UNLOCKED(knl); \ 306 } while (0) 307 #ifdef INVARIANTS 308 #define KNL_ASSERT_LOCKED(knl) do { \ 309 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \ 310 } while (0) 311 #define KNL_ASSERT_UNLOCKED(knl) do { \ 312 knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \ 313 } while (0) 314 #else /* !INVARIANTS */ 315 #define KNL_ASSERT_LOCKED(knl) do {} while (0) 316 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 317 #endif /* INVARIANTS */ 318 319 #ifndef KN_HASHSIZE 320 #define KN_HASHSIZE 64 /* XXX should be tunable */ 321 #endif 322 323 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 324 325 static int 326 filt_nullattach(struct knote *kn) 327 { 328 329 return (ENXIO); 330 }; 331 332 struct filterops null_filtops = { 333 .f_isfd = 0, 334 .f_attach = filt_nullattach, 335 }; 336 337 /* XXX - make SYSINIT to add these, and move into respective modules. */ 338 extern struct filterops sig_filtops; 339 extern struct filterops fs_filtops; 340 341 /* 342 * Table for for all system-defined filters. 343 */ 344 static struct mtx filterops_lock; 345 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 346 MTX_DEF); 347 static struct { 348 struct filterops *for_fop; 349 int for_nolock; 350 int for_refcnt; 351 } sysfilt_ops[EVFILT_SYSCOUNT] = { 352 { &file_filtops, 1 }, /* EVFILT_READ */ 353 { &file_filtops, 1 }, /* EVFILT_WRITE */ 354 { &null_filtops }, /* EVFILT_AIO */ 355 { &file_filtops, 1 }, /* EVFILT_VNODE */ 356 { &proc_filtops, 1 }, /* EVFILT_PROC */ 357 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 358 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 359 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 360 { &fs_filtops, 1 }, /* EVFILT_FS */ 361 { &null_filtops }, /* EVFILT_LIO */ 362 { &user_filtops, 1 }, /* EVFILT_USER */ 363 { &null_filtops }, /* EVFILT_SENDFILE */ 364 { &file_filtops, 1 }, /* EVFILT_EMPTY */ 365 }; 366 367 /* 368 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 369 * method. 370 */ 371 static int 372 filt_fileattach(struct knote *kn) 373 { 374 375 return (fo_kqfilter(kn->kn_fp, kn)); 376 } 377 378 /*ARGSUSED*/ 379 static int 380 kqueue_kqfilter(struct file *fp, struct knote *kn) 381 { 382 struct kqueue *kq = kn->kn_fp->f_data; 383 384 if (kn->kn_filter != EVFILT_READ) 385 return (EINVAL); 386 387 kn->kn_status |= KN_KQUEUE; 388 kn->kn_fop = &kqread_filtops; 389 knlist_add(&kq->kq_sel.si_note, kn, 0); 390 391 return (0); 392 } 393 394 static void 395 filt_kqdetach(struct knote *kn) 396 { 397 struct kqueue *kq = kn->kn_fp->f_data; 398 399 knlist_remove(&kq->kq_sel.si_note, kn, 0); 400 } 401 402 /*ARGSUSED*/ 403 static int 404 filt_kqueue(struct knote *kn, long hint) 405 { 406 struct kqueue *kq = kn->kn_fp->f_data; 407 408 kn->kn_data = kq->kq_count; 409 return (kn->kn_data > 0); 410 } 411 412 /* XXX - move to kern_proc.c? */ 413 static int 414 filt_procattach(struct knote *kn) 415 { 416 struct proc *p; 417 int error; 418 bool exiting, immediate; 419 420 exiting = immediate = false; 421 if (kn->kn_sfflags & NOTE_EXIT) 422 p = pfind_any(kn->kn_id); 423 else 424 p = pfind(kn->kn_id); 425 if (p == NULL) 426 return (ESRCH); 427 if (p->p_flag & P_WEXIT) 428 exiting = true; 429 430 if ((error = p_cansee(curthread, p))) { 431 PROC_UNLOCK(p); 432 return (error); 433 } 434 435 kn->kn_ptr.p_proc = p; 436 kn->kn_flags |= EV_CLEAR; /* automatically set */ 437 438 /* 439 * Internal flag indicating registration done by kernel for the 440 * purposes of getting a NOTE_CHILD notification. 441 */ 442 if (kn->kn_flags & EV_FLAG2) { 443 kn->kn_flags &= ~EV_FLAG2; 444 kn->kn_data = kn->kn_sdata; /* ppid */ 445 kn->kn_fflags = NOTE_CHILD; 446 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); 447 immediate = true; /* Force immediate activation of child note. */ 448 } 449 /* 450 * Internal flag indicating registration done by kernel (for other than 451 * NOTE_CHILD). 452 */ 453 if (kn->kn_flags & EV_FLAG1) { 454 kn->kn_flags &= ~EV_FLAG1; 455 } 456 457 knlist_add(p->p_klist, kn, 1); 458 459 /* 460 * Immediately activate any child notes or, in the case of a zombie 461 * target process, exit notes. The latter is necessary to handle the 462 * case where the target process, e.g. a child, dies before the kevent 463 * is registered. 464 */ 465 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) 466 KNOTE_ACTIVATE(kn, 0); 467 468 PROC_UNLOCK(p); 469 470 return (0); 471 } 472 473 /* 474 * The knote may be attached to a different process, which may exit, 475 * leaving nothing for the knote to be attached to. So when the process 476 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 477 * it will be deleted when read out. However, as part of the knote deletion, 478 * this routine is called, so a check is needed to avoid actually performing 479 * a detach, because the original process does not exist any more. 480 */ 481 /* XXX - move to kern_proc.c? */ 482 static void 483 filt_procdetach(struct knote *kn) 484 { 485 486 knlist_remove(kn->kn_knlist, kn, 0); 487 kn->kn_ptr.p_proc = NULL; 488 } 489 490 /* XXX - move to kern_proc.c? */ 491 static int 492 filt_proc(struct knote *kn, long hint) 493 { 494 struct proc *p; 495 u_int event; 496 497 p = kn->kn_ptr.p_proc; 498 if (p == NULL) /* already activated, from attach filter */ 499 return (0); 500 501 /* Mask off extra data. */ 502 event = (u_int)hint & NOTE_PCTRLMASK; 503 504 /* If the user is interested in this event, record it. */ 505 if (kn->kn_sfflags & event) 506 kn->kn_fflags |= event; 507 508 /* Process is gone, so flag the event as finished. */ 509 if (event == NOTE_EXIT) { 510 kn->kn_flags |= EV_EOF | EV_ONESHOT; 511 kn->kn_ptr.p_proc = NULL; 512 if (kn->kn_fflags & NOTE_EXIT) 513 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 514 if (kn->kn_fflags == 0) 515 kn->kn_flags |= EV_DROP; 516 return (1); 517 } 518 519 return (kn->kn_fflags != 0); 520 } 521 522 /* 523 * Called when the process forked. It mostly does the same as the 524 * knote(), activating all knotes registered to be activated when the 525 * process forked. Additionally, for each knote attached to the 526 * parent, check whether user wants to track the new process. If so 527 * attach a new knote to it, and immediately report an event with the 528 * child's pid. 529 */ 530 void 531 knote_fork(struct knlist *list, int pid) 532 { 533 struct kqueue *kq; 534 struct knote *kn; 535 struct kevent kev; 536 int error; 537 538 MPASS(list != NULL); 539 KNL_ASSERT_LOCKED(list); 540 if (SLIST_EMPTY(&list->kl_list)) 541 return; 542 543 memset(&kev, 0, sizeof(kev)); 544 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 545 kq = kn->kn_kq; 546 KQ_LOCK(kq); 547 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 548 KQ_UNLOCK(kq); 549 continue; 550 } 551 552 /* 553 * The same as knote(), activate the event. 554 */ 555 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 556 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 557 KNOTE_ACTIVATE(kn, 1); 558 KQ_UNLOCK(kq); 559 continue; 560 } 561 562 /* 563 * The NOTE_TRACK case. In addition to the activation 564 * of the event, we need to register new events to 565 * track the child. Drop the locks in preparation for 566 * the call to kqueue_register(). 567 */ 568 kn_enter_flux(kn); 569 KQ_UNLOCK(kq); 570 list->kl_unlock(list->kl_lockarg); 571 572 /* 573 * Activate existing knote and register tracking knotes with 574 * new process. 575 * 576 * First register a knote to get just the child notice. This 577 * must be a separate note from a potential NOTE_EXIT 578 * notification since both NOTE_CHILD and NOTE_EXIT are defined 579 * to use the data field (in conflicting ways). 580 */ 581 kev.ident = pid; 582 kev.filter = kn->kn_filter; 583 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 584 EV_FLAG2; 585 kev.fflags = kn->kn_sfflags; 586 kev.data = kn->kn_id; /* parent */ 587 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 588 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 589 if (error) 590 kn->kn_fflags |= NOTE_TRACKERR; 591 592 /* 593 * Then register another knote to track other potential events 594 * from the new process. 595 */ 596 kev.ident = pid; 597 kev.filter = kn->kn_filter; 598 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 599 kev.fflags = kn->kn_sfflags; 600 kev.data = kn->kn_id; /* parent */ 601 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 602 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 603 if (error) 604 kn->kn_fflags |= NOTE_TRACKERR; 605 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 606 KNOTE_ACTIVATE(kn, 0); 607 list->kl_lock(list->kl_lockarg); 608 KQ_LOCK(kq); 609 kn_leave_flux(kn); 610 KQ_UNLOCK_FLUX(kq); 611 } 612 } 613 614 /* 615 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 616 * interval timer support code. 617 */ 618 619 #define NOTE_TIMER_PRECMASK \ 620 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 621 622 static sbintime_t 623 timer2sbintime(int64_t data, int flags) 624 { 625 int64_t secs; 626 627 /* 628 * Macros for converting to the fractional second portion of an 629 * sbintime_t using 64bit multiplication to improve precision. 630 */ 631 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) 632 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) 633 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) 634 switch (flags & NOTE_TIMER_PRECMASK) { 635 case NOTE_SECONDS: 636 #ifdef __LP64__ 637 if (data > (SBT_MAX / SBT_1S)) 638 return (SBT_MAX); 639 #endif 640 return ((sbintime_t)data << 32); 641 case NOTE_MSECONDS: /* FALLTHROUGH */ 642 case 0: 643 if (data >= 1000) { 644 secs = data / 1000; 645 #ifdef __LP64__ 646 if (secs > (SBT_MAX / SBT_1S)) 647 return (SBT_MAX); 648 #endif 649 return (secs << 32 | MS_TO_SBT(data % 1000)); 650 } 651 return (MS_TO_SBT(data)); 652 case NOTE_USECONDS: 653 if (data >= 1000000) { 654 secs = data / 1000000; 655 #ifdef __LP64__ 656 if (secs > (SBT_MAX / SBT_1S)) 657 return (SBT_MAX); 658 #endif 659 return (secs << 32 | US_TO_SBT(data % 1000000)); 660 } 661 return (US_TO_SBT(data)); 662 case NOTE_NSECONDS: 663 if (data >= 1000000000) { 664 secs = data / 1000000000; 665 #ifdef __LP64__ 666 if (secs > (SBT_MAX / SBT_1S)) 667 return (SBT_MAX); 668 #endif 669 return (secs << 32 | NS_TO_SBT(data % 1000000000)); 670 } 671 return (NS_TO_SBT(data)); 672 default: 673 break; 674 } 675 return (-1); 676 } 677 678 struct kq_timer_cb_data { 679 struct callout c; 680 struct proc *p; 681 struct knote *kn; 682 int cpuid; 683 TAILQ_ENTRY(kq_timer_cb_data) link; 684 sbintime_t next; /* next timer event fires at */ 685 sbintime_t to; /* precalculated timer period, 0 for abs */ 686 }; 687 688 static void 689 kqtimer_sched_callout(struct kq_timer_cb_data *kc) 690 { 691 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn, 692 kc->cpuid, C_ABSOLUTE); 693 } 694 695 void 696 kqtimer_proc_continue(struct proc *p) 697 { 698 struct kq_timer_cb_data *kc, *kc1; 699 struct bintime bt; 700 sbintime_t now; 701 702 PROC_LOCK_ASSERT(p, MA_OWNED); 703 704 getboottimebin(&bt); 705 now = bttosbt(bt); 706 707 TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) { 708 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link); 709 if (kc->next <= now) 710 filt_timerexpire_l(kc->kn, true); 711 else 712 kqtimer_sched_callout(kc); 713 } 714 } 715 716 static void 717 filt_timerexpire_l(struct knote *kn, bool proc_locked) 718 { 719 struct kq_timer_cb_data *kc; 720 struct proc *p; 721 sbintime_t now; 722 723 kc = kn->kn_ptr.p_v; 724 725 if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) { 726 kn->kn_data++; 727 KNOTE_ACTIVATE(kn, 0); 728 return; 729 } 730 731 for (now = sbinuptime(); kc->next <= now; kc->next += kc->to) 732 kn->kn_data++; 733 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 734 735 /* 736 * Initial check for stopped kc->p is racy. It is fine to 737 * miss the set of the stop flags, at worst we would schedule 738 * one more callout. On the other hand, it is not fine to not 739 * schedule when we we missed clearing of the flags, we 740 * recheck them under the lock and observe consistent state. 741 */ 742 p = kc->p; 743 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 744 if (!proc_locked) 745 PROC_LOCK(p); 746 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 747 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link); 748 if (!proc_locked) 749 PROC_UNLOCK(p); 750 return; 751 } 752 if (!proc_locked) 753 PROC_UNLOCK(p); 754 } 755 kqtimer_sched_callout(kc); 756 } 757 758 static void 759 filt_timerexpire(void *knx) 760 { 761 filt_timerexpire_l(knx, false); 762 } 763 764 /* 765 * data contains amount of time to sleep 766 */ 767 static int 768 filt_timervalidate(struct knote *kn, sbintime_t *to) 769 { 770 struct bintime bt; 771 sbintime_t sbt; 772 773 if (kn->kn_sdata < 0) 774 return (EINVAL); 775 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 776 kn->kn_sdata = 1; 777 /* 778 * The only fflags values supported are the timer unit 779 * (precision) and the absolute time indicator. 780 */ 781 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 782 return (EINVAL); 783 784 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 785 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 786 getboottimebin(&bt); 787 sbt = bttosbt(bt); 788 *to -= sbt; 789 } 790 if (*to < 0) 791 return (EINVAL); 792 return (0); 793 } 794 795 static int 796 filt_timerattach(struct knote *kn) 797 { 798 struct kq_timer_cb_data *kc; 799 sbintime_t to; 800 unsigned int ncallouts; 801 int error; 802 803 error = filt_timervalidate(kn, &to); 804 if (error != 0) 805 return (error); 806 807 do { 808 ncallouts = kq_ncallouts; 809 if (ncallouts >= kq_calloutmax) 810 return (ENOMEM); 811 } while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1)); 812 813 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 814 kn->kn_flags |= EV_CLEAR; /* automatically set */ 815 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 816 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 817 kc->kn = kn; 818 kc->p = curproc; 819 kc->cpuid = PCPU_GET(cpuid); 820 callout_init(&kc->c, 1); 821 filt_timerstart(kn, to); 822 823 return (0); 824 } 825 826 static void 827 filt_timerstart(struct knote *kn, sbintime_t to) 828 { 829 struct kq_timer_cb_data *kc; 830 831 kc = kn->kn_ptr.p_v; 832 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 833 kc->next = to; 834 kc->to = 0; 835 } else { 836 kc->next = to + sbinuptime(); 837 kc->to = to; 838 } 839 kqtimer_sched_callout(kc); 840 } 841 842 static void 843 filt_timerdetach(struct knote *kn) 844 { 845 struct kq_timer_cb_data *kc; 846 unsigned int old __unused; 847 848 kc = kn->kn_ptr.p_v; 849 callout_drain(&kc->c); 850 free(kc, M_KQUEUE); 851 old = atomic_fetchadd_int(&kq_ncallouts, -1); 852 KASSERT(old > 0, ("Number of callouts cannot become negative")); 853 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 854 } 855 856 static void 857 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 858 { 859 struct kq_timer_cb_data *kc; 860 struct kqueue *kq; 861 sbintime_t to; 862 int error; 863 864 switch (type) { 865 case EVENT_REGISTER: 866 /* Handle re-added timers that update data/fflags */ 867 if (kev->flags & EV_ADD) { 868 kc = kn->kn_ptr.p_v; 869 870 /* Drain any existing callout. */ 871 callout_drain(&kc->c); 872 873 /* Throw away any existing undelivered record 874 * of the timer expiration. This is done under 875 * the presumption that if a process is 876 * re-adding this timer with new parameters, 877 * it is no longer interested in what may have 878 * happened under the old parameters. If it is 879 * interested, it can wait for the expiration, 880 * delete the old timer definition, and then 881 * add the new one. 882 * 883 * This has to be done while the kq is locked: 884 * - if enqueued, dequeue 885 * - make it no longer active 886 * - clear the count of expiration events 887 */ 888 kq = kn->kn_kq; 889 KQ_LOCK(kq); 890 if (kn->kn_status & KN_QUEUED) 891 knote_dequeue(kn); 892 893 kn->kn_status &= ~KN_ACTIVE; 894 kn->kn_data = 0; 895 KQ_UNLOCK(kq); 896 897 /* Reschedule timer based on new data/fflags */ 898 kn->kn_sfflags = kev->fflags; 899 kn->kn_sdata = kev->data; 900 error = filt_timervalidate(kn, &to); 901 if (error != 0) { 902 kn->kn_flags |= EV_ERROR; 903 kn->kn_data = error; 904 } else 905 filt_timerstart(kn, to); 906 } 907 break; 908 909 case EVENT_PROCESS: 910 *kev = kn->kn_kevent; 911 if (kn->kn_flags & EV_CLEAR) { 912 kn->kn_data = 0; 913 kn->kn_fflags = 0; 914 } 915 break; 916 917 default: 918 panic("filt_timertouch() - invalid type (%ld)", type); 919 break; 920 } 921 } 922 923 static int 924 filt_timer(struct knote *kn, long hint) 925 { 926 927 return (kn->kn_data != 0); 928 } 929 930 static int 931 filt_userattach(struct knote *kn) 932 { 933 934 /* 935 * EVFILT_USER knotes are not attached to anything in the kernel. 936 */ 937 kn->kn_hook = NULL; 938 if (kn->kn_fflags & NOTE_TRIGGER) 939 kn->kn_hookid = 1; 940 else 941 kn->kn_hookid = 0; 942 return (0); 943 } 944 945 static void 946 filt_userdetach(__unused struct knote *kn) 947 { 948 949 /* 950 * EVFILT_USER knotes are not attached to anything in the kernel. 951 */ 952 } 953 954 static int 955 filt_user(struct knote *kn, __unused long hint) 956 { 957 958 return (kn->kn_hookid); 959 } 960 961 static void 962 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 963 { 964 u_int ffctrl; 965 966 switch (type) { 967 case EVENT_REGISTER: 968 if (kev->fflags & NOTE_TRIGGER) 969 kn->kn_hookid = 1; 970 971 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 972 kev->fflags &= NOTE_FFLAGSMASK; 973 switch (ffctrl) { 974 case NOTE_FFNOP: 975 break; 976 977 case NOTE_FFAND: 978 kn->kn_sfflags &= kev->fflags; 979 break; 980 981 case NOTE_FFOR: 982 kn->kn_sfflags |= kev->fflags; 983 break; 984 985 case NOTE_FFCOPY: 986 kn->kn_sfflags = kev->fflags; 987 break; 988 989 default: 990 /* XXX Return error? */ 991 break; 992 } 993 kn->kn_sdata = kev->data; 994 if (kev->flags & EV_CLEAR) { 995 kn->kn_hookid = 0; 996 kn->kn_data = 0; 997 kn->kn_fflags = 0; 998 } 999 break; 1000 1001 case EVENT_PROCESS: 1002 *kev = kn->kn_kevent; 1003 kev->fflags = kn->kn_sfflags; 1004 kev->data = kn->kn_sdata; 1005 if (kn->kn_flags & EV_CLEAR) { 1006 kn->kn_hookid = 0; 1007 kn->kn_data = 0; 1008 kn->kn_fflags = 0; 1009 } 1010 break; 1011 1012 default: 1013 panic("filt_usertouch() - invalid type (%ld)", type); 1014 break; 1015 } 1016 } 1017 1018 int 1019 sys_kqueue(struct thread *td, struct kqueue_args *uap) 1020 { 1021 1022 return (kern_kqueue(td, 0, NULL)); 1023 } 1024 1025 static void 1026 kqueue_init(struct kqueue *kq) 1027 { 1028 1029 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 1030 TAILQ_INIT(&kq->kq_head); 1031 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 1032 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 1033 } 1034 1035 int 1036 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 1037 { 1038 struct filedesc *fdp; 1039 struct kqueue *kq; 1040 struct file *fp; 1041 struct ucred *cred; 1042 int fd, error; 1043 1044 fdp = td->td_proc->p_fd; 1045 cred = td->td_ucred; 1046 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 1047 return (ENOMEM); 1048 1049 error = falloc_caps(td, &fp, &fd, flags, fcaps); 1050 if (error != 0) { 1051 chgkqcnt(cred->cr_ruidinfo, -1, 0); 1052 return (error); 1053 } 1054 1055 /* An extra reference on `fp' has been held for us by falloc(). */ 1056 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 1057 kqueue_init(kq); 1058 kq->kq_fdp = fdp; 1059 kq->kq_cred = crhold(cred); 1060 1061 FILEDESC_XLOCK(fdp); 1062 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 1063 FILEDESC_XUNLOCK(fdp); 1064 1065 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 1066 fdrop(fp, td); 1067 1068 td->td_retval[0] = fd; 1069 return (0); 1070 } 1071 1072 struct g_kevent_args { 1073 int fd; 1074 void *changelist; 1075 int nchanges; 1076 void *eventlist; 1077 int nevents; 1078 const struct timespec *timeout; 1079 }; 1080 1081 int 1082 sys_kevent(struct thread *td, struct kevent_args *uap) 1083 { 1084 struct kevent_copyops k_ops = { 1085 .arg = uap, 1086 .k_copyout = kevent_copyout, 1087 .k_copyin = kevent_copyin, 1088 .kevent_size = sizeof(struct kevent), 1089 }; 1090 struct g_kevent_args gk_args = { 1091 .fd = uap->fd, 1092 .changelist = uap->changelist, 1093 .nchanges = uap->nchanges, 1094 .eventlist = uap->eventlist, 1095 .nevents = uap->nevents, 1096 .timeout = uap->timeout, 1097 }; 1098 1099 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); 1100 } 1101 1102 static int 1103 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, 1104 struct kevent_copyops *k_ops, const char *struct_name) 1105 { 1106 struct timespec ts, *tsp; 1107 #ifdef KTRACE 1108 struct kevent *eventlist = uap->eventlist; 1109 #endif 1110 int error; 1111 1112 if (uap->timeout != NULL) { 1113 error = copyin(uap->timeout, &ts, sizeof(ts)); 1114 if (error) 1115 return (error); 1116 tsp = &ts; 1117 } else 1118 tsp = NULL; 1119 1120 #ifdef KTRACE 1121 if (KTRPOINT(td, KTR_STRUCT_ARRAY)) 1122 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, 1123 uap->nchanges, k_ops->kevent_size); 1124 #endif 1125 1126 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 1127 k_ops, tsp); 1128 1129 #ifdef KTRACE 1130 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) 1131 ktrstructarray(struct_name, UIO_USERSPACE, eventlist, 1132 td->td_retval[0], k_ops->kevent_size); 1133 #endif 1134 1135 return (error); 1136 } 1137 1138 /* 1139 * Copy 'count' items into the destination list pointed to by uap->eventlist. 1140 */ 1141 static int 1142 kevent_copyout(void *arg, struct kevent *kevp, int count) 1143 { 1144 struct kevent_args *uap; 1145 int error; 1146 1147 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1148 uap = (struct kevent_args *)arg; 1149 1150 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 1151 if (error == 0) 1152 uap->eventlist += count; 1153 return (error); 1154 } 1155 1156 /* 1157 * Copy 'count' items from the list pointed to by uap->changelist. 1158 */ 1159 static int 1160 kevent_copyin(void *arg, struct kevent *kevp, int count) 1161 { 1162 struct kevent_args *uap; 1163 int error; 1164 1165 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1166 uap = (struct kevent_args *)arg; 1167 1168 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1169 if (error == 0) 1170 uap->changelist += count; 1171 return (error); 1172 } 1173 1174 #ifdef COMPAT_FREEBSD11 1175 static int 1176 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1177 { 1178 struct freebsd11_kevent_args *uap; 1179 struct kevent_freebsd11 kev11; 1180 int error, i; 1181 1182 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1183 uap = (struct freebsd11_kevent_args *)arg; 1184 1185 for (i = 0; i < count; i++) { 1186 kev11.ident = kevp->ident; 1187 kev11.filter = kevp->filter; 1188 kev11.flags = kevp->flags; 1189 kev11.fflags = kevp->fflags; 1190 kev11.data = kevp->data; 1191 kev11.udata = kevp->udata; 1192 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1193 if (error != 0) 1194 break; 1195 uap->eventlist++; 1196 kevp++; 1197 } 1198 return (error); 1199 } 1200 1201 /* 1202 * Copy 'count' items from the list pointed to by uap->changelist. 1203 */ 1204 static int 1205 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1206 { 1207 struct freebsd11_kevent_args *uap; 1208 struct kevent_freebsd11 kev11; 1209 int error, i; 1210 1211 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1212 uap = (struct freebsd11_kevent_args *)arg; 1213 1214 for (i = 0; i < count; i++) { 1215 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1216 if (error != 0) 1217 break; 1218 kevp->ident = kev11.ident; 1219 kevp->filter = kev11.filter; 1220 kevp->flags = kev11.flags; 1221 kevp->fflags = kev11.fflags; 1222 kevp->data = (uintptr_t)kev11.data; 1223 kevp->udata = kev11.udata; 1224 bzero(&kevp->ext, sizeof(kevp->ext)); 1225 uap->changelist++; 1226 kevp++; 1227 } 1228 return (error); 1229 } 1230 1231 int 1232 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1233 { 1234 struct kevent_copyops k_ops = { 1235 .arg = uap, 1236 .k_copyout = kevent11_copyout, 1237 .k_copyin = kevent11_copyin, 1238 .kevent_size = sizeof(struct kevent_freebsd11), 1239 }; 1240 struct g_kevent_args gk_args = { 1241 .fd = uap->fd, 1242 .changelist = uap->changelist, 1243 .nchanges = uap->nchanges, 1244 .eventlist = uap->eventlist, 1245 .nevents = uap->nevents, 1246 .timeout = uap->timeout, 1247 }; 1248 1249 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11")); 1250 } 1251 #endif 1252 1253 int 1254 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1255 struct kevent_copyops *k_ops, const struct timespec *timeout) 1256 { 1257 cap_rights_t rights; 1258 struct file *fp; 1259 int error; 1260 1261 cap_rights_init_zero(&rights); 1262 if (nchanges > 0) 1263 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE); 1264 if (nevents > 0) 1265 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT); 1266 error = fget(td, fd, &rights, &fp); 1267 if (error != 0) 1268 return (error); 1269 1270 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1271 fdrop(fp, td); 1272 1273 return (error); 1274 } 1275 1276 static int 1277 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1278 struct kevent_copyops *k_ops, const struct timespec *timeout) 1279 { 1280 struct kevent keva[KQ_NEVENTS]; 1281 struct kevent *kevp, *changes; 1282 int i, n, nerrors, error; 1283 1284 nerrors = 0; 1285 while (nchanges > 0) { 1286 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 1287 error = k_ops->k_copyin(k_ops->arg, keva, n); 1288 if (error) 1289 return (error); 1290 changes = keva; 1291 for (i = 0; i < n; i++) { 1292 kevp = &changes[i]; 1293 if (!kevp->filter) 1294 continue; 1295 kevp->flags &= ~EV_SYSFLAGS; 1296 error = kqueue_register(kq, kevp, td, M_WAITOK); 1297 if (error || (kevp->flags & EV_RECEIPT)) { 1298 if (nevents == 0) 1299 return (error); 1300 kevp->flags = EV_ERROR; 1301 kevp->data = error; 1302 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 1303 nevents--; 1304 nerrors++; 1305 } 1306 } 1307 nchanges -= n; 1308 } 1309 if (nerrors) { 1310 td->td_retval[0] = nerrors; 1311 return (0); 1312 } 1313 1314 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1315 } 1316 1317 int 1318 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1319 struct kevent_copyops *k_ops, const struct timespec *timeout) 1320 { 1321 struct kqueue *kq; 1322 int error; 1323 1324 error = kqueue_acquire(fp, &kq); 1325 if (error != 0) 1326 return (error); 1327 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1328 kqueue_release(kq, 0); 1329 return (error); 1330 } 1331 1332 /* 1333 * Performs a kevent() call on a temporarily created kqueue. This can be 1334 * used to perform one-shot polling, similar to poll() and select(). 1335 */ 1336 int 1337 kern_kevent_anonymous(struct thread *td, int nevents, 1338 struct kevent_copyops *k_ops) 1339 { 1340 struct kqueue kq = {}; 1341 int error; 1342 1343 kqueue_init(&kq); 1344 kq.kq_refcnt = 1; 1345 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1346 kqueue_drain(&kq, td); 1347 kqueue_destroy(&kq); 1348 return (error); 1349 } 1350 1351 int 1352 kqueue_add_filteropts(int filt, struct filterops *filtops) 1353 { 1354 int error; 1355 1356 error = 0; 1357 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1358 printf( 1359 "trying to add a filterop that is out of range: %d is beyond %d\n", 1360 ~filt, EVFILT_SYSCOUNT); 1361 return EINVAL; 1362 } 1363 mtx_lock(&filterops_lock); 1364 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1365 sysfilt_ops[~filt].for_fop != NULL) 1366 error = EEXIST; 1367 else { 1368 sysfilt_ops[~filt].for_fop = filtops; 1369 sysfilt_ops[~filt].for_refcnt = 0; 1370 } 1371 mtx_unlock(&filterops_lock); 1372 1373 return (error); 1374 } 1375 1376 int 1377 kqueue_del_filteropts(int filt) 1378 { 1379 int error; 1380 1381 error = 0; 1382 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1383 return EINVAL; 1384 1385 mtx_lock(&filterops_lock); 1386 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1387 sysfilt_ops[~filt].for_fop == NULL) 1388 error = EINVAL; 1389 else if (sysfilt_ops[~filt].for_refcnt != 0) 1390 error = EBUSY; 1391 else { 1392 sysfilt_ops[~filt].for_fop = &null_filtops; 1393 sysfilt_ops[~filt].for_refcnt = 0; 1394 } 1395 mtx_unlock(&filterops_lock); 1396 1397 return error; 1398 } 1399 1400 static struct filterops * 1401 kqueue_fo_find(int filt) 1402 { 1403 1404 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1405 return NULL; 1406 1407 if (sysfilt_ops[~filt].for_nolock) 1408 return sysfilt_ops[~filt].for_fop; 1409 1410 mtx_lock(&filterops_lock); 1411 sysfilt_ops[~filt].for_refcnt++; 1412 if (sysfilt_ops[~filt].for_fop == NULL) 1413 sysfilt_ops[~filt].for_fop = &null_filtops; 1414 mtx_unlock(&filterops_lock); 1415 1416 return sysfilt_ops[~filt].for_fop; 1417 } 1418 1419 static void 1420 kqueue_fo_release(int filt) 1421 { 1422 1423 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1424 return; 1425 1426 if (sysfilt_ops[~filt].for_nolock) 1427 return; 1428 1429 mtx_lock(&filterops_lock); 1430 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1431 ("filter object refcount not valid on release")); 1432 sysfilt_ops[~filt].for_refcnt--; 1433 mtx_unlock(&filterops_lock); 1434 } 1435 1436 /* 1437 * A ref to kq (obtained via kqueue_acquire) must be held. 1438 */ 1439 static int 1440 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, 1441 int mflag) 1442 { 1443 struct filterops *fops; 1444 struct file *fp; 1445 struct knote *kn, *tkn; 1446 struct knlist *knl; 1447 int error, filt, event; 1448 int haskqglobal, filedesc_unlock; 1449 1450 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1451 return (EINVAL); 1452 1453 fp = NULL; 1454 kn = NULL; 1455 knl = NULL; 1456 error = 0; 1457 haskqglobal = 0; 1458 filedesc_unlock = 0; 1459 1460 filt = kev->filter; 1461 fops = kqueue_fo_find(filt); 1462 if (fops == NULL) 1463 return EINVAL; 1464 1465 if (kev->flags & EV_ADD) { 1466 /* 1467 * Prevent waiting with locks. Non-sleepable 1468 * allocation failures are handled in the loop, only 1469 * if the spare knote appears to be actually required. 1470 */ 1471 tkn = knote_alloc(mflag); 1472 } else { 1473 tkn = NULL; 1474 } 1475 1476 findkn: 1477 if (fops->f_isfd) { 1478 KASSERT(td != NULL, ("td is NULL")); 1479 if (kev->ident > INT_MAX) 1480 error = EBADF; 1481 else 1482 error = fget(td, kev->ident, &cap_event_rights, &fp); 1483 if (error) 1484 goto done; 1485 1486 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1487 kev->ident, M_NOWAIT) != 0) { 1488 /* try again */ 1489 fdrop(fp, td); 1490 fp = NULL; 1491 error = kqueue_expand(kq, fops, kev->ident, mflag); 1492 if (error) 1493 goto done; 1494 goto findkn; 1495 } 1496 1497 if (fp->f_type == DTYPE_KQUEUE) { 1498 /* 1499 * If we add some intelligence about what we are doing, 1500 * we should be able to support events on ourselves. 1501 * We need to know when we are doing this to prevent 1502 * getting both the knlist lock and the kq lock since 1503 * they are the same thing. 1504 */ 1505 if (fp->f_data == kq) { 1506 error = EINVAL; 1507 goto done; 1508 } 1509 1510 /* 1511 * Pre-lock the filedesc before the global 1512 * lock mutex, see the comment in 1513 * kqueue_close(). 1514 */ 1515 FILEDESC_XLOCK(td->td_proc->p_fd); 1516 filedesc_unlock = 1; 1517 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1518 } 1519 1520 KQ_LOCK(kq); 1521 if (kev->ident < kq->kq_knlistsize) { 1522 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1523 if (kev->filter == kn->kn_filter) 1524 break; 1525 } 1526 } else { 1527 if ((kev->flags & EV_ADD) == EV_ADD) { 1528 error = kqueue_expand(kq, fops, kev->ident, mflag); 1529 if (error != 0) 1530 goto done; 1531 } 1532 1533 KQ_LOCK(kq); 1534 1535 /* 1536 * If possible, find an existing knote to use for this kevent. 1537 */ 1538 if (kev->filter == EVFILT_PROC && 1539 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1540 /* This is an internal creation of a process tracking 1541 * note. Don't attempt to coalesce this with an 1542 * existing note. 1543 */ 1544 ; 1545 } else if (kq->kq_knhashmask != 0) { 1546 struct klist *list; 1547 1548 list = &kq->kq_knhash[ 1549 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1550 SLIST_FOREACH(kn, list, kn_link) 1551 if (kev->ident == kn->kn_id && 1552 kev->filter == kn->kn_filter) 1553 break; 1554 } 1555 } 1556 1557 /* knote is in the process of changing, wait for it to stabilize. */ 1558 if (kn != NULL && kn_in_flux(kn)) { 1559 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1560 if (filedesc_unlock) { 1561 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1562 filedesc_unlock = 0; 1563 } 1564 kq->kq_state |= KQ_FLUXWAIT; 1565 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1566 if (fp != NULL) { 1567 fdrop(fp, td); 1568 fp = NULL; 1569 } 1570 goto findkn; 1571 } 1572 1573 /* 1574 * kn now contains the matching knote, or NULL if no match 1575 */ 1576 if (kn == NULL) { 1577 if (kev->flags & EV_ADD) { 1578 kn = tkn; 1579 tkn = NULL; 1580 if (kn == NULL) { 1581 KQ_UNLOCK(kq); 1582 error = ENOMEM; 1583 goto done; 1584 } 1585 kn->kn_fp = fp; 1586 kn->kn_kq = kq; 1587 kn->kn_fop = fops; 1588 /* 1589 * apply reference counts to knote structure, and 1590 * do not release it at the end of this routine. 1591 */ 1592 fops = NULL; 1593 fp = NULL; 1594 1595 kn->kn_sfflags = kev->fflags; 1596 kn->kn_sdata = kev->data; 1597 kev->fflags = 0; 1598 kev->data = 0; 1599 kn->kn_kevent = *kev; 1600 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1601 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1602 kn->kn_status = KN_DETACHED; 1603 if ((kev->flags & EV_DISABLE) != 0) 1604 kn->kn_status |= KN_DISABLED; 1605 kn_enter_flux(kn); 1606 1607 error = knote_attach(kn, kq); 1608 KQ_UNLOCK(kq); 1609 if (error != 0) { 1610 tkn = kn; 1611 goto done; 1612 } 1613 1614 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1615 knote_drop_detached(kn, td); 1616 goto done; 1617 } 1618 knl = kn_list_lock(kn); 1619 goto done_ev_add; 1620 } else { 1621 /* No matching knote and the EV_ADD flag is not set. */ 1622 KQ_UNLOCK(kq); 1623 error = ENOENT; 1624 goto done; 1625 } 1626 } 1627 1628 if (kev->flags & EV_DELETE) { 1629 kn_enter_flux(kn); 1630 KQ_UNLOCK(kq); 1631 knote_drop(kn, td); 1632 goto done; 1633 } 1634 1635 if (kev->flags & EV_FORCEONESHOT) { 1636 kn->kn_flags |= EV_ONESHOT; 1637 KNOTE_ACTIVATE(kn, 1); 1638 } 1639 1640 if ((kev->flags & EV_ENABLE) != 0) 1641 kn->kn_status &= ~KN_DISABLED; 1642 else if ((kev->flags & EV_DISABLE) != 0) 1643 kn->kn_status |= KN_DISABLED; 1644 1645 /* 1646 * The user may change some filter values after the initial EV_ADD, 1647 * but doing so will not reset any filter which has already been 1648 * triggered. 1649 */ 1650 kn->kn_status |= KN_SCAN; 1651 kn_enter_flux(kn); 1652 KQ_UNLOCK(kq); 1653 knl = kn_list_lock(kn); 1654 kn->kn_kevent.udata = kev->udata; 1655 if (!fops->f_isfd && fops->f_touch != NULL) { 1656 fops->f_touch(kn, kev, EVENT_REGISTER); 1657 } else { 1658 kn->kn_sfflags = kev->fflags; 1659 kn->kn_sdata = kev->data; 1660 } 1661 1662 done_ev_add: 1663 /* 1664 * We can get here with kn->kn_knlist == NULL. This can happen when 1665 * the initial attach event decides that the event is "completed" 1666 * already, e.g., filt_procattach() is called on a zombie process. It 1667 * will call filt_proc() which will remove it from the list, and NULL 1668 * kn_knlist. 1669 * 1670 * KN_DISABLED will be stable while the knote is in flux, so the 1671 * unlocked read will not race with an update. 1672 */ 1673 if ((kn->kn_status & KN_DISABLED) == 0) 1674 event = kn->kn_fop->f_event(kn, 0); 1675 else 1676 event = 0; 1677 1678 KQ_LOCK(kq); 1679 if (event) 1680 kn->kn_status |= KN_ACTIVE; 1681 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1682 KN_ACTIVE) 1683 knote_enqueue(kn); 1684 kn->kn_status &= ~KN_SCAN; 1685 kn_leave_flux(kn); 1686 kn_list_unlock(knl); 1687 KQ_UNLOCK_FLUX(kq); 1688 1689 done: 1690 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1691 if (filedesc_unlock) 1692 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1693 if (fp != NULL) 1694 fdrop(fp, td); 1695 knote_free(tkn); 1696 if (fops != NULL) 1697 kqueue_fo_release(filt); 1698 return (error); 1699 } 1700 1701 static int 1702 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1703 { 1704 int error; 1705 struct kqueue *kq; 1706 1707 error = 0; 1708 1709 kq = fp->f_data; 1710 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1711 return (EBADF); 1712 *kqp = kq; 1713 KQ_LOCK(kq); 1714 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1715 KQ_UNLOCK(kq); 1716 return (EBADF); 1717 } 1718 kq->kq_refcnt++; 1719 KQ_UNLOCK(kq); 1720 1721 return error; 1722 } 1723 1724 static void 1725 kqueue_release(struct kqueue *kq, int locked) 1726 { 1727 if (locked) 1728 KQ_OWNED(kq); 1729 else 1730 KQ_LOCK(kq); 1731 kq->kq_refcnt--; 1732 if (kq->kq_refcnt == 1) 1733 wakeup(&kq->kq_refcnt); 1734 if (!locked) 1735 KQ_UNLOCK(kq); 1736 } 1737 1738 static void 1739 kqueue_schedtask(struct kqueue *kq) 1740 { 1741 1742 KQ_OWNED(kq); 1743 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1744 ("scheduling kqueue task while draining")); 1745 1746 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1747 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1748 kq->kq_state |= KQ_TASKSCHED; 1749 } 1750 } 1751 1752 /* 1753 * Expand the kq to make sure we have storage for fops/ident pair. 1754 * 1755 * Return 0 on success (or no work necessary), return errno on failure. 1756 */ 1757 static int 1758 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1759 int mflag) 1760 { 1761 struct klist *list, *tmp_knhash, *to_free; 1762 u_long tmp_knhashmask; 1763 int error, fd, size; 1764 1765 KQ_NOTOWNED(kq); 1766 1767 error = 0; 1768 to_free = NULL; 1769 if (fops->f_isfd) { 1770 fd = ident; 1771 if (kq->kq_knlistsize <= fd) { 1772 size = kq->kq_knlistsize; 1773 while (size <= fd) 1774 size += KQEXTENT; 1775 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1776 if (list == NULL) 1777 return ENOMEM; 1778 KQ_LOCK(kq); 1779 if ((kq->kq_state & KQ_CLOSING) != 0) { 1780 to_free = list; 1781 error = EBADF; 1782 } else if (kq->kq_knlistsize > fd) { 1783 to_free = list; 1784 } else { 1785 if (kq->kq_knlist != NULL) { 1786 bcopy(kq->kq_knlist, list, 1787 kq->kq_knlistsize * sizeof(*list)); 1788 to_free = kq->kq_knlist; 1789 kq->kq_knlist = NULL; 1790 } 1791 bzero((caddr_t)list + 1792 kq->kq_knlistsize * sizeof(*list), 1793 (size - kq->kq_knlistsize) * sizeof(*list)); 1794 kq->kq_knlistsize = size; 1795 kq->kq_knlist = list; 1796 } 1797 KQ_UNLOCK(kq); 1798 } 1799 } else { 1800 if (kq->kq_knhashmask == 0) { 1801 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, 1802 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? 1803 HASH_WAITOK : HASH_NOWAIT); 1804 if (tmp_knhash == NULL) 1805 return (ENOMEM); 1806 KQ_LOCK(kq); 1807 if ((kq->kq_state & KQ_CLOSING) != 0) { 1808 to_free = tmp_knhash; 1809 error = EBADF; 1810 } else if (kq->kq_knhashmask == 0) { 1811 kq->kq_knhash = tmp_knhash; 1812 kq->kq_knhashmask = tmp_knhashmask; 1813 } else { 1814 to_free = tmp_knhash; 1815 } 1816 KQ_UNLOCK(kq); 1817 } 1818 } 1819 free(to_free, M_KQUEUE); 1820 1821 KQ_NOTOWNED(kq); 1822 return (error); 1823 } 1824 1825 static void 1826 kqueue_task(void *arg, int pending) 1827 { 1828 struct kqueue *kq; 1829 int haskqglobal; 1830 1831 haskqglobal = 0; 1832 kq = arg; 1833 1834 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1835 KQ_LOCK(kq); 1836 1837 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1838 1839 kq->kq_state &= ~KQ_TASKSCHED; 1840 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1841 wakeup(&kq->kq_state); 1842 } 1843 KQ_UNLOCK(kq); 1844 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1845 } 1846 1847 /* 1848 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1849 * We treat KN_MARKER knotes as if they are in flux. 1850 */ 1851 static int 1852 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1853 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1854 { 1855 struct kevent *kevp; 1856 struct knote *kn, *marker; 1857 struct knlist *knl; 1858 sbintime_t asbt, rsbt; 1859 int count, error, haskqglobal, influx, nkev, touch; 1860 1861 count = maxevents; 1862 nkev = 0; 1863 error = 0; 1864 haskqglobal = 0; 1865 1866 if (maxevents == 0) 1867 goto done_nl; 1868 1869 rsbt = 0; 1870 if (tsp != NULL) { 1871 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1872 tsp->tv_nsec >= 1000000000) { 1873 error = EINVAL; 1874 goto done_nl; 1875 } 1876 if (timespecisset(tsp)) { 1877 if (tsp->tv_sec <= INT32_MAX) { 1878 rsbt = tstosbt(*tsp); 1879 if (TIMESEL(&asbt, rsbt)) 1880 asbt += tc_tick_sbt; 1881 if (asbt <= SBT_MAX - rsbt) 1882 asbt += rsbt; 1883 else 1884 asbt = 0; 1885 rsbt >>= tc_precexp; 1886 } else 1887 asbt = 0; 1888 } else 1889 asbt = -1; 1890 } else 1891 asbt = 0; 1892 marker = knote_alloc(M_WAITOK); 1893 marker->kn_status = KN_MARKER; 1894 KQ_LOCK(kq); 1895 1896 retry: 1897 kevp = keva; 1898 if (kq->kq_count == 0) { 1899 if (asbt == -1) { 1900 error = EWOULDBLOCK; 1901 } else { 1902 kq->kq_state |= KQ_SLEEP; 1903 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1904 "kqread", asbt, rsbt, C_ABSOLUTE); 1905 } 1906 if (error == 0) 1907 goto retry; 1908 /* don't restart after signals... */ 1909 if (error == ERESTART) 1910 error = EINTR; 1911 else if (error == EWOULDBLOCK) 1912 error = 0; 1913 goto done; 1914 } 1915 1916 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1917 influx = 0; 1918 while (count) { 1919 KQ_OWNED(kq); 1920 kn = TAILQ_FIRST(&kq->kq_head); 1921 1922 if ((kn->kn_status == KN_MARKER && kn != marker) || 1923 kn_in_flux(kn)) { 1924 if (influx) { 1925 influx = 0; 1926 KQ_FLUX_WAKEUP(kq); 1927 } 1928 kq->kq_state |= KQ_FLUXWAIT; 1929 error = msleep(kq, &kq->kq_lock, PSOCK, 1930 "kqflxwt", 0); 1931 continue; 1932 } 1933 1934 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1935 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1936 kn->kn_status &= ~KN_QUEUED; 1937 kq->kq_count--; 1938 continue; 1939 } 1940 if (kn == marker) { 1941 KQ_FLUX_WAKEUP(kq); 1942 if (count == maxevents) 1943 goto retry; 1944 goto done; 1945 } 1946 KASSERT(!kn_in_flux(kn), 1947 ("knote %p is unexpectedly in flux", kn)); 1948 1949 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1950 kn->kn_status &= ~KN_QUEUED; 1951 kn_enter_flux(kn); 1952 kq->kq_count--; 1953 KQ_UNLOCK(kq); 1954 /* 1955 * We don't need to lock the list since we've 1956 * marked it as in flux. 1957 */ 1958 knote_drop(kn, td); 1959 KQ_LOCK(kq); 1960 continue; 1961 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1962 kn->kn_status &= ~KN_QUEUED; 1963 kn_enter_flux(kn); 1964 kq->kq_count--; 1965 KQ_UNLOCK(kq); 1966 /* 1967 * We don't need to lock the list since we've 1968 * marked the knote as being in flux. 1969 */ 1970 *kevp = kn->kn_kevent; 1971 knote_drop(kn, td); 1972 KQ_LOCK(kq); 1973 kn = NULL; 1974 } else { 1975 kn->kn_status |= KN_SCAN; 1976 kn_enter_flux(kn); 1977 KQ_UNLOCK(kq); 1978 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 1979 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1980 knl = kn_list_lock(kn); 1981 if (kn->kn_fop->f_event(kn, 0) == 0) { 1982 KQ_LOCK(kq); 1983 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1984 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 1985 KN_SCAN); 1986 kn_leave_flux(kn); 1987 kq->kq_count--; 1988 kn_list_unlock(knl); 1989 influx = 1; 1990 continue; 1991 } 1992 touch = (!kn->kn_fop->f_isfd && 1993 kn->kn_fop->f_touch != NULL); 1994 if (touch) 1995 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 1996 else 1997 *kevp = kn->kn_kevent; 1998 KQ_LOCK(kq); 1999 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2000 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 2001 /* 2002 * Manually clear knotes who weren't 2003 * 'touch'ed. 2004 */ 2005 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 2006 kn->kn_data = 0; 2007 kn->kn_fflags = 0; 2008 } 2009 if (kn->kn_flags & EV_DISPATCH) 2010 kn->kn_status |= KN_DISABLED; 2011 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 2012 kq->kq_count--; 2013 } else 2014 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2015 2016 kn->kn_status &= ~KN_SCAN; 2017 kn_leave_flux(kn); 2018 kn_list_unlock(knl); 2019 influx = 1; 2020 } 2021 2022 /* we are returning a copy to the user */ 2023 kevp++; 2024 nkev++; 2025 count--; 2026 2027 if (nkev == KQ_NEVENTS) { 2028 influx = 0; 2029 KQ_UNLOCK_FLUX(kq); 2030 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2031 nkev = 0; 2032 kevp = keva; 2033 KQ_LOCK(kq); 2034 if (error) 2035 break; 2036 } 2037 } 2038 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 2039 done: 2040 KQ_OWNED(kq); 2041 KQ_UNLOCK_FLUX(kq); 2042 knote_free(marker); 2043 done_nl: 2044 KQ_NOTOWNED(kq); 2045 if (nkev != 0) 2046 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2047 td->td_retval[0] = maxevents - count; 2048 return (error); 2049 } 2050 2051 /*ARGSUSED*/ 2052 static int 2053 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 2054 struct ucred *active_cred, struct thread *td) 2055 { 2056 /* 2057 * Enabling sigio causes two major problems: 2058 * 1) infinite recursion: 2059 * Synopsys: kevent is being used to track signals and have FIOASYNC 2060 * set. On receipt of a signal this will cause a kqueue to recurse 2061 * into itself over and over. Sending the sigio causes the kqueue 2062 * to become ready, which in turn posts sigio again, forever. 2063 * Solution: this can be solved by setting a flag in the kqueue that 2064 * we have a SIGIO in progress. 2065 * 2) locking problems: 2066 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 2067 * us above the proc and pgrp locks. 2068 * Solution: Post a signal using an async mechanism, being sure to 2069 * record a generation count in the delivery so that we do not deliver 2070 * a signal to the wrong process. 2071 * 2072 * Note, these two mechanisms are somewhat mutually exclusive! 2073 */ 2074 #if 0 2075 struct kqueue *kq; 2076 2077 kq = fp->f_data; 2078 switch (cmd) { 2079 case FIOASYNC: 2080 if (*(int *)data) { 2081 kq->kq_state |= KQ_ASYNC; 2082 } else { 2083 kq->kq_state &= ~KQ_ASYNC; 2084 } 2085 return (0); 2086 2087 case FIOSETOWN: 2088 return (fsetown(*(int *)data, &kq->kq_sigio)); 2089 2090 case FIOGETOWN: 2091 *(int *)data = fgetown(&kq->kq_sigio); 2092 return (0); 2093 } 2094 #endif 2095 2096 return (ENOTTY); 2097 } 2098 2099 /*ARGSUSED*/ 2100 static int 2101 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 2102 struct thread *td) 2103 { 2104 struct kqueue *kq; 2105 int revents = 0; 2106 int error; 2107 2108 if ((error = kqueue_acquire(fp, &kq))) 2109 return POLLERR; 2110 2111 KQ_LOCK(kq); 2112 if (events & (POLLIN | POLLRDNORM)) { 2113 if (kq->kq_count) { 2114 revents |= events & (POLLIN | POLLRDNORM); 2115 } else { 2116 selrecord(td, &kq->kq_sel); 2117 if (SEL_WAITING(&kq->kq_sel)) 2118 kq->kq_state |= KQ_SEL; 2119 } 2120 } 2121 kqueue_release(kq, 1); 2122 KQ_UNLOCK(kq); 2123 return (revents); 2124 } 2125 2126 /*ARGSUSED*/ 2127 static int 2128 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2129 struct thread *td) 2130 { 2131 2132 bzero((void *)st, sizeof *st); 2133 /* 2134 * We no longer return kq_count because the unlocked value is useless. 2135 * If you spent all this time getting the count, why not spend your 2136 * syscall better by calling kevent? 2137 * 2138 * XXX - This is needed for libc_r. 2139 */ 2140 st->st_mode = S_IFIFO; 2141 return (0); 2142 } 2143 2144 static void 2145 kqueue_drain(struct kqueue *kq, struct thread *td) 2146 { 2147 struct knote *kn; 2148 int i; 2149 2150 KQ_LOCK(kq); 2151 2152 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2153 ("kqueue already closing")); 2154 kq->kq_state |= KQ_CLOSING; 2155 if (kq->kq_refcnt > 1) 2156 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2157 2158 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2159 2160 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2161 ("kqueue's knlist not empty")); 2162 2163 for (i = 0; i < kq->kq_knlistsize; i++) { 2164 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2165 if (kn_in_flux(kn)) { 2166 kq->kq_state |= KQ_FLUXWAIT; 2167 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2168 continue; 2169 } 2170 kn_enter_flux(kn); 2171 KQ_UNLOCK(kq); 2172 knote_drop(kn, td); 2173 KQ_LOCK(kq); 2174 } 2175 } 2176 if (kq->kq_knhashmask != 0) { 2177 for (i = 0; i <= kq->kq_knhashmask; i++) { 2178 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2179 if (kn_in_flux(kn)) { 2180 kq->kq_state |= KQ_FLUXWAIT; 2181 msleep(kq, &kq->kq_lock, PSOCK, 2182 "kqclo2", 0); 2183 continue; 2184 } 2185 kn_enter_flux(kn); 2186 KQ_UNLOCK(kq); 2187 knote_drop(kn, td); 2188 KQ_LOCK(kq); 2189 } 2190 } 2191 } 2192 2193 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2194 kq->kq_state |= KQ_TASKDRAIN; 2195 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2196 } 2197 2198 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2199 selwakeuppri(&kq->kq_sel, PSOCK); 2200 if (!SEL_WAITING(&kq->kq_sel)) 2201 kq->kq_state &= ~KQ_SEL; 2202 } 2203 2204 KQ_UNLOCK(kq); 2205 } 2206 2207 static void 2208 kqueue_destroy(struct kqueue *kq) 2209 { 2210 2211 KASSERT(kq->kq_fdp == NULL, 2212 ("kqueue still attached to a file descriptor")); 2213 seldrain(&kq->kq_sel); 2214 knlist_destroy(&kq->kq_sel.si_note); 2215 mtx_destroy(&kq->kq_lock); 2216 2217 if (kq->kq_knhash != NULL) 2218 free(kq->kq_knhash, M_KQUEUE); 2219 if (kq->kq_knlist != NULL) 2220 free(kq->kq_knlist, M_KQUEUE); 2221 2222 funsetown(&kq->kq_sigio); 2223 } 2224 2225 /*ARGSUSED*/ 2226 static int 2227 kqueue_close(struct file *fp, struct thread *td) 2228 { 2229 struct kqueue *kq = fp->f_data; 2230 struct filedesc *fdp; 2231 int error; 2232 int filedesc_unlock; 2233 2234 if ((error = kqueue_acquire(fp, &kq))) 2235 return error; 2236 kqueue_drain(kq, td); 2237 2238 /* 2239 * We could be called due to the knote_drop() doing fdrop(), 2240 * called from kqueue_register(). In this case the global 2241 * lock is owned, and filedesc sx is locked before, to not 2242 * take the sleepable lock after non-sleepable. 2243 */ 2244 fdp = kq->kq_fdp; 2245 kq->kq_fdp = NULL; 2246 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2247 FILEDESC_XLOCK(fdp); 2248 filedesc_unlock = 1; 2249 } else 2250 filedesc_unlock = 0; 2251 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2252 if (filedesc_unlock) 2253 FILEDESC_XUNLOCK(fdp); 2254 2255 kqueue_destroy(kq); 2256 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2257 crfree(kq->kq_cred); 2258 free(kq, M_KQUEUE); 2259 fp->f_data = NULL; 2260 2261 return (0); 2262 } 2263 2264 static int 2265 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2266 { 2267 2268 kif->kf_type = KF_TYPE_KQUEUE; 2269 return (0); 2270 } 2271 2272 static void 2273 kqueue_wakeup(struct kqueue *kq) 2274 { 2275 KQ_OWNED(kq); 2276 2277 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2278 kq->kq_state &= ~KQ_SLEEP; 2279 wakeup(kq); 2280 } 2281 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2282 selwakeuppri(&kq->kq_sel, PSOCK); 2283 if (!SEL_WAITING(&kq->kq_sel)) 2284 kq->kq_state &= ~KQ_SEL; 2285 } 2286 if (!knlist_empty(&kq->kq_sel.si_note)) 2287 kqueue_schedtask(kq); 2288 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2289 pgsigio(&kq->kq_sigio, SIGIO, 0); 2290 } 2291 } 2292 2293 /* 2294 * Walk down a list of knotes, activating them if their event has triggered. 2295 * 2296 * There is a possibility to optimize in the case of one kq watching another. 2297 * Instead of scheduling a task to wake it up, you could pass enough state 2298 * down the chain to make up the parent kqueue. Make this code functional 2299 * first. 2300 */ 2301 void 2302 knote(struct knlist *list, long hint, int lockflags) 2303 { 2304 struct kqueue *kq; 2305 struct knote *kn, *tkn; 2306 int error; 2307 2308 if (list == NULL) 2309 return; 2310 2311 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2312 2313 if ((lockflags & KNF_LISTLOCKED) == 0) 2314 list->kl_lock(list->kl_lockarg); 2315 2316 /* 2317 * If we unlock the list lock (and enter influx), we can 2318 * eliminate the kqueue scheduling, but this will introduce 2319 * four lock/unlock's for each knote to test. Also, marker 2320 * would be needed to keep iteration position, since filters 2321 * or other threads could remove events. 2322 */ 2323 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2324 kq = kn->kn_kq; 2325 KQ_LOCK(kq); 2326 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2327 /* 2328 * Do not process the influx notes, except for 2329 * the influx coming from the kq unlock in the 2330 * kqueue_scan(). In the later case, we do 2331 * not interfere with the scan, since the code 2332 * fragment in kqueue_scan() locks the knlist, 2333 * and cannot proceed until we finished. 2334 */ 2335 KQ_UNLOCK(kq); 2336 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2337 kn_enter_flux(kn); 2338 KQ_UNLOCK(kq); 2339 error = kn->kn_fop->f_event(kn, hint); 2340 KQ_LOCK(kq); 2341 kn_leave_flux(kn); 2342 if (error) 2343 KNOTE_ACTIVATE(kn, 1); 2344 KQ_UNLOCK_FLUX(kq); 2345 } else { 2346 if (kn->kn_fop->f_event(kn, hint)) 2347 KNOTE_ACTIVATE(kn, 1); 2348 KQ_UNLOCK(kq); 2349 } 2350 } 2351 if ((lockflags & KNF_LISTLOCKED) == 0) 2352 list->kl_unlock(list->kl_lockarg); 2353 } 2354 2355 /* 2356 * add a knote to a knlist 2357 */ 2358 void 2359 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2360 { 2361 2362 KNL_ASSERT_LOCK(knl, islocked); 2363 KQ_NOTOWNED(kn->kn_kq); 2364 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2365 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2366 ("knote %p was not detached", kn)); 2367 if (!islocked) 2368 knl->kl_lock(knl->kl_lockarg); 2369 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2370 if (!islocked) 2371 knl->kl_unlock(knl->kl_lockarg); 2372 KQ_LOCK(kn->kn_kq); 2373 kn->kn_knlist = knl; 2374 kn->kn_status &= ~KN_DETACHED; 2375 KQ_UNLOCK(kn->kn_kq); 2376 } 2377 2378 static void 2379 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2380 int kqislocked) 2381 { 2382 2383 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2384 KNL_ASSERT_LOCK(knl, knlislocked); 2385 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2386 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2387 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2388 ("knote %p was already detached", kn)); 2389 if (!knlislocked) 2390 knl->kl_lock(knl->kl_lockarg); 2391 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2392 kn->kn_knlist = NULL; 2393 if (!knlislocked) 2394 kn_list_unlock(knl); 2395 if (!kqislocked) 2396 KQ_LOCK(kn->kn_kq); 2397 kn->kn_status |= KN_DETACHED; 2398 if (!kqislocked) 2399 KQ_UNLOCK(kn->kn_kq); 2400 } 2401 2402 /* 2403 * remove knote from the specified knlist 2404 */ 2405 void 2406 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2407 { 2408 2409 knlist_remove_kq(knl, kn, islocked, 0); 2410 } 2411 2412 int 2413 knlist_empty(struct knlist *knl) 2414 { 2415 2416 KNL_ASSERT_LOCKED(knl); 2417 return (SLIST_EMPTY(&knl->kl_list)); 2418 } 2419 2420 static struct mtx knlist_lock; 2421 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2422 MTX_DEF); 2423 static void knlist_mtx_lock(void *arg); 2424 static void knlist_mtx_unlock(void *arg); 2425 2426 static void 2427 knlist_mtx_lock(void *arg) 2428 { 2429 2430 mtx_lock((struct mtx *)arg); 2431 } 2432 2433 static void 2434 knlist_mtx_unlock(void *arg) 2435 { 2436 2437 mtx_unlock((struct mtx *)arg); 2438 } 2439 2440 static void 2441 knlist_mtx_assert_lock(void *arg, int what) 2442 { 2443 2444 if (what == LA_LOCKED) 2445 mtx_assert((struct mtx *)arg, MA_OWNED); 2446 else 2447 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2448 } 2449 2450 static void 2451 knlist_rw_rlock(void *arg) 2452 { 2453 2454 rw_rlock((struct rwlock *)arg); 2455 } 2456 2457 static void 2458 knlist_rw_runlock(void *arg) 2459 { 2460 2461 rw_runlock((struct rwlock *)arg); 2462 } 2463 2464 static void 2465 knlist_rw_assert_lock(void *arg, int what) 2466 { 2467 2468 if (what == LA_LOCKED) 2469 rw_assert((struct rwlock *)arg, RA_LOCKED); 2470 else 2471 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2472 } 2473 2474 void 2475 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2476 void (*kl_unlock)(void *), 2477 void (*kl_assert_lock)(void *, int)) 2478 { 2479 2480 if (lock == NULL) 2481 knl->kl_lockarg = &knlist_lock; 2482 else 2483 knl->kl_lockarg = lock; 2484 2485 if (kl_lock == NULL) 2486 knl->kl_lock = knlist_mtx_lock; 2487 else 2488 knl->kl_lock = kl_lock; 2489 if (kl_unlock == NULL) 2490 knl->kl_unlock = knlist_mtx_unlock; 2491 else 2492 knl->kl_unlock = kl_unlock; 2493 if (kl_assert_lock == NULL) 2494 knl->kl_assert_lock = knlist_mtx_assert_lock; 2495 else 2496 knl->kl_assert_lock = kl_assert_lock; 2497 2498 knl->kl_autodestroy = 0; 2499 SLIST_INIT(&knl->kl_list); 2500 } 2501 2502 void 2503 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2504 { 2505 2506 knlist_init(knl, lock, NULL, NULL, NULL); 2507 } 2508 2509 struct knlist * 2510 knlist_alloc(struct mtx *lock) 2511 { 2512 struct knlist *knl; 2513 2514 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); 2515 knlist_init_mtx(knl, lock); 2516 return (knl); 2517 } 2518 2519 void 2520 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2521 { 2522 2523 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2524 knlist_rw_assert_lock); 2525 } 2526 2527 void 2528 knlist_destroy(struct knlist *knl) 2529 { 2530 2531 KASSERT(KNLIST_EMPTY(knl), 2532 ("destroying knlist %p with knotes on it", knl)); 2533 } 2534 2535 void 2536 knlist_detach(struct knlist *knl) 2537 { 2538 2539 KNL_ASSERT_LOCKED(knl); 2540 knl->kl_autodestroy = 1; 2541 if (knlist_empty(knl)) { 2542 knlist_destroy(knl); 2543 free(knl, M_KQUEUE); 2544 } 2545 } 2546 2547 /* 2548 * Even if we are locked, we may need to drop the lock to allow any influx 2549 * knotes time to "settle". 2550 */ 2551 void 2552 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2553 { 2554 struct knote *kn, *kn2; 2555 struct kqueue *kq; 2556 2557 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl)); 2558 if (islocked) 2559 KNL_ASSERT_LOCKED(knl); 2560 else { 2561 KNL_ASSERT_UNLOCKED(knl); 2562 again: /* need to reacquire lock since we have dropped it */ 2563 knl->kl_lock(knl->kl_lockarg); 2564 } 2565 2566 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2567 kq = kn->kn_kq; 2568 KQ_LOCK(kq); 2569 if (kn_in_flux(kn)) { 2570 KQ_UNLOCK(kq); 2571 continue; 2572 } 2573 knlist_remove_kq(knl, kn, 1, 1); 2574 if (killkn) { 2575 kn_enter_flux(kn); 2576 KQ_UNLOCK(kq); 2577 knote_drop_detached(kn, td); 2578 } else { 2579 /* Make sure cleared knotes disappear soon */ 2580 kn->kn_flags |= EV_EOF | EV_ONESHOT; 2581 KQ_UNLOCK(kq); 2582 } 2583 kq = NULL; 2584 } 2585 2586 if (!SLIST_EMPTY(&knl->kl_list)) { 2587 /* there are still in flux knotes remaining */ 2588 kn = SLIST_FIRST(&knl->kl_list); 2589 kq = kn->kn_kq; 2590 KQ_LOCK(kq); 2591 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock")); 2592 knl->kl_unlock(knl->kl_lockarg); 2593 kq->kq_state |= KQ_FLUXWAIT; 2594 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2595 kq = NULL; 2596 goto again; 2597 } 2598 2599 if (islocked) 2600 KNL_ASSERT_LOCKED(knl); 2601 else { 2602 knl->kl_unlock(knl->kl_lockarg); 2603 KNL_ASSERT_UNLOCKED(knl); 2604 } 2605 } 2606 2607 /* 2608 * Remove all knotes referencing a specified fd must be called with FILEDESC 2609 * lock. This prevents a race where a new fd comes along and occupies the 2610 * entry and we attach a knote to the fd. 2611 */ 2612 void 2613 knote_fdclose(struct thread *td, int fd) 2614 { 2615 struct filedesc *fdp = td->td_proc->p_fd; 2616 struct kqueue *kq; 2617 struct knote *kn; 2618 int influx; 2619 2620 FILEDESC_XLOCK_ASSERT(fdp); 2621 2622 /* 2623 * We shouldn't have to worry about new kevents appearing on fd 2624 * since filedesc is locked. 2625 */ 2626 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2627 KQ_LOCK(kq); 2628 2629 again: 2630 influx = 0; 2631 while (kq->kq_knlistsize > fd && 2632 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2633 if (kn_in_flux(kn)) { 2634 /* someone else might be waiting on our knote */ 2635 if (influx) 2636 wakeup(kq); 2637 kq->kq_state |= KQ_FLUXWAIT; 2638 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2639 goto again; 2640 } 2641 kn_enter_flux(kn); 2642 KQ_UNLOCK(kq); 2643 influx = 1; 2644 knote_drop(kn, td); 2645 KQ_LOCK(kq); 2646 } 2647 KQ_UNLOCK_FLUX(kq); 2648 } 2649 } 2650 2651 static int 2652 knote_attach(struct knote *kn, struct kqueue *kq) 2653 { 2654 struct klist *list; 2655 2656 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2657 KQ_OWNED(kq); 2658 2659 if ((kq->kq_state & KQ_CLOSING) != 0) 2660 return (EBADF); 2661 if (kn->kn_fop->f_isfd) { 2662 if (kn->kn_id >= kq->kq_knlistsize) 2663 return (ENOMEM); 2664 list = &kq->kq_knlist[kn->kn_id]; 2665 } else { 2666 if (kq->kq_knhash == NULL) 2667 return (ENOMEM); 2668 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2669 } 2670 SLIST_INSERT_HEAD(list, kn, kn_link); 2671 return (0); 2672 } 2673 2674 static void 2675 knote_drop(struct knote *kn, struct thread *td) 2676 { 2677 2678 if ((kn->kn_status & KN_DETACHED) == 0) 2679 kn->kn_fop->f_detach(kn); 2680 knote_drop_detached(kn, td); 2681 } 2682 2683 static void 2684 knote_drop_detached(struct knote *kn, struct thread *td) 2685 { 2686 struct kqueue *kq; 2687 struct klist *list; 2688 2689 kq = kn->kn_kq; 2690 2691 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2692 ("knote %p still attached", kn)); 2693 KQ_NOTOWNED(kq); 2694 2695 KQ_LOCK(kq); 2696 KASSERT(kn->kn_influx == 1, 2697 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2698 2699 if (kn->kn_fop->f_isfd) 2700 list = &kq->kq_knlist[kn->kn_id]; 2701 else 2702 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2703 2704 if (!SLIST_EMPTY(list)) 2705 SLIST_REMOVE(list, kn, knote, kn_link); 2706 if (kn->kn_status & KN_QUEUED) 2707 knote_dequeue(kn); 2708 KQ_UNLOCK_FLUX(kq); 2709 2710 if (kn->kn_fop->f_isfd) { 2711 fdrop(kn->kn_fp, td); 2712 kn->kn_fp = NULL; 2713 } 2714 kqueue_fo_release(kn->kn_kevent.filter); 2715 kn->kn_fop = NULL; 2716 knote_free(kn); 2717 } 2718 2719 static void 2720 knote_enqueue(struct knote *kn) 2721 { 2722 struct kqueue *kq = kn->kn_kq; 2723 2724 KQ_OWNED(kn->kn_kq); 2725 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2726 2727 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2728 kn->kn_status |= KN_QUEUED; 2729 kq->kq_count++; 2730 kqueue_wakeup(kq); 2731 } 2732 2733 static void 2734 knote_dequeue(struct knote *kn) 2735 { 2736 struct kqueue *kq = kn->kn_kq; 2737 2738 KQ_OWNED(kn->kn_kq); 2739 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2740 2741 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2742 kn->kn_status &= ~KN_QUEUED; 2743 kq->kq_count--; 2744 } 2745 2746 static void 2747 knote_init(void) 2748 { 2749 2750 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2751 NULL, NULL, UMA_ALIGN_PTR, 0); 2752 } 2753 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2754 2755 static struct knote * 2756 knote_alloc(int mflag) 2757 { 2758 2759 return (uma_zalloc(knote_zone, mflag | M_ZERO)); 2760 } 2761 2762 static void 2763 knote_free(struct knote *kn) 2764 { 2765 2766 uma_zfree(knote_zone, kn); 2767 } 2768 2769 /* 2770 * Register the kev w/ the kq specified by fd. 2771 */ 2772 int 2773 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag) 2774 { 2775 struct kqueue *kq; 2776 struct file *fp; 2777 cap_rights_t rights; 2778 int error; 2779 2780 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE), 2781 &fp); 2782 if (error != 0) 2783 return (error); 2784 if ((error = kqueue_acquire(fp, &kq)) != 0) 2785 goto noacquire; 2786 2787 error = kqueue_register(kq, kev, td, mflag); 2788 kqueue_release(kq, 0); 2789 2790 noacquire: 2791 fdrop(fp, td); 2792 return (error); 2793 } 2794