1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 6 * Copyright (c) 2009 Apple, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_ktrace.h" 35 #include "opt_kqueue.h" 36 37 #ifdef COMPAT_FREEBSD11 38 #define _WANT_FREEBSD11_KEVENT 39 #endif 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/capsicum.h> 44 #include <sys/kernel.h> 45 #include <sys/limits.h> 46 #include <sys/lock.h> 47 #include <sys/mutex.h> 48 #include <sys/rwlock.h> 49 #include <sys/proc.h> 50 #include <sys/malloc.h> 51 #include <sys/unistd.h> 52 #include <sys/file.h> 53 #include <sys/filedesc.h> 54 #include <sys/filio.h> 55 #include <sys/fcntl.h> 56 #include <sys/kthread.h> 57 #include <sys/selinfo.h> 58 #include <sys/queue.h> 59 #include <sys/event.h> 60 #include <sys/eventvar.h> 61 #include <sys/poll.h> 62 #include <sys/protosw.h> 63 #include <sys/resourcevar.h> 64 #include <sys/sigio.h> 65 #include <sys/signalvar.h> 66 #include <sys/socket.h> 67 #include <sys/socketvar.h> 68 #include <sys/stat.h> 69 #include <sys/sysctl.h> 70 #include <sys/sysproto.h> 71 #include <sys/syscallsubr.h> 72 #include <sys/taskqueue.h> 73 #include <sys/uio.h> 74 #include <sys/user.h> 75 #ifdef KTRACE 76 #include <sys/ktrace.h> 77 #endif 78 #include <machine/atomic.h> 79 80 #include <vm/uma.h> 81 82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 83 84 /* 85 * This lock is used if multiple kq locks are required. This possibly 86 * should be made into a per proc lock. 
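 *
 * The KQ_GLOBAL_LOCK()/KQ_GLOBAL_UNLOCK() macros defined below take a
 * "haslck" flag so that a code path which may already hold the lock can
 * acquire and release it idempotently.  A minimal sketch of the pattern,
 * as used later in kqueue_register() and kqueue_scan():
 *
 *   int haskqglobal = 0;
 *   ...
 *   KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 *   ...
 *   KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);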
87 */ 88 static struct mtx kq_global; 89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 90 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 91 if (!haslck) \ 92 mtx_lock(lck); \ 93 haslck = 1; \ 94 } while (0) 95 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 96 if (haslck) \ 97 mtx_unlock(lck); \ 98 haslck = 0; \ 99 } while (0) 100 101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx); 102 103 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 104 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 105 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 106 struct thread *td, int mflag); 107 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 108 static void kqueue_release(struct kqueue *kq, int locked); 109 static void kqueue_destroy(struct kqueue *kq); 110 static void kqueue_drain(struct kqueue *kq, struct thread *td); 111 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 112 uintptr_t ident, int mflag); 113 static void kqueue_task(void *arg, int pending); 114 static int kqueue_scan(struct kqueue *kq, int maxevents, 115 struct kevent_copyops *k_ops, 116 const struct timespec *timeout, 117 struct kevent *keva, struct thread *td); 118 static void kqueue_wakeup(struct kqueue *kq); 119 static struct filterops *kqueue_fo_find(int filt); 120 static void kqueue_fo_release(int filt); 121 struct g_kevent_args; 122 static int kern_kevent_generic(struct thread *td, 123 struct g_kevent_args *uap, 124 struct kevent_copyops *k_ops, const char *struct_name); 125 126 static fo_ioctl_t kqueue_ioctl; 127 static fo_poll_t kqueue_poll; 128 static fo_kqfilter_t kqueue_kqfilter; 129 static fo_stat_t kqueue_stat; 130 static fo_close_t kqueue_close; 131 static fo_fill_kinfo_t kqueue_fill_kinfo; 132 133 static struct fileops kqueueops = { 134 .fo_read = invfo_rdwr, 135 .fo_write = invfo_rdwr, 136 .fo_truncate = invfo_truncate, 137 .fo_ioctl = kqueue_ioctl, 138 .fo_poll = kqueue_poll, 139 .fo_kqfilter = kqueue_kqfilter, 140 .fo_stat = kqueue_stat, 141 .fo_close = kqueue_close, 142 .fo_chmod = invfo_chmod, 143 .fo_chown = invfo_chown, 144 .fo_sendfile = invfo_sendfile, 145 .fo_fill_kinfo = kqueue_fill_kinfo, 146 }; 147 148 static int knote_attach(struct knote *kn, struct kqueue *kq); 149 static void knote_drop(struct knote *kn, struct thread *td); 150 static void knote_drop_detached(struct knote *kn, struct thread *td); 151 static void knote_enqueue(struct knote *kn); 152 static void knote_dequeue(struct knote *kn); 153 static void knote_init(void); 154 static struct knote *knote_alloc(int mflag); 155 static void knote_free(struct knote *kn); 156 157 static void filt_kqdetach(struct knote *kn); 158 static int filt_kqueue(struct knote *kn, long hint); 159 static int filt_procattach(struct knote *kn); 160 static void filt_procdetach(struct knote *kn); 161 static int filt_proc(struct knote *kn, long hint); 162 static int filt_fileattach(struct knote *kn); 163 static void filt_timerexpire(void *knx); 164 static void filt_timerexpire_l(struct knote *kn, bool proc_locked); 165 static int filt_timerattach(struct knote *kn); 166 static void filt_timerdetach(struct knote *kn); 167 static void filt_timerstart(struct knote *kn, sbintime_t to); 168 static void filt_timertouch(struct knote *kn, struct kevent *kev, 169 u_long type); 170 static int filt_timervalidate(struct knote *kn, sbintime_t *to); 171 static int filt_timer(struct knote *kn, long hint); 172 static int filt_userattach(struct knote *kn); 173 static void filt_userdetach(struct knote *kn); 
174 static int filt_user(struct knote *kn, long hint); 175 static void filt_usertouch(struct knote *kn, struct kevent *kev, 176 u_long type); 177 178 static struct filterops file_filtops = { 179 .f_isfd = 1, 180 .f_attach = filt_fileattach, 181 }; 182 static struct filterops kqread_filtops = { 183 .f_isfd = 1, 184 .f_detach = filt_kqdetach, 185 .f_event = filt_kqueue, 186 }; 187 /* XXX - move to kern_proc.c? */ 188 static struct filterops proc_filtops = { 189 .f_isfd = 0, 190 .f_attach = filt_procattach, 191 .f_detach = filt_procdetach, 192 .f_event = filt_proc, 193 }; 194 static struct filterops timer_filtops = { 195 .f_isfd = 0, 196 .f_attach = filt_timerattach, 197 .f_detach = filt_timerdetach, 198 .f_event = filt_timer, 199 .f_touch = filt_timertouch, 200 }; 201 static struct filterops user_filtops = { 202 .f_attach = filt_userattach, 203 .f_detach = filt_userdetach, 204 .f_event = filt_user, 205 .f_touch = filt_usertouch, 206 }; 207 208 static uma_zone_t knote_zone; 209 static unsigned int __exclusive_cache_line kq_ncallouts; 210 static unsigned int kq_calloutmax = 4 * 1024; 211 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 212 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 213 214 /* XXX - ensure not influx ? */ 215 #define KNOTE_ACTIVATE(kn, islock) do { \ 216 if ((islock)) \ 217 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 218 else \ 219 KQ_LOCK((kn)->kn_kq); \ 220 (kn)->kn_status |= KN_ACTIVE; \ 221 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 222 knote_enqueue((kn)); \ 223 if (!(islock)) \ 224 KQ_UNLOCK((kn)->kn_kq); \ 225 } while (0) 226 #define KQ_LOCK(kq) do { \ 227 mtx_lock(&(kq)->kq_lock); \ 228 } while (0) 229 #define KQ_FLUX_WAKEUP(kq) do { \ 230 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 231 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 232 wakeup((kq)); \ 233 } \ 234 } while (0) 235 #define KQ_UNLOCK_FLUX(kq) do { \ 236 KQ_FLUX_WAKEUP(kq); \ 237 mtx_unlock(&(kq)->kq_lock); \ 238 } while (0) 239 #define KQ_UNLOCK(kq) do { \ 240 mtx_unlock(&(kq)->kq_lock); \ 241 } while (0) 242 #define KQ_OWNED(kq) do { \ 243 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 244 } while (0) 245 #define KQ_NOTOWNED(kq) do { \ 246 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 247 } while (0) 248 249 static struct knlist * 250 kn_list_lock(struct knote *kn) 251 { 252 struct knlist *knl; 253 254 knl = kn->kn_knlist; 255 if (knl != NULL) 256 knl->kl_lock(knl->kl_lockarg); 257 return (knl); 258 } 259 260 static void 261 kn_list_unlock(struct knlist *knl) 262 { 263 bool do_free; 264 265 if (knl == NULL) 266 return; 267 do_free = knl->kl_autodestroy && knlist_empty(knl); 268 knl->kl_unlock(knl->kl_lockarg); 269 if (do_free) { 270 knlist_destroy(knl); 271 free(knl, M_KQUEUE); 272 } 273 } 274 275 static bool 276 kn_in_flux(struct knote *kn) 277 { 278 279 return (kn->kn_influx > 0); 280 } 281 282 static void 283 kn_enter_flux(struct knote *kn) 284 { 285 286 KQ_OWNED(kn->kn_kq); 287 MPASS(kn->kn_influx < INT_MAX); 288 kn->kn_influx++; 289 } 290 291 static bool 292 kn_leave_flux(struct knote *kn) 293 { 294 295 KQ_OWNED(kn->kn_kq); 296 MPASS(kn->kn_influx > 0); 297 kn->kn_influx--; 298 return (kn->kn_influx == 0); 299 } 300 301 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 302 if (islocked) \ 303 KNL_ASSERT_LOCKED(knl); \ 304 else \ 305 KNL_ASSERT_UNLOCKED(knl); \ 306 } while (0) 307 #ifdef INVARIANTS 308 #define KNL_ASSERT_LOCKED(knl) do { \ 309 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \ 310 } while (0) 311 #define KNL_ASSERT_UNLOCKED(knl) do { \ 312 
knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \ 313 } while (0) 314 #else /* !INVARIANTS */ 315 #define KNL_ASSERT_LOCKED(knl) do {} while (0) 316 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 317 #endif /* INVARIANTS */ 318 319 #ifndef KN_HASHSIZE 320 #define KN_HASHSIZE 64 /* XXX should be tunable */ 321 #endif 322 323 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 324 325 static int 326 filt_nullattach(struct knote *kn) 327 { 328 329 return (ENXIO); 330 }; 331 332 struct filterops null_filtops = { 333 .f_isfd = 0, 334 .f_attach = filt_nullattach, 335 }; 336 337 /* XXX - make SYSINIT to add these, and move into respective modules. */ 338 extern struct filterops sig_filtops; 339 extern struct filterops fs_filtops; 340 341 /* 342 * Table for for all system-defined filters. 343 */ 344 static struct mtx filterops_lock; 345 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 346 MTX_DEF); 347 static struct { 348 struct filterops *for_fop; 349 int for_nolock; 350 int for_refcnt; 351 } sysfilt_ops[EVFILT_SYSCOUNT] = { 352 { &file_filtops, 1 }, /* EVFILT_READ */ 353 { &file_filtops, 1 }, /* EVFILT_WRITE */ 354 { &null_filtops }, /* EVFILT_AIO */ 355 { &file_filtops, 1 }, /* EVFILT_VNODE */ 356 { &proc_filtops, 1 }, /* EVFILT_PROC */ 357 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 358 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 359 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 360 { &fs_filtops, 1 }, /* EVFILT_FS */ 361 { &null_filtops }, /* EVFILT_LIO */ 362 { &user_filtops, 1 }, /* EVFILT_USER */ 363 { &null_filtops }, /* EVFILT_SENDFILE */ 364 { &file_filtops, 1 }, /* EVFILT_EMPTY */ 365 }; 366 367 /* 368 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 369 * method. 370 */ 371 static int 372 filt_fileattach(struct knote *kn) 373 { 374 375 return (fo_kqfilter(kn->kn_fp, kn)); 376 } 377 378 /*ARGSUSED*/ 379 static int 380 kqueue_kqfilter(struct file *fp, struct knote *kn) 381 { 382 struct kqueue *kq = kn->kn_fp->f_data; 383 384 if (kn->kn_filter != EVFILT_READ) 385 return (EINVAL); 386 387 kn->kn_status |= KN_KQUEUE; 388 kn->kn_fop = &kqread_filtops; 389 knlist_add(&kq->kq_sel.si_note, kn, 0); 390 391 return (0); 392 } 393 394 static void 395 filt_kqdetach(struct knote *kn) 396 { 397 struct kqueue *kq = kn->kn_fp->f_data; 398 399 knlist_remove(&kq->kq_sel.si_note, kn, 0); 400 } 401 402 /*ARGSUSED*/ 403 static int 404 filt_kqueue(struct knote *kn, long hint) 405 { 406 struct kqueue *kq = kn->kn_fp->f_data; 407 408 kn->kn_data = kq->kq_count; 409 return (kn->kn_data > 0); 410 } 411 412 /* XXX - move to kern_proc.c? */ 413 static int 414 filt_procattach(struct knote *kn) 415 { 416 struct proc *p; 417 int error; 418 bool exiting, immediate; 419 420 exiting = immediate = false; 421 if (kn->kn_sfflags & NOTE_EXIT) 422 p = pfind_any(kn->kn_id); 423 else 424 p = pfind(kn->kn_id); 425 if (p == NULL) 426 return (ESRCH); 427 if (p->p_flag & P_WEXIT) 428 exiting = true; 429 430 if ((error = p_cansee(curthread, p))) { 431 PROC_UNLOCK(p); 432 return (error); 433 } 434 435 kn->kn_ptr.p_proc = p; 436 kn->kn_flags |= EV_CLEAR; /* automatically set */ 437 438 /* 439 * Internal flag indicating registration done by kernel for the 440 * purposes of getting a NOTE_CHILD notification. 441 */ 442 if (kn->kn_flags & EV_FLAG2) { 443 kn->kn_flags &= ~EV_FLAG2; 444 kn->kn_data = kn->kn_sdata; /* ppid */ 445 kn->kn_fflags = NOTE_CHILD; 446 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); 447 immediate = true; /* Force immediate activation of child note. 
*/ 448 } 449 /* 450 * Internal flag indicating registration done by kernel (for other than 451 * NOTE_CHILD). 452 */ 453 if (kn->kn_flags & EV_FLAG1) { 454 kn->kn_flags &= ~EV_FLAG1; 455 } 456 457 knlist_add(p->p_klist, kn, 1); 458 459 /* 460 * Immediately activate any child notes or, in the case of a zombie 461 * target process, exit notes. The latter is necessary to handle the 462 * case where the target process, e.g. a child, dies before the kevent 463 * is registered. 464 */ 465 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) 466 KNOTE_ACTIVATE(kn, 0); 467 468 PROC_UNLOCK(p); 469 470 return (0); 471 } 472 473 /* 474 * The knote may be attached to a different process, which may exit, 475 * leaving nothing for the knote to be attached to. So when the process 476 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 477 * it will be deleted when read out. However, as part of the knote deletion, 478 * this routine is called, so a check is needed to avoid actually performing 479 * a detach, because the original process does not exist any more. 480 */ 481 /* XXX - move to kern_proc.c? */ 482 static void 483 filt_procdetach(struct knote *kn) 484 { 485 486 knlist_remove(kn->kn_knlist, kn, 0); 487 kn->kn_ptr.p_proc = NULL; 488 } 489 490 /* XXX - move to kern_proc.c? */ 491 static int 492 filt_proc(struct knote *kn, long hint) 493 { 494 struct proc *p; 495 u_int event; 496 497 p = kn->kn_ptr.p_proc; 498 if (p == NULL) /* already activated, from attach filter */ 499 return (0); 500 501 /* Mask off extra data. */ 502 event = (u_int)hint & NOTE_PCTRLMASK; 503 504 /* If the user is interested in this event, record it. */ 505 if (kn->kn_sfflags & event) 506 kn->kn_fflags |= event; 507 508 /* Process is gone, so flag the event as finished. */ 509 if (event == NOTE_EXIT) { 510 kn->kn_flags |= EV_EOF | EV_ONESHOT; 511 kn->kn_ptr.p_proc = NULL; 512 if (kn->kn_fflags & NOTE_EXIT) 513 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 514 if (kn->kn_fflags == 0) 515 kn->kn_flags |= EV_DROP; 516 return (1); 517 } 518 519 return (kn->kn_fflags != 0); 520 } 521 522 /* 523 * Called when the process forked. It mostly does the same as the 524 * knote(), activating all knotes registered to be activated when the 525 * process forked. Additionally, for each knote attached to the 526 * parent, check whether user wants to track the new process. If so 527 * attach a new knote to it, and immediately report an event with the 528 * child's pid. 529 */ 530 void 531 knote_fork(struct knlist *list, int pid) 532 { 533 struct kqueue *kq; 534 struct knote *kn; 535 struct kevent kev; 536 int error; 537 538 MPASS(list != NULL); 539 KNL_ASSERT_LOCKED(list); 540 if (SLIST_EMPTY(&list->kl_list)) 541 return; 542 543 memset(&kev, 0, sizeof(kev)); 544 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 545 kq = kn->kn_kq; 546 KQ_LOCK(kq); 547 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 548 KQ_UNLOCK(kq); 549 continue; 550 } 551 552 /* 553 * The same as knote(), activate the event. 554 */ 555 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 556 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 557 KNOTE_ACTIVATE(kn, 1); 558 KQ_UNLOCK(kq); 559 continue; 560 } 561 562 /* 563 * The NOTE_TRACK case. In addition to the activation 564 * of the event, we need to register new events to 565 * track the child. Drop the locks in preparation for 566 * the call to kqueue_register(). 
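	 *
	 * For reference, a userland consumer typically requests this
	 * tracking with something like (illustrative only):
	 *
	 *   EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
	 *       NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
	 *
	 * after which every fork of the target delivers a NOTE_CHILD event
	 * whose ident is the child pid and whose data field carries the
	 * parent pid.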
567 */ 568 kn_enter_flux(kn); 569 KQ_UNLOCK(kq); 570 list->kl_unlock(list->kl_lockarg); 571 572 /* 573 * Activate existing knote and register tracking knotes with 574 * new process. 575 * 576 * First register a knote to get just the child notice. This 577 * must be a separate note from a potential NOTE_EXIT 578 * notification since both NOTE_CHILD and NOTE_EXIT are defined 579 * to use the data field (in conflicting ways). 580 */ 581 kev.ident = pid; 582 kev.filter = kn->kn_filter; 583 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 584 EV_FLAG2; 585 kev.fflags = kn->kn_sfflags; 586 kev.data = kn->kn_id; /* parent */ 587 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 588 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 589 if (error) 590 kn->kn_fflags |= NOTE_TRACKERR; 591 592 /* 593 * Then register another knote to track other potential events 594 * from the new process. 595 */ 596 kev.ident = pid; 597 kev.filter = kn->kn_filter; 598 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 599 kev.fflags = kn->kn_sfflags; 600 kev.data = kn->kn_id; /* parent */ 601 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 602 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 603 if (error) 604 kn->kn_fflags |= NOTE_TRACKERR; 605 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 606 KNOTE_ACTIVATE(kn, 0); 607 list->kl_lock(list->kl_lockarg); 608 KQ_LOCK(kq); 609 kn_leave_flux(kn); 610 KQ_UNLOCK_FLUX(kq); 611 } 612 } 613 614 /* 615 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 616 * interval timer support code. 617 */ 618 619 #define NOTE_TIMER_PRECMASK \ 620 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 621 622 static sbintime_t 623 timer2sbintime(int64_t data, int flags) 624 { 625 int64_t secs; 626 627 /* 628 * Macros for converting to the fractional second portion of an 629 * sbintime_t using 64bit multiplication to improve precision. 
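     * For example, NOTE_MSECONDS with data = 1500 takes the data >= 1000
     * branch below: secs = 1 and the result is (1 << 32) | MS_TO_SBT(500),
     * i.e. one full second plus roughly half a second encoded in the low
     * 32 bits of the 32.32 fixed-point sbintime_t.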
     */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
    switch (flags & NOTE_TIMER_PRECMASK) {
    case NOTE_SECONDS:
#ifdef __LP64__
        if (data > (SBT_MAX / SBT_1S))
            return (SBT_MAX);
#endif
        return ((sbintime_t)data << 32);
    case NOTE_MSECONDS: /* FALLTHROUGH */
    case 0:
        if (data >= 1000) {
            secs = data / 1000;
#ifdef __LP64__
            if (secs > (SBT_MAX / SBT_1S))
                return (SBT_MAX);
#endif
            return (secs << 32 | MS_TO_SBT(data % 1000));
        }
        return (MS_TO_SBT(data));
    case NOTE_USECONDS:
        if (data >= 1000000) {
            secs = data / 1000000;
#ifdef __LP64__
            if (secs > (SBT_MAX / SBT_1S))
                return (SBT_MAX);
#endif
            return (secs << 32 | US_TO_SBT(data % 1000000));
        }
        return (US_TO_SBT(data));
    case NOTE_NSECONDS:
        if (data >= 1000000000) {
            secs = data / 1000000000;
#ifdef __LP64__
            if (secs > (SBT_MAX / SBT_1S))
                return (SBT_MAX);
#endif
            return (secs << 32 | NS_TO_SBT(data % 1000000000));
        }
        return (NS_TO_SBT(data));
    default:
        break;
    }
    return (-1);
}

struct kq_timer_cb_data {
    struct callout c;
    struct proc *p;
    struct knote *kn;
    int cpuid;
    int flags;
    TAILQ_ENTRY(kq_timer_cb_data) link;
    sbintime_t next;    /* next timer event fires at */
    sbintime_t to;      /* precalculated timer period, 0 for abs */
};

#define KQ_TIMER_CB_ENQUEUED    0x01

static void
kqtimer_sched_callout(struct kq_timer_cb_data *kc)
{
    callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
        kc->cpuid, C_ABSOLUTE);
}

void
kqtimer_proc_continue(struct proc *p)
{
    struct kq_timer_cb_data *kc, *kc1;
    struct bintime bt;
    sbintime_t now;

    PROC_LOCK_ASSERT(p, MA_OWNED);

    getboottimebin(&bt);
    now = bttosbt(bt);

    TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
        TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
        kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
        if (kc->next <= now)
            filt_timerexpire_l(kc->kn, true);
        else
            kqtimer_sched_callout(kc);
    }
}

static void
filt_timerexpire_l(struct knote *kn, bool proc_locked)
{
    struct kq_timer_cb_data *kc;
    struct proc *p;
    uint64_t delta;
    sbintime_t now;

    kc = kn->kn_ptr.p_v;

    if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
        kn->kn_data++;
        KNOTE_ACTIVATE(kn, 0);
        return;
    }

    now = sbinuptime();
    if (now >= kc->next) {
        delta = (now - kc->next) / kc->to;
        if (delta == 0)
            delta = 1;
        kn->kn_data += delta;
        kc->next += (delta + 1) * kc->to;
        if (now >= kc->next)    /* overflow */
            kc->next = now + kc->to;
        KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
    }

    /*
     * The initial check for a stopped kc->p is racy.  It is fine to
     * miss the setting of the stop flags; at worst we would schedule
     * one more callout.  On the other hand, it is not fine to skip
     * scheduling when we missed the clearing of the flags, so we
     * recheck them under the lock and observe a consistent state.
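     * Entries parked on p_kqtim_stop here are re-armed later by
     * kqtimer_proc_continue() once the process is resumed.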
754 */ 755 p = kc->p; 756 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 757 if (!proc_locked) 758 PROC_LOCK(p); 759 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 760 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) { 761 kc->flags |= KQ_TIMER_CB_ENQUEUED; 762 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link); 763 } 764 if (!proc_locked) 765 PROC_UNLOCK(p); 766 return; 767 } 768 if (!proc_locked) 769 PROC_UNLOCK(p); 770 } 771 kqtimer_sched_callout(kc); 772 } 773 774 static void 775 filt_timerexpire(void *knx) 776 { 777 filt_timerexpire_l(knx, false); 778 } 779 780 /* 781 * data contains amount of time to sleep 782 */ 783 static int 784 filt_timervalidate(struct knote *kn, sbintime_t *to) 785 { 786 struct bintime bt; 787 sbintime_t sbt; 788 789 if (kn->kn_sdata < 0) 790 return (EINVAL); 791 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 792 kn->kn_sdata = 1; 793 /* 794 * The only fflags values supported are the timer unit 795 * (precision) and the absolute time indicator. 796 */ 797 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 798 return (EINVAL); 799 800 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 801 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 802 getboottimebin(&bt); 803 sbt = bttosbt(bt); 804 *to -= sbt; 805 } 806 if (*to < 0) 807 return (EINVAL); 808 return (0); 809 } 810 811 static int 812 filt_timerattach(struct knote *kn) 813 { 814 struct kq_timer_cb_data *kc; 815 sbintime_t to; 816 int error; 817 818 error = filt_timervalidate(kn, &to); 819 if (error != 0) 820 return (error); 821 822 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) { 823 atomic_subtract_int(&kq_ncallouts, 1); 824 return (ENOMEM); 825 } 826 827 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 828 kn->kn_flags |= EV_CLEAR; /* automatically set */ 829 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 830 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 831 kc->kn = kn; 832 kc->p = curproc; 833 kc->cpuid = PCPU_GET(cpuid); 834 kc->flags = 0; 835 callout_init(&kc->c, 1); 836 filt_timerstart(kn, to); 837 838 return (0); 839 } 840 841 static void 842 filt_timerstart(struct knote *kn, sbintime_t to) 843 { 844 struct kq_timer_cb_data *kc; 845 846 kc = kn->kn_ptr.p_v; 847 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 848 kc->next = to; 849 kc->to = 0; 850 } else { 851 kc->next = to + sbinuptime(); 852 kc->to = to; 853 } 854 kqtimer_sched_callout(kc); 855 } 856 857 static void 858 filt_timerdetach(struct knote *kn) 859 { 860 struct kq_timer_cb_data *kc; 861 unsigned int old __unused; 862 863 kc = kn->kn_ptr.p_v; 864 callout_drain(&kc->c); 865 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) { 866 PROC_LOCK(kc->p); 867 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link); 868 PROC_UNLOCK(kc->p); 869 } 870 free(kc, M_KQUEUE); 871 old = atomic_fetchadd_int(&kq_ncallouts, -1); 872 KASSERT(old > 0, ("Number of callouts cannot become negative")); 873 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 874 } 875 876 static void 877 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 878 { 879 struct kq_timer_cb_data *kc; 880 struct kqueue *kq; 881 sbintime_t to; 882 int error; 883 884 switch (type) { 885 case EVENT_REGISTER: 886 /* Handle re-added timers that update data/fflags */ 887 if (kev->flags & EV_ADD) { 888 kc = kn->kn_ptr.p_v; 889 890 /* Drain any existing callout. */ 891 callout_drain(&kc->c); 892 893 /* Throw away any existing undelivered record 894 * of the timer expiration. 
This is done under 895 * the presumption that if a process is 896 * re-adding this timer with new parameters, 897 * it is no longer interested in what may have 898 * happened under the old parameters. If it is 899 * interested, it can wait for the expiration, 900 * delete the old timer definition, and then 901 * add the new one. 902 * 903 * This has to be done while the kq is locked: 904 * - if enqueued, dequeue 905 * - make it no longer active 906 * - clear the count of expiration events 907 */ 908 kq = kn->kn_kq; 909 KQ_LOCK(kq); 910 if (kn->kn_status & KN_QUEUED) 911 knote_dequeue(kn); 912 913 kn->kn_status &= ~KN_ACTIVE; 914 kn->kn_data = 0; 915 KQ_UNLOCK(kq); 916 917 /* Reschedule timer based on new data/fflags */ 918 kn->kn_sfflags = kev->fflags; 919 kn->kn_sdata = kev->data; 920 error = filt_timervalidate(kn, &to); 921 if (error != 0) { 922 kn->kn_flags |= EV_ERROR; 923 kn->kn_data = error; 924 } else 925 filt_timerstart(kn, to); 926 } 927 break; 928 929 case EVENT_PROCESS: 930 *kev = kn->kn_kevent; 931 if (kn->kn_flags & EV_CLEAR) { 932 kn->kn_data = 0; 933 kn->kn_fflags = 0; 934 } 935 break; 936 937 default: 938 panic("filt_timertouch() - invalid type (%ld)", type); 939 break; 940 } 941 } 942 943 static int 944 filt_timer(struct knote *kn, long hint) 945 { 946 947 return (kn->kn_data != 0); 948 } 949 950 static int 951 filt_userattach(struct knote *kn) 952 { 953 954 /* 955 * EVFILT_USER knotes are not attached to anything in the kernel. 956 */ 957 kn->kn_hook = NULL; 958 if (kn->kn_fflags & NOTE_TRIGGER) 959 kn->kn_hookid = 1; 960 else 961 kn->kn_hookid = 0; 962 return (0); 963 } 964 965 static void 966 filt_userdetach(__unused struct knote *kn) 967 { 968 969 /* 970 * EVFILT_USER knotes are not attached to anything in the kernel. 971 */ 972 } 973 974 static int 975 filt_user(struct knote *kn, __unused long hint) 976 { 977 978 return (kn->kn_hookid); 979 } 980 981 static void 982 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 983 { 984 u_int ffctrl; 985 986 switch (type) { 987 case EVENT_REGISTER: 988 if (kev->fflags & NOTE_TRIGGER) 989 kn->kn_hookid = 1; 990 991 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 992 kev->fflags &= NOTE_FFLAGSMASK; 993 switch (ffctrl) { 994 case NOTE_FFNOP: 995 break; 996 997 case NOTE_FFAND: 998 kn->kn_sfflags &= kev->fflags; 999 break; 1000 1001 case NOTE_FFOR: 1002 kn->kn_sfflags |= kev->fflags; 1003 break; 1004 1005 case NOTE_FFCOPY: 1006 kn->kn_sfflags = kev->fflags; 1007 break; 1008 1009 default: 1010 /* XXX Return error? 
*/ 1011 break; 1012 } 1013 kn->kn_sdata = kev->data; 1014 if (kev->flags & EV_CLEAR) { 1015 kn->kn_hookid = 0; 1016 kn->kn_data = 0; 1017 kn->kn_fflags = 0; 1018 } 1019 break; 1020 1021 case EVENT_PROCESS: 1022 *kev = kn->kn_kevent; 1023 kev->fflags = kn->kn_sfflags; 1024 kev->data = kn->kn_sdata; 1025 if (kn->kn_flags & EV_CLEAR) { 1026 kn->kn_hookid = 0; 1027 kn->kn_data = 0; 1028 kn->kn_fflags = 0; 1029 } 1030 break; 1031 1032 default: 1033 panic("filt_usertouch() - invalid type (%ld)", type); 1034 break; 1035 } 1036 } 1037 1038 int 1039 sys_kqueue(struct thread *td, struct kqueue_args *uap) 1040 { 1041 1042 return (kern_kqueue(td, 0, NULL)); 1043 } 1044 1045 static void 1046 kqueue_init(struct kqueue *kq) 1047 { 1048 1049 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 1050 TAILQ_INIT(&kq->kq_head); 1051 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 1052 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 1053 } 1054 1055 int 1056 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 1057 { 1058 struct filedesc *fdp; 1059 struct kqueue *kq; 1060 struct file *fp; 1061 struct ucred *cred; 1062 int fd, error; 1063 1064 fdp = td->td_proc->p_fd; 1065 cred = td->td_ucred; 1066 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 1067 return (ENOMEM); 1068 1069 error = falloc_caps(td, &fp, &fd, flags, fcaps); 1070 if (error != 0) { 1071 chgkqcnt(cred->cr_ruidinfo, -1, 0); 1072 return (error); 1073 } 1074 1075 /* An extra reference on `fp' has been held for us by falloc(). */ 1076 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 1077 kqueue_init(kq); 1078 kq->kq_fdp = fdp; 1079 kq->kq_cred = crhold(cred); 1080 1081 FILEDESC_XLOCK(fdp); 1082 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 1083 FILEDESC_XUNLOCK(fdp); 1084 1085 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 1086 fdrop(fp, td); 1087 1088 td->td_retval[0] = fd; 1089 return (0); 1090 } 1091 1092 struct g_kevent_args { 1093 int fd; 1094 void *changelist; 1095 int nchanges; 1096 void *eventlist; 1097 int nevents; 1098 const struct timespec *timeout; 1099 }; 1100 1101 int 1102 sys_kevent(struct thread *td, struct kevent_args *uap) 1103 { 1104 struct kevent_copyops k_ops = { 1105 .arg = uap, 1106 .k_copyout = kevent_copyout, 1107 .k_copyin = kevent_copyin, 1108 .kevent_size = sizeof(struct kevent), 1109 }; 1110 struct g_kevent_args gk_args = { 1111 .fd = uap->fd, 1112 .changelist = uap->changelist, 1113 .nchanges = uap->nchanges, 1114 .eventlist = uap->eventlist, 1115 .nevents = uap->nevents, 1116 .timeout = uap->timeout, 1117 }; 1118 1119 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); 1120 } 1121 1122 static int 1123 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, 1124 struct kevent_copyops *k_ops, const char *struct_name) 1125 { 1126 struct timespec ts, *tsp; 1127 #ifdef KTRACE 1128 struct kevent *eventlist = uap->eventlist; 1129 #endif 1130 int error; 1131 1132 if (uap->timeout != NULL) { 1133 error = copyin(uap->timeout, &ts, sizeof(ts)); 1134 if (error) 1135 return (error); 1136 tsp = &ts; 1137 } else 1138 tsp = NULL; 1139 1140 #ifdef KTRACE 1141 if (KTRPOINT(td, KTR_STRUCT_ARRAY)) 1142 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, 1143 uap->nchanges, k_ops->kevent_size); 1144 #endif 1145 1146 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 1147 k_ops, tsp); 1148 1149 #ifdef KTRACE 1150 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) 1151 ktrstructarray(struct_name, UIO_USERSPACE, eventlist, 1152 td->td_retval[0], 
k_ops->kevent_size); 1153 #endif 1154 1155 return (error); 1156 } 1157 1158 /* 1159 * Copy 'count' items into the destination list pointed to by uap->eventlist. 1160 */ 1161 static int 1162 kevent_copyout(void *arg, struct kevent *kevp, int count) 1163 { 1164 struct kevent_args *uap; 1165 int error; 1166 1167 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1168 uap = (struct kevent_args *)arg; 1169 1170 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 1171 if (error == 0) 1172 uap->eventlist += count; 1173 return (error); 1174 } 1175 1176 /* 1177 * Copy 'count' items from the list pointed to by uap->changelist. 1178 */ 1179 static int 1180 kevent_copyin(void *arg, struct kevent *kevp, int count) 1181 { 1182 struct kevent_args *uap; 1183 int error; 1184 1185 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1186 uap = (struct kevent_args *)arg; 1187 1188 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1189 if (error == 0) 1190 uap->changelist += count; 1191 return (error); 1192 } 1193 1194 #ifdef COMPAT_FREEBSD11 1195 static int 1196 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1197 { 1198 struct freebsd11_kevent_args *uap; 1199 struct kevent_freebsd11 kev11; 1200 int error, i; 1201 1202 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1203 uap = (struct freebsd11_kevent_args *)arg; 1204 1205 for (i = 0; i < count; i++) { 1206 kev11.ident = kevp->ident; 1207 kev11.filter = kevp->filter; 1208 kev11.flags = kevp->flags; 1209 kev11.fflags = kevp->fflags; 1210 kev11.data = kevp->data; 1211 kev11.udata = kevp->udata; 1212 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1213 if (error != 0) 1214 break; 1215 uap->eventlist++; 1216 kevp++; 1217 } 1218 return (error); 1219 } 1220 1221 /* 1222 * Copy 'count' items from the list pointed to by uap->changelist. 
1223 */ 1224 static int 1225 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1226 { 1227 struct freebsd11_kevent_args *uap; 1228 struct kevent_freebsd11 kev11; 1229 int error, i; 1230 1231 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1232 uap = (struct freebsd11_kevent_args *)arg; 1233 1234 for (i = 0; i < count; i++) { 1235 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1236 if (error != 0) 1237 break; 1238 kevp->ident = kev11.ident; 1239 kevp->filter = kev11.filter; 1240 kevp->flags = kev11.flags; 1241 kevp->fflags = kev11.fflags; 1242 kevp->data = (uintptr_t)kev11.data; 1243 kevp->udata = kev11.udata; 1244 bzero(&kevp->ext, sizeof(kevp->ext)); 1245 uap->changelist++; 1246 kevp++; 1247 } 1248 return (error); 1249 } 1250 1251 int 1252 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1253 { 1254 struct kevent_copyops k_ops = { 1255 .arg = uap, 1256 .k_copyout = kevent11_copyout, 1257 .k_copyin = kevent11_copyin, 1258 .kevent_size = sizeof(struct kevent_freebsd11), 1259 }; 1260 struct g_kevent_args gk_args = { 1261 .fd = uap->fd, 1262 .changelist = uap->changelist, 1263 .nchanges = uap->nchanges, 1264 .eventlist = uap->eventlist, 1265 .nevents = uap->nevents, 1266 .timeout = uap->timeout, 1267 }; 1268 1269 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11")); 1270 } 1271 #endif 1272 1273 int 1274 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1275 struct kevent_copyops *k_ops, const struct timespec *timeout) 1276 { 1277 cap_rights_t rights; 1278 struct file *fp; 1279 int error; 1280 1281 cap_rights_init_zero(&rights); 1282 if (nchanges > 0) 1283 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE); 1284 if (nevents > 0) 1285 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT); 1286 error = fget(td, fd, &rights, &fp); 1287 if (error != 0) 1288 return (error); 1289 1290 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1291 fdrop(fp, td); 1292 1293 return (error); 1294 } 1295 1296 static int 1297 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1298 struct kevent_copyops *k_ops, const struct timespec *timeout) 1299 { 1300 struct kevent keva[KQ_NEVENTS]; 1301 struct kevent *kevp, *changes; 1302 int i, n, nerrors, error; 1303 1304 if (nchanges < 0) 1305 return (EINVAL); 1306 1307 nerrors = 0; 1308 while (nchanges > 0) { 1309 n = nchanges > KQ_NEVENTS ? 
KQ_NEVENTS : nchanges; 1310 error = k_ops->k_copyin(k_ops->arg, keva, n); 1311 if (error) 1312 return (error); 1313 changes = keva; 1314 for (i = 0; i < n; i++) { 1315 kevp = &changes[i]; 1316 if (!kevp->filter) 1317 continue; 1318 kevp->flags &= ~EV_SYSFLAGS; 1319 error = kqueue_register(kq, kevp, td, M_WAITOK); 1320 if (error || (kevp->flags & EV_RECEIPT)) { 1321 if (nevents == 0) 1322 return (error); 1323 kevp->flags = EV_ERROR; 1324 kevp->data = error; 1325 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 1326 nevents--; 1327 nerrors++; 1328 } 1329 } 1330 nchanges -= n; 1331 } 1332 if (nerrors) { 1333 td->td_retval[0] = nerrors; 1334 return (0); 1335 } 1336 1337 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1338 } 1339 1340 int 1341 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1342 struct kevent_copyops *k_ops, const struct timespec *timeout) 1343 { 1344 struct kqueue *kq; 1345 int error; 1346 1347 error = kqueue_acquire(fp, &kq); 1348 if (error != 0) 1349 return (error); 1350 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1351 kqueue_release(kq, 0); 1352 return (error); 1353 } 1354 1355 /* 1356 * Performs a kevent() call on a temporarily created kqueue. This can be 1357 * used to perform one-shot polling, similar to poll() and select(). 1358 */ 1359 int 1360 kern_kevent_anonymous(struct thread *td, int nevents, 1361 struct kevent_copyops *k_ops) 1362 { 1363 struct kqueue kq = {}; 1364 int error; 1365 1366 kqueue_init(&kq); 1367 kq.kq_refcnt = 1; 1368 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1369 kqueue_drain(&kq, td); 1370 kqueue_destroy(&kq); 1371 return (error); 1372 } 1373 1374 int 1375 kqueue_add_filteropts(int filt, struct filterops *filtops) 1376 { 1377 int error; 1378 1379 error = 0; 1380 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1381 printf( 1382 "trying to add a filterop that is out of range: %d is beyond %d\n", 1383 ~filt, EVFILT_SYSCOUNT); 1384 return EINVAL; 1385 } 1386 mtx_lock(&filterops_lock); 1387 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1388 sysfilt_ops[~filt].for_fop != NULL) 1389 error = EEXIST; 1390 else { 1391 sysfilt_ops[~filt].for_fop = filtops; 1392 sysfilt_ops[~filt].for_refcnt = 0; 1393 } 1394 mtx_unlock(&filterops_lock); 1395 1396 return (error); 1397 } 1398 1399 int 1400 kqueue_del_filteropts(int filt) 1401 { 1402 int error; 1403 1404 error = 0; 1405 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1406 return EINVAL; 1407 1408 mtx_lock(&filterops_lock); 1409 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1410 sysfilt_ops[~filt].for_fop == NULL) 1411 error = EINVAL; 1412 else if (sysfilt_ops[~filt].for_refcnt != 0) 1413 error = EBUSY; 1414 else { 1415 sysfilt_ops[~filt].for_fop = &null_filtops; 1416 sysfilt_ops[~filt].for_refcnt = 0; 1417 } 1418 mtx_unlock(&filterops_lock); 1419 1420 return error; 1421 } 1422 1423 static struct filterops * 1424 kqueue_fo_find(int filt) 1425 { 1426 1427 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1428 return NULL; 1429 1430 if (sysfilt_ops[~filt].for_nolock) 1431 return sysfilt_ops[~filt].for_fop; 1432 1433 mtx_lock(&filterops_lock); 1434 sysfilt_ops[~filt].for_refcnt++; 1435 if (sysfilt_ops[~filt].for_fop == NULL) 1436 sysfilt_ops[~filt].for_fop = &null_filtops; 1437 mtx_unlock(&filterops_lock); 1438 1439 return sysfilt_ops[~filt].for_fop; 1440 } 1441 1442 static void 1443 kqueue_fo_release(int filt) 1444 { 1445 1446 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1447 return; 1448 1449 if (sysfilt_ops[~filt].for_nolock) 1450 
return; 1451 1452 mtx_lock(&filterops_lock); 1453 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1454 ("filter object refcount not valid on release")); 1455 sysfilt_ops[~filt].for_refcnt--; 1456 mtx_unlock(&filterops_lock); 1457 } 1458 1459 /* 1460 * A ref to kq (obtained via kqueue_acquire) must be held. 1461 */ 1462 static int 1463 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, 1464 int mflag) 1465 { 1466 struct filterops *fops; 1467 struct file *fp; 1468 struct knote *kn, *tkn; 1469 struct knlist *knl; 1470 int error, filt, event; 1471 int haskqglobal, filedesc_unlock; 1472 1473 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1474 return (EINVAL); 1475 1476 fp = NULL; 1477 kn = NULL; 1478 knl = NULL; 1479 error = 0; 1480 haskqglobal = 0; 1481 filedesc_unlock = 0; 1482 1483 filt = kev->filter; 1484 fops = kqueue_fo_find(filt); 1485 if (fops == NULL) 1486 return EINVAL; 1487 1488 if (kev->flags & EV_ADD) { 1489 /* 1490 * Prevent waiting with locks. Non-sleepable 1491 * allocation failures are handled in the loop, only 1492 * if the spare knote appears to be actually required. 1493 */ 1494 tkn = knote_alloc(mflag); 1495 } else { 1496 tkn = NULL; 1497 } 1498 1499 findkn: 1500 if (fops->f_isfd) { 1501 KASSERT(td != NULL, ("td is NULL")); 1502 if (kev->ident > INT_MAX) 1503 error = EBADF; 1504 else 1505 error = fget(td, kev->ident, &cap_event_rights, &fp); 1506 if (error) 1507 goto done; 1508 1509 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1510 kev->ident, M_NOWAIT) != 0) { 1511 /* try again */ 1512 fdrop(fp, td); 1513 fp = NULL; 1514 error = kqueue_expand(kq, fops, kev->ident, mflag); 1515 if (error) 1516 goto done; 1517 goto findkn; 1518 } 1519 1520 if (fp->f_type == DTYPE_KQUEUE) { 1521 /* 1522 * If we add some intelligence about what we are doing, 1523 * we should be able to support events on ourselves. 1524 * We need to know when we are doing this to prevent 1525 * getting both the knlist lock and the kq lock since 1526 * they are the same thing. 1527 */ 1528 if (fp->f_data == kq) { 1529 error = EINVAL; 1530 goto done; 1531 } 1532 1533 /* 1534 * Pre-lock the filedesc before the global 1535 * lock mutex, see the comment in 1536 * kqueue_close(). 1537 */ 1538 FILEDESC_XLOCK(td->td_proc->p_fd); 1539 filedesc_unlock = 1; 1540 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1541 } 1542 1543 KQ_LOCK(kq); 1544 if (kev->ident < kq->kq_knlistsize) { 1545 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1546 if (kev->filter == kn->kn_filter) 1547 break; 1548 } 1549 } else { 1550 if ((kev->flags & EV_ADD) == EV_ADD) { 1551 error = kqueue_expand(kq, fops, kev->ident, mflag); 1552 if (error != 0) 1553 goto done; 1554 } 1555 1556 KQ_LOCK(kq); 1557 1558 /* 1559 * If possible, find an existing knote to use for this kevent. 1560 */ 1561 if (kev->filter == EVFILT_PROC && 1562 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1563 /* This is an internal creation of a process tracking 1564 * note. Don't attempt to coalesce this with an 1565 * existing note. 1566 */ 1567 ; 1568 } else if (kq->kq_knhashmask != 0) { 1569 struct klist *list; 1570 1571 list = &kq->kq_knhash[ 1572 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1573 SLIST_FOREACH(kn, list, kn_link) 1574 if (kev->ident == kn->kn_id && 1575 kev->filter == kn->kn_filter) 1576 break; 1577 } 1578 } 1579 1580 /* knote is in the process of changing, wait for it to stabilize. 
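     * A knote is placed in flux (kn_enter_flux()) while the kq lock is
     * held, before that lock is dropped for a potentially blocking
     * operation.  Any other thread that finds the knote in flux sets
     * KQ_FLUXWAIT and sleeps on the kq until kn_leave_flux() and
     * KQ_FLUX_WAKEUP() let it re-evaluate the knote.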
*/ 1581 if (kn != NULL && kn_in_flux(kn)) { 1582 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1583 if (filedesc_unlock) { 1584 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1585 filedesc_unlock = 0; 1586 } 1587 kq->kq_state |= KQ_FLUXWAIT; 1588 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1589 if (fp != NULL) { 1590 fdrop(fp, td); 1591 fp = NULL; 1592 } 1593 goto findkn; 1594 } 1595 1596 /* 1597 * kn now contains the matching knote, or NULL if no match 1598 */ 1599 if (kn == NULL) { 1600 if (kev->flags & EV_ADD) { 1601 kn = tkn; 1602 tkn = NULL; 1603 if (kn == NULL) { 1604 KQ_UNLOCK(kq); 1605 error = ENOMEM; 1606 goto done; 1607 } 1608 kn->kn_fp = fp; 1609 kn->kn_kq = kq; 1610 kn->kn_fop = fops; 1611 /* 1612 * apply reference counts to knote structure, and 1613 * do not release it at the end of this routine. 1614 */ 1615 fops = NULL; 1616 fp = NULL; 1617 1618 kn->kn_sfflags = kev->fflags; 1619 kn->kn_sdata = kev->data; 1620 kev->fflags = 0; 1621 kev->data = 0; 1622 kn->kn_kevent = *kev; 1623 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1624 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1625 kn->kn_status = KN_DETACHED; 1626 if ((kev->flags & EV_DISABLE) != 0) 1627 kn->kn_status |= KN_DISABLED; 1628 kn_enter_flux(kn); 1629 1630 error = knote_attach(kn, kq); 1631 KQ_UNLOCK(kq); 1632 if (error != 0) { 1633 tkn = kn; 1634 goto done; 1635 } 1636 1637 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1638 knote_drop_detached(kn, td); 1639 goto done; 1640 } 1641 knl = kn_list_lock(kn); 1642 goto done_ev_add; 1643 } else { 1644 /* No matching knote and the EV_ADD flag is not set. */ 1645 KQ_UNLOCK(kq); 1646 error = ENOENT; 1647 goto done; 1648 } 1649 } 1650 1651 if (kev->flags & EV_DELETE) { 1652 kn_enter_flux(kn); 1653 KQ_UNLOCK(kq); 1654 knote_drop(kn, td); 1655 goto done; 1656 } 1657 1658 if (kev->flags & EV_FORCEONESHOT) { 1659 kn->kn_flags |= EV_ONESHOT; 1660 KNOTE_ACTIVATE(kn, 1); 1661 } 1662 1663 if ((kev->flags & EV_ENABLE) != 0) 1664 kn->kn_status &= ~KN_DISABLED; 1665 else if ((kev->flags & EV_DISABLE) != 0) 1666 kn->kn_status |= KN_DISABLED; 1667 1668 /* 1669 * The user may change some filter values after the initial EV_ADD, 1670 * but doing so will not reset any filter which has already been 1671 * triggered. 1672 */ 1673 kn->kn_status |= KN_SCAN; 1674 kn_enter_flux(kn); 1675 KQ_UNLOCK(kq); 1676 knl = kn_list_lock(kn); 1677 kn->kn_kevent.udata = kev->udata; 1678 if (!fops->f_isfd && fops->f_touch != NULL) { 1679 fops->f_touch(kn, kev, EVENT_REGISTER); 1680 } else { 1681 kn->kn_sfflags = kev->fflags; 1682 kn->kn_sdata = kev->data; 1683 } 1684 1685 done_ev_add: 1686 /* 1687 * We can get here with kn->kn_knlist == NULL. This can happen when 1688 * the initial attach event decides that the event is "completed" 1689 * already, e.g., filt_procattach() is called on a zombie process. It 1690 * will call filt_proc() which will remove it from the list, and NULL 1691 * kn_knlist. 1692 * 1693 * KN_DISABLED will be stable while the knote is in flux, so the 1694 * unlocked read will not race with an update. 
1695 */ 1696 if ((kn->kn_status & KN_DISABLED) == 0) 1697 event = kn->kn_fop->f_event(kn, 0); 1698 else 1699 event = 0; 1700 1701 KQ_LOCK(kq); 1702 if (event) 1703 kn->kn_status |= KN_ACTIVE; 1704 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1705 KN_ACTIVE) 1706 knote_enqueue(kn); 1707 kn->kn_status &= ~KN_SCAN; 1708 kn_leave_flux(kn); 1709 kn_list_unlock(knl); 1710 KQ_UNLOCK_FLUX(kq); 1711 1712 done: 1713 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1714 if (filedesc_unlock) 1715 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1716 if (fp != NULL) 1717 fdrop(fp, td); 1718 knote_free(tkn); 1719 if (fops != NULL) 1720 kqueue_fo_release(filt); 1721 return (error); 1722 } 1723 1724 static int 1725 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1726 { 1727 int error; 1728 struct kqueue *kq; 1729 1730 error = 0; 1731 1732 kq = fp->f_data; 1733 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1734 return (EBADF); 1735 *kqp = kq; 1736 KQ_LOCK(kq); 1737 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1738 KQ_UNLOCK(kq); 1739 return (EBADF); 1740 } 1741 kq->kq_refcnt++; 1742 KQ_UNLOCK(kq); 1743 1744 return error; 1745 } 1746 1747 static void 1748 kqueue_release(struct kqueue *kq, int locked) 1749 { 1750 if (locked) 1751 KQ_OWNED(kq); 1752 else 1753 KQ_LOCK(kq); 1754 kq->kq_refcnt--; 1755 if (kq->kq_refcnt == 1) 1756 wakeup(&kq->kq_refcnt); 1757 if (!locked) 1758 KQ_UNLOCK(kq); 1759 } 1760 1761 static void 1762 kqueue_schedtask(struct kqueue *kq) 1763 { 1764 1765 KQ_OWNED(kq); 1766 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1767 ("scheduling kqueue task while draining")); 1768 1769 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1770 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1771 kq->kq_state |= KQ_TASKSCHED; 1772 } 1773 } 1774 1775 /* 1776 * Expand the kq to make sure we have storage for fops/ident pair. 1777 * 1778 * Return 0 on success (or no work necessary), return errno on failure. 1779 */ 1780 static int 1781 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1782 int mflag) 1783 { 1784 struct klist *list, *tmp_knhash, *to_free; 1785 u_long tmp_knhashmask; 1786 int error, fd, size; 1787 1788 KQ_NOTOWNED(kq); 1789 1790 error = 0; 1791 to_free = NULL; 1792 if (fops->f_isfd) { 1793 fd = ident; 1794 if (kq->kq_knlistsize <= fd) { 1795 size = kq->kq_knlistsize; 1796 while (size <= fd) 1797 size += KQEXTENT; 1798 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1799 if (list == NULL) 1800 return ENOMEM; 1801 KQ_LOCK(kq); 1802 if ((kq->kq_state & KQ_CLOSING) != 0) { 1803 to_free = list; 1804 error = EBADF; 1805 } else if (kq->kq_knlistsize > fd) { 1806 to_free = list; 1807 } else { 1808 if (kq->kq_knlist != NULL) { 1809 bcopy(kq->kq_knlist, list, 1810 kq->kq_knlistsize * sizeof(*list)); 1811 to_free = kq->kq_knlist; 1812 kq->kq_knlist = NULL; 1813 } 1814 bzero((caddr_t)list + 1815 kq->kq_knlistsize * sizeof(*list), 1816 (size - kq->kq_knlistsize) * sizeof(*list)); 1817 kq->kq_knlistsize = size; 1818 kq->kq_knlist = list; 1819 } 1820 KQ_UNLOCK(kq); 1821 } 1822 } else { 1823 if (kq->kq_knhashmask == 0) { 1824 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, 1825 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? 
1826 HASH_WAITOK : HASH_NOWAIT); 1827 if (tmp_knhash == NULL) 1828 return (ENOMEM); 1829 KQ_LOCK(kq); 1830 if ((kq->kq_state & KQ_CLOSING) != 0) { 1831 to_free = tmp_knhash; 1832 error = EBADF; 1833 } else if (kq->kq_knhashmask == 0) { 1834 kq->kq_knhash = tmp_knhash; 1835 kq->kq_knhashmask = tmp_knhashmask; 1836 } else { 1837 to_free = tmp_knhash; 1838 } 1839 KQ_UNLOCK(kq); 1840 } 1841 } 1842 free(to_free, M_KQUEUE); 1843 1844 KQ_NOTOWNED(kq); 1845 return (error); 1846 } 1847 1848 static void 1849 kqueue_task(void *arg, int pending) 1850 { 1851 struct kqueue *kq; 1852 int haskqglobal; 1853 1854 haskqglobal = 0; 1855 kq = arg; 1856 1857 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1858 KQ_LOCK(kq); 1859 1860 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1861 1862 kq->kq_state &= ~KQ_TASKSCHED; 1863 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1864 wakeup(&kq->kq_state); 1865 } 1866 KQ_UNLOCK(kq); 1867 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1868 } 1869 1870 /* 1871 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1872 * We treat KN_MARKER knotes as if they are in flux. 1873 */ 1874 static int 1875 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1876 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1877 { 1878 struct kevent *kevp; 1879 struct knote *kn, *marker; 1880 struct knlist *knl; 1881 sbintime_t asbt, rsbt; 1882 int count, error, haskqglobal, influx, nkev, touch; 1883 1884 count = maxevents; 1885 nkev = 0; 1886 error = 0; 1887 haskqglobal = 0; 1888 1889 if (maxevents == 0) 1890 goto done_nl; 1891 if (maxevents < 0) { 1892 error = EINVAL; 1893 goto done_nl; 1894 } 1895 1896 rsbt = 0; 1897 if (tsp != NULL) { 1898 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1899 tsp->tv_nsec >= 1000000000) { 1900 error = EINVAL; 1901 goto done_nl; 1902 } 1903 if (timespecisset(tsp)) { 1904 if (tsp->tv_sec <= INT32_MAX) { 1905 rsbt = tstosbt(*tsp); 1906 if (TIMESEL(&asbt, rsbt)) 1907 asbt += tc_tick_sbt; 1908 if (asbt <= SBT_MAX - rsbt) 1909 asbt += rsbt; 1910 else 1911 asbt = 0; 1912 rsbt >>= tc_precexp; 1913 } else 1914 asbt = 0; 1915 } else 1916 asbt = -1; 1917 } else 1918 asbt = 0; 1919 marker = knote_alloc(M_WAITOK); 1920 marker->kn_status = KN_MARKER; 1921 KQ_LOCK(kq); 1922 1923 retry: 1924 kevp = keva; 1925 if (kq->kq_count == 0) { 1926 if (asbt == -1) { 1927 error = EWOULDBLOCK; 1928 } else { 1929 kq->kq_state |= KQ_SLEEP; 1930 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1931 "kqread", asbt, rsbt, C_ABSOLUTE); 1932 } 1933 if (error == 0) 1934 goto retry; 1935 /* don't restart after signals... 
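         * A timed-out sleep returns EWOULDBLOCK, which is not an error
         * for kevent(); it is mapped to 0 so the caller simply sees
         * zero events.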
*/ 1936 if (error == ERESTART) 1937 error = EINTR; 1938 else if (error == EWOULDBLOCK) 1939 error = 0; 1940 goto done; 1941 } 1942 1943 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1944 influx = 0; 1945 while (count) { 1946 KQ_OWNED(kq); 1947 kn = TAILQ_FIRST(&kq->kq_head); 1948 1949 if ((kn->kn_status == KN_MARKER && kn != marker) || 1950 kn_in_flux(kn)) { 1951 if (influx) { 1952 influx = 0; 1953 KQ_FLUX_WAKEUP(kq); 1954 } 1955 kq->kq_state |= KQ_FLUXWAIT; 1956 error = msleep(kq, &kq->kq_lock, PSOCK, 1957 "kqflxwt", 0); 1958 continue; 1959 } 1960 1961 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1962 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1963 kn->kn_status &= ~KN_QUEUED; 1964 kq->kq_count--; 1965 continue; 1966 } 1967 if (kn == marker) { 1968 KQ_FLUX_WAKEUP(kq); 1969 if (count == maxevents) 1970 goto retry; 1971 goto done; 1972 } 1973 KASSERT(!kn_in_flux(kn), 1974 ("knote %p is unexpectedly in flux", kn)); 1975 1976 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1977 kn->kn_status &= ~KN_QUEUED; 1978 kn_enter_flux(kn); 1979 kq->kq_count--; 1980 KQ_UNLOCK(kq); 1981 /* 1982 * We don't need to lock the list since we've 1983 * marked it as in flux. 1984 */ 1985 knote_drop(kn, td); 1986 KQ_LOCK(kq); 1987 continue; 1988 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1989 kn->kn_status &= ~KN_QUEUED; 1990 kn_enter_flux(kn); 1991 kq->kq_count--; 1992 KQ_UNLOCK(kq); 1993 /* 1994 * We don't need to lock the list since we've 1995 * marked the knote as being in flux. 1996 */ 1997 *kevp = kn->kn_kevent; 1998 knote_drop(kn, td); 1999 KQ_LOCK(kq); 2000 kn = NULL; 2001 } else { 2002 kn->kn_status |= KN_SCAN; 2003 kn_enter_flux(kn); 2004 KQ_UNLOCK(kq); 2005 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 2006 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 2007 knl = kn_list_lock(kn); 2008 if (kn->kn_fop->f_event(kn, 0) == 0) { 2009 KQ_LOCK(kq); 2010 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2011 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 2012 KN_SCAN); 2013 kn_leave_flux(kn); 2014 kq->kq_count--; 2015 kn_list_unlock(knl); 2016 influx = 1; 2017 continue; 2018 } 2019 touch = (!kn->kn_fop->f_isfd && 2020 kn->kn_fop->f_touch != NULL); 2021 if (touch) 2022 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 2023 else 2024 *kevp = kn->kn_kevent; 2025 KQ_LOCK(kq); 2026 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2027 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 2028 /* 2029 * Manually clear knotes who weren't 2030 * 'touch'ed. 
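                 * EV_CLEAR resets kn_data and kn_fflags once the event
                 * has been copied out; EV_DISPATCH additionally disables
                 * the knote until it is re-enabled with EV_ENABLE.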
                 */
                if (touch == 0 && kn->kn_flags & EV_CLEAR) {
                    kn->kn_data = 0;
                    kn->kn_fflags = 0;
                }
                if (kn->kn_flags & EV_DISPATCH)
                    kn->kn_status |= KN_DISABLED;
                kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                kq->kq_count--;
            } else
                TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

            kn->kn_status &= ~KN_SCAN;
            kn_leave_flux(kn);
            kn_list_unlock(knl);
            influx = 1;
        }

        /* we are returning a copy to the user */
        kevp++;
        nkev++;
        count--;

        if (nkev == KQ_NEVENTS) {
            influx = 0;
            KQ_UNLOCK_FLUX(kq);
            error = k_ops->k_copyout(k_ops->arg, keva, nkev);
            nkev = 0;
            kevp = keva;
            KQ_LOCK(kq);
            if (error)
                break;
        }
    }
    TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
    KQ_OWNED(kq);
    KQ_UNLOCK_FLUX(kq);
    knote_free(marker);
done_nl:
    KQ_NOTOWNED(kq);
    if (nkev != 0)
        error = k_ops->k_copyout(k_ops->arg, keva, nkev);
    td->td_retval[0] = maxevents - count;
    return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
    /*
     * Enabling sigio causes two major problems:
     * 1) infinite recursion:
     * Synopsis: kevent is being used to track signals and has FIOASYNC
     * set.  On receipt of a signal this will cause a kqueue to recurse
     * into itself over and over.  Sending the sigio causes the kqueue
     * to become ready, which in turn posts sigio again, forever.
     * Solution: this can be solved by setting a flag in the kqueue that
     * we have a SIGIO in progress.
     * 2) locking problems:
     * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
     * us above the proc and pgrp locks.
     * Solution: Post a signal using an async mechanism, being sure to
     * record a generation count in the delivery so that we do not deliver
     * a signal to the wrong process.
     *
     * Note that these two mechanisms are somewhat mutually exclusive!
     */
#if 0
    struct kqueue *kq;

    kq = fp->f_data;
    switch (cmd) {
    case FIOASYNC:
        if (*(int *)data) {
            kq->kq_state |= KQ_ASYNC;
        } else {
            kq->kq_state &= ~KQ_ASYNC;
        }
        return (0);

    case FIOSETOWN:
        return (fsetown(*(int *)data, &kq->kq_sigio));

    case FIOGETOWN:
        *(int *)data = fgetown(&kq->kq_sigio);
        return (0);
    }
#endif

    return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
    struct kqueue *kq;
    int revents = 0;
    int error;

    if ((error = kqueue_acquire(fp, &kq)))
        return POLLERR;

    KQ_LOCK(kq);
    if (events & (POLLIN | POLLRDNORM)) {
        if (kq->kq_count) {
            revents |= events & (POLLIN | POLLRDNORM);
        } else {
            selrecord(td, &kq->kq_sel);
            if (SEL_WAITING(&kq->kq_sel))
                kq->kq_state |= KQ_SEL;
        }
    }
    kqueue_release(kq, 1);
    KQ_UNLOCK(kq);
    return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

    bzero((void *)st, sizeof *st);
    /*
     * We no longer return kq_count because the unlocked value is useless.
     * If you spent all this time getting the count, why not spend your
     * syscall better by calling kevent?
2164 * 2165 * XXX - This is needed for libc_r. 2166 */ 2167 st->st_mode = S_IFIFO; 2168 return (0); 2169 } 2170 2171 static void 2172 kqueue_drain(struct kqueue *kq, struct thread *td) 2173 { 2174 struct knote *kn; 2175 int i; 2176 2177 KQ_LOCK(kq); 2178 2179 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2180 ("kqueue already closing")); 2181 kq->kq_state |= KQ_CLOSING; 2182 if (kq->kq_refcnt > 1) 2183 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2184 2185 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2186 2187 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2188 ("kqueue's knlist not empty")); 2189 2190 for (i = 0; i < kq->kq_knlistsize; i++) { 2191 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2192 if (kn_in_flux(kn)) { 2193 kq->kq_state |= KQ_FLUXWAIT; 2194 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2195 continue; 2196 } 2197 kn_enter_flux(kn); 2198 KQ_UNLOCK(kq); 2199 knote_drop(kn, td); 2200 KQ_LOCK(kq); 2201 } 2202 } 2203 if (kq->kq_knhashmask != 0) { 2204 for (i = 0; i <= kq->kq_knhashmask; i++) { 2205 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2206 if (kn_in_flux(kn)) { 2207 kq->kq_state |= KQ_FLUXWAIT; 2208 msleep(kq, &kq->kq_lock, PSOCK, 2209 "kqclo2", 0); 2210 continue; 2211 } 2212 kn_enter_flux(kn); 2213 KQ_UNLOCK(kq); 2214 knote_drop(kn, td); 2215 KQ_LOCK(kq); 2216 } 2217 } 2218 } 2219 2220 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2221 kq->kq_state |= KQ_TASKDRAIN; 2222 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2223 } 2224 2225 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2226 selwakeuppri(&kq->kq_sel, PSOCK); 2227 if (!SEL_WAITING(&kq->kq_sel)) 2228 kq->kq_state &= ~KQ_SEL; 2229 } 2230 2231 KQ_UNLOCK(kq); 2232 } 2233 2234 static void 2235 kqueue_destroy(struct kqueue *kq) 2236 { 2237 2238 KASSERT(kq->kq_fdp == NULL, 2239 ("kqueue still attached to a file descriptor")); 2240 seldrain(&kq->kq_sel); 2241 knlist_destroy(&kq->kq_sel.si_note); 2242 mtx_destroy(&kq->kq_lock); 2243 2244 if (kq->kq_knhash != NULL) 2245 free(kq->kq_knhash, M_KQUEUE); 2246 if (kq->kq_knlist != NULL) 2247 free(kq->kq_knlist, M_KQUEUE); 2248 2249 funsetown(&kq->kq_sigio); 2250 } 2251 2252 /*ARGSUSED*/ 2253 static int 2254 kqueue_close(struct file *fp, struct thread *td) 2255 { 2256 struct kqueue *kq = fp->f_data; 2257 struct filedesc *fdp; 2258 int error; 2259 int filedesc_unlock; 2260 2261 if ((error = kqueue_acquire(fp, &kq))) 2262 return error; 2263 kqueue_drain(kq, td); 2264 2265 /* 2266 * We could be called due to the knote_drop() doing fdrop(), 2267 * called from kqueue_register(). In this case the global 2268 * lock is owned, and filedesc sx is locked before, to not 2269 * take the sleepable lock after non-sleepable. 
2270 */ 2271 fdp = kq->kq_fdp; 2272 kq->kq_fdp = NULL; 2273 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2274 FILEDESC_XLOCK(fdp); 2275 filedesc_unlock = 1; 2276 } else 2277 filedesc_unlock = 0; 2278 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2279 if (filedesc_unlock) 2280 FILEDESC_XUNLOCK(fdp); 2281 2282 kqueue_destroy(kq); 2283 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2284 crfree(kq->kq_cred); 2285 free(kq, M_KQUEUE); 2286 fp->f_data = NULL; 2287 2288 return (0); 2289 } 2290 2291 static int 2292 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2293 { 2294 2295 kif->kf_type = KF_TYPE_KQUEUE; 2296 return (0); 2297 } 2298 2299 static void 2300 kqueue_wakeup(struct kqueue *kq) 2301 { 2302 KQ_OWNED(kq); 2303 2304 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2305 kq->kq_state &= ~KQ_SLEEP; 2306 wakeup(kq); 2307 } 2308 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2309 selwakeuppri(&kq->kq_sel, PSOCK); 2310 if (!SEL_WAITING(&kq->kq_sel)) 2311 kq->kq_state &= ~KQ_SEL; 2312 } 2313 if (!knlist_empty(&kq->kq_sel.si_note)) 2314 kqueue_schedtask(kq); 2315 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2316 pgsigio(&kq->kq_sigio, SIGIO, 0); 2317 } 2318 } 2319 2320 /* 2321 * Walk down a list of knotes, activating them if their event has triggered. 2322 * 2323 * There is a possibility to optimize in the case of one kq watching another. 2324 * Instead of scheduling a task to wake it up, you could pass enough state 2325 * down the chain to make up the parent kqueue. Make this code functional 2326 * first. 2327 */ 2328 void 2329 knote(struct knlist *list, long hint, int lockflags) 2330 { 2331 struct kqueue *kq; 2332 struct knote *kn, *tkn; 2333 int error; 2334 2335 if (list == NULL) 2336 return; 2337 2338 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2339 2340 if ((lockflags & KNF_LISTLOCKED) == 0) 2341 list->kl_lock(list->kl_lockarg); 2342 2343 /* 2344 * If we unlock the list lock (and enter influx), we can 2345 * eliminate the kqueue scheduling, but this will introduce 2346 * four lock/unlock's for each knote to test. Also, marker 2347 * would be needed to keep iteration position, since filters 2348 * or other threads could remove events. 2349 */ 2350 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2351 kq = kn->kn_kq; 2352 KQ_LOCK(kq); 2353 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2354 /* 2355 * Do not process the influx notes, except for 2356 * the influx coming from the kq unlock in the 2357 * kqueue_scan(). In the later case, we do 2358 * not interfere with the scan, since the code 2359 * fragment in kqueue_scan() locks the knlist, 2360 * and cannot proceed until we finished. 
2361 */ 2362 KQ_UNLOCK(kq); 2363 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2364 kn_enter_flux(kn); 2365 KQ_UNLOCK(kq); 2366 error = kn->kn_fop->f_event(kn, hint); 2367 KQ_LOCK(kq); 2368 kn_leave_flux(kn); 2369 if (error) 2370 KNOTE_ACTIVATE(kn, 1); 2371 KQ_UNLOCK_FLUX(kq); 2372 } else { 2373 if (kn->kn_fop->f_event(kn, hint)) 2374 KNOTE_ACTIVATE(kn, 1); 2375 KQ_UNLOCK(kq); 2376 } 2377 } 2378 if ((lockflags & KNF_LISTLOCKED) == 0) 2379 list->kl_unlock(list->kl_lockarg); 2380 } 2381 2382 /* 2383 * add a knote to a knlist 2384 */ 2385 void 2386 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2387 { 2388 2389 KNL_ASSERT_LOCK(knl, islocked); 2390 KQ_NOTOWNED(kn->kn_kq); 2391 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2392 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2393 ("knote %p was not detached", kn)); 2394 if (!islocked) 2395 knl->kl_lock(knl->kl_lockarg); 2396 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2397 if (!islocked) 2398 knl->kl_unlock(knl->kl_lockarg); 2399 KQ_LOCK(kn->kn_kq); 2400 kn->kn_knlist = knl; 2401 kn->kn_status &= ~KN_DETACHED; 2402 KQ_UNLOCK(kn->kn_kq); 2403 } 2404 2405 static void 2406 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2407 int kqislocked) 2408 { 2409 2410 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2411 KNL_ASSERT_LOCK(knl, knlislocked); 2412 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2413 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2414 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2415 ("knote %p was already detached", kn)); 2416 if (!knlislocked) 2417 knl->kl_lock(knl->kl_lockarg); 2418 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2419 kn->kn_knlist = NULL; 2420 if (!knlislocked) 2421 kn_list_unlock(knl); 2422 if (!kqislocked) 2423 KQ_LOCK(kn->kn_kq); 2424 kn->kn_status |= KN_DETACHED; 2425 if (!kqislocked) 2426 KQ_UNLOCK(kn->kn_kq); 2427 } 2428 2429 /* 2430 * remove knote from the specified knlist 2431 */ 2432 void 2433 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2434 { 2435 2436 knlist_remove_kq(knl, kn, islocked, 0); 2437 } 2438 2439 int 2440 knlist_empty(struct knlist *knl) 2441 { 2442 2443 KNL_ASSERT_LOCKED(knl); 2444 return (SLIST_EMPTY(&knl->kl_list)); 2445 } 2446 2447 static struct mtx knlist_lock; 2448 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2449 MTX_DEF); 2450 static void knlist_mtx_lock(void *arg); 2451 static void knlist_mtx_unlock(void *arg); 2452 2453 static void 2454 knlist_mtx_lock(void *arg) 2455 { 2456 2457 mtx_lock((struct mtx *)arg); 2458 } 2459 2460 static void 2461 knlist_mtx_unlock(void *arg) 2462 { 2463 2464 mtx_unlock((struct mtx *)arg); 2465 } 2466 2467 static void 2468 knlist_mtx_assert_lock(void *arg, int what) 2469 { 2470 2471 if (what == LA_LOCKED) 2472 mtx_assert((struct mtx *)arg, MA_OWNED); 2473 else 2474 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2475 } 2476 2477 static void 2478 knlist_rw_rlock(void *arg) 2479 { 2480 2481 rw_rlock((struct rwlock *)arg); 2482 } 2483 2484 static void 2485 knlist_rw_runlock(void *arg) 2486 { 2487 2488 rw_runlock((struct rwlock *)arg); 2489 } 2490 2491 static void 2492 knlist_rw_assert_lock(void *arg, int what) 2493 { 2494 2495 if (what == LA_LOCKED) 2496 rw_assert((struct rwlock *)arg, RA_LOCKED); 2497 else 2498 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2499 } 2500 2501 void 2502 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2503 void (*kl_unlock)(void *), 2504 void 
void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_lock)(void *, int))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_lock == NULL)
		knl->kl_assert_lock = knlist_mtx_assert_lock;
	else
		knl->kl_assert_lock = kl_assert_lock;

	knl->kl_autodestroy = 0;
	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL);
}

struct knlist *
knlist_alloc(struct mtx *lock)
{
	struct knlist *knl;

	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
	knlist_init_mtx(knl, lock);
	return (knl);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_lock);
}

void
knlist_destroy(struct knlist *knl)
{

	KASSERT(KNLIST_EMPTY(knl),
	    ("destroying knlist %p with knotes on it", knl));
}

void
knlist_detach(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	knl->kl_autodestroy = 1;
	if (knlist_empty(knl)) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop_detached(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still in flux knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}
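/*
 * Callers usually do not invoke knlist_cleardel() directly but go through
 * the knlist_clear() and knlist_delete() wrappers (see <sys/event.h>), which
 * choose between marking the remaining knotes EV_EOF | EV_ONESHOT
 * (killkn == 0) and dropping them outright (killkn == 1).
 */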
2638 */ 2639 void 2640 knote_fdclose(struct thread *td, int fd) 2641 { 2642 struct filedesc *fdp = td->td_proc->p_fd; 2643 struct kqueue *kq; 2644 struct knote *kn; 2645 int influx; 2646 2647 FILEDESC_XLOCK_ASSERT(fdp); 2648 2649 /* 2650 * We shouldn't have to worry about new kevents appearing on fd 2651 * since filedesc is locked. 2652 */ 2653 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2654 KQ_LOCK(kq); 2655 2656 again: 2657 influx = 0; 2658 while (kq->kq_knlistsize > fd && 2659 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2660 if (kn_in_flux(kn)) { 2661 /* someone else might be waiting on our knote */ 2662 if (influx) 2663 wakeup(kq); 2664 kq->kq_state |= KQ_FLUXWAIT; 2665 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2666 goto again; 2667 } 2668 kn_enter_flux(kn); 2669 KQ_UNLOCK(kq); 2670 influx = 1; 2671 knote_drop(kn, td); 2672 KQ_LOCK(kq); 2673 } 2674 KQ_UNLOCK_FLUX(kq); 2675 } 2676 } 2677 2678 static int 2679 knote_attach(struct knote *kn, struct kqueue *kq) 2680 { 2681 struct klist *list; 2682 2683 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2684 KQ_OWNED(kq); 2685 2686 if ((kq->kq_state & KQ_CLOSING) != 0) 2687 return (EBADF); 2688 if (kn->kn_fop->f_isfd) { 2689 if (kn->kn_id >= kq->kq_knlistsize) 2690 return (ENOMEM); 2691 list = &kq->kq_knlist[kn->kn_id]; 2692 } else { 2693 if (kq->kq_knhash == NULL) 2694 return (ENOMEM); 2695 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2696 } 2697 SLIST_INSERT_HEAD(list, kn, kn_link); 2698 return (0); 2699 } 2700 2701 static void 2702 knote_drop(struct knote *kn, struct thread *td) 2703 { 2704 2705 if ((kn->kn_status & KN_DETACHED) == 0) 2706 kn->kn_fop->f_detach(kn); 2707 knote_drop_detached(kn, td); 2708 } 2709 2710 static void 2711 knote_drop_detached(struct knote *kn, struct thread *td) 2712 { 2713 struct kqueue *kq; 2714 struct klist *list; 2715 2716 kq = kn->kn_kq; 2717 2718 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2719 ("knote %p still attached", kn)); 2720 KQ_NOTOWNED(kq); 2721 2722 KQ_LOCK(kq); 2723 KASSERT(kn->kn_influx == 1, 2724 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2725 2726 if (kn->kn_fop->f_isfd) 2727 list = &kq->kq_knlist[kn->kn_id]; 2728 else 2729 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2730 2731 if (!SLIST_EMPTY(list)) 2732 SLIST_REMOVE(list, kn, knote, kn_link); 2733 if (kn->kn_status & KN_QUEUED) 2734 knote_dequeue(kn); 2735 KQ_UNLOCK_FLUX(kq); 2736 2737 if (kn->kn_fop->f_isfd) { 2738 fdrop(kn->kn_fp, td); 2739 kn->kn_fp = NULL; 2740 } 2741 kqueue_fo_release(kn->kn_kevent.filter); 2742 kn->kn_fop = NULL; 2743 knote_free(kn); 2744 } 2745 2746 static void 2747 knote_enqueue(struct knote *kn) 2748 { 2749 struct kqueue *kq = kn->kn_kq; 2750 2751 KQ_OWNED(kn->kn_kq); 2752 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2753 2754 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2755 kn->kn_status |= KN_QUEUED; 2756 kq->kq_count++; 2757 kqueue_wakeup(kq); 2758 } 2759 2760 static void 2761 knote_dequeue(struct knote *kn) 2762 { 2763 struct kqueue *kq = kn->kn_kq; 2764 2765 KQ_OWNED(kn->kn_kq); 2766 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2767 2768 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2769 kn->kn_status &= ~KN_QUEUED; 2770 kq->kq_count--; 2771 } 2772 2773 static void 2774 knote_init(void) 2775 { 2776 2777 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2778 NULL, NULL, UMA_ALIGN_PTR, 0); 2779 } 2780 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2781 2782 static 
static void
knote_drop(struct knote *kn, struct thread *td)
{

	if ((kn->kn_status & KN_DETACHED) == 0)
		kn->kn_fop->f_detach(kn);
	knote_drop_detached(kn, td);
}

static void
knote_drop_detached(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_DETACHED) != 0,
	    ("knote %p still attached", kn));
	KQ_NOTOWNED(kq);

	KQ_LOCK(kq);
	KASSERT(kn->kn_influx == 1,
	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));

	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int mflag)
{

	return (uma_zalloc(knote_zone, mflag | M_ZERO));
}

static void
knote_free(struct knote *kn)
{

	uma_zfree(knote_zone, kn);
}

/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
	    &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, mflag);
	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);
	return (error);
}
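/*
 * In-kernel usage sketch for kqfd_register() (illustrative only; "job" and
 * "kqfd" are hypothetical stand-ins for a real caller such as the AIO
 * subsystem, and the filter/flag choices are examples, not the actual ones):
 *
 *	struct kevent kev;
 *	int error;
 *
 *	EV_SET(&kev, (uintptr_t)job, EVFILT_AIO, EV_ADD | EV_FLAG1,
 *	    0, 0, job);
 *	error = kqfd_register(kqfd, &kev, td, M_WAITOK);
 */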