/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct knote	*knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static unsigned int	kq_ncallouts = 0;
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}
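
/*
 * Editorial sketch (not part of the original source): the canonical use
 * of the flux machinery above, as it appears in knote_fork(),
 * kqueue_register() and kqueue_scan() below.  A knote is marked in flux
 * under the kq lock, worked on with the lock dropped, and KQ_FLUXWAIT
 * sleepers are woken once it settles:
 *
 *	KQ_LOCK(kq);
 *	kn_enter_flux(kn);
 *	KQ_UNLOCK(kq);
 *	... work on the knote with no kqueue locks held ...
 *	KQ_LOCK(kq);
 *	kn_leave_flux(kn);
 *	KQ_UNLOCK_FLUX(kq);	... wakes any KQ_FLUXWAIT sleepers
 */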

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64		/* XXX should be tunable */
#endif

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
    MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
	{ &fs_filtops, 1 },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
};

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		exiting = true;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		exiting = true;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.  The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when the process forks.  It mostly does the same as knote(),
 * activating all knotes registered to be activated when the process
 * forks.  Additionally, for each knote attached to the parent, check
 * whether the user wants to track the new process.  If so, attach a new
 * knote to it, and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice.  This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}
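
/*
 * Illustrative userland sketch (editorial addition, not in the original
 * source): a process-tracking kevent that exercises the NOTE_TRACK path
 * above.  Children of the watched pid are reported with NOTE_CHILD set
 * and the parent's pid in kev.data, courtesy of the EV_FLAG2 registration
 * performed by knote_fork():
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */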

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define	NOTE_TIMER_PRECMASK	(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
				NOTE_NSECONDS)

static sbintime_t
timer2sbintime(intptr_t data, int flags)
{

	/*
	 * Macros for converting to the fractional second portion of an
	 * sbintime_t using 64bit multiplication to improve precision.
	 */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			int64_t secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			int64_t secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			int64_t secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			/* The remainder is in nanoseconds, so use NS_TO_SBT. */
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}
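
/*
 * Worked example (editorial addition, not in the original source): the
 * NS/US/MS_TO_SBT() macros above compute "value * 2^32 / unit" without
 * overflowing 64 bits, by pre-dividing 2^63 by (unit / 2).  For instance,
 * MS_TO_SBT(250) is 250 * ((uint64_t)1 << 63) / 500) >> 32, which works
 * out to 0x3FFFFFFF, i.e. one ulp shy of 2^30 because of the truncating
 * divide: 0.25 s in the 32.32 fixed-point format of sbintime_t.
 */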

struct kq_timer_cb_data {
	struct callout c;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period */
};

static void
filt_timerexpire(void *knx)
{
	struct knote *kn;
	struct kq_timer_cb_data *kc;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != 0)
		return;

	kc = kn->kn_ptr.p_v;
	kc->next += kc->to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	sbintime_t to;
	unsigned int ncallouts;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far. */
	if ((kn->kn_sfflags & ~NOTE_TIMER_PRECMASK) != 0)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (to < 0)
		return (EINVAL);

	do {
		ncallouts = kq_ncallouts;
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	callout_init(&kc->c, 1);
	kc->next = to + sbinuptime();
	kc->to = to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old;

	kc = kn->kn_ptr.p_v;
	callout_drain(&kc->c);
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
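
/*
 * Illustrative userland sketch (editorial addition, not in the original
 * source): the typical EVFILT_USER rendezvous.  One thread registers the
 * event, another fires it with NOTE_TRIGGER; filt_usertouch() above
 * applies the NOTE_FFAND/NOTE_FFOR/NOTE_FFCOPY fflags arithmetic on
 * registration:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	... register
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	... wake the waiter
 */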

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, NULL));
}

static void
kqueue_init(struct kqueue *kq)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}

int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct ucred *cred;
	int fd, error;

	fdp = td->td_proc->p_fd;
	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = falloc_caps(td, &fp, &fd, flags, fcaps);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

#ifdef KTRACE
static size_t
kev_iovlen(int n, u_int kgio)
{

	if (n < 0 || n >= kgio / sizeof(struct kevent))
		return (kgio);
	return (n * sizeof(struct kevent));
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct timespec ts, *tsp;
	struct kevent_copyops k_ops = { uap,
					kevent_copyout,
					kevent_copyin};
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
	u_int kgio;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		kgio = ktr_geniosize;
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = kev_iovlen(uap->nchanges, kgio);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = kev_iovlen(uap->nevents, kgio);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    &k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = kev_iovlen(uap->nchanges, kgio);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = kev_iovlen(td->td_retval[0], kgio);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}
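
/*
 * Illustrative userland sketch (editorial addition, not in the original
 * source): the minimal kqueue()/kevent() round trip the two syscalls
 * above implement.  A single kevent() call may both submit a changelist
 * and collect triggered events:
 *
 *	struct kevent change, ev;
 *	int kq = kqueue();
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &change, 1, &ev, 1, NULL);	... blocks until ready
 *	if (n > 0)
 *		printf("%jd bytes readable\n", (intmax_t)ev.data);
 */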

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);

	return (error);
}

static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue.  This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}
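
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * a kernel module would typically claim one of the null_filtops slots in
 * sysfilt_ops via the function above, e.g. from a MOD_LOAD handler (the
 * myfilt_* callbacks below are hypothetical):
 *
 *	static struct filterops myfiltops = {
 *		.f_isfd = 0,
 *		.f_attach = myfilt_attach,
 *		.f_detach = myfilt_detach,
 *		.f_event = myfilt_event,
 *	};
 *	error = kqueue_add_filteropts(EVFILT_LIO, &myfiltops);
 *
 * and release the slot with kqueue_del_filteropts() at MOD_UNLOAD time.
 */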

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	if (sysfilt_ops[~filt].for_nolock)
		return sysfilt_ops[~filt].for_fop;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence if memory allocation should wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
    int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	struct knlist *knl;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
		return (EINVAL);

	fp = NULL;
	kn = NULL;
	knl = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	if (kev->flags & EV_ADD) {
		/*
		 * Prevent waiting with locks.  Non-sleepable
		 * allocation failures are handled in the loop, only
		 * if the spare knote appears to be actually required.
		 */
		tkn = knote_alloc(waitok);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		if (kev->ident > INT_MAX)
			error = EBADF;
		else
			error = fget(td, kev->ident,
			    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);

		/*
		 * If possible, find an existing knote to use for this kevent.
		 */
		if (kev->filter == EVFILT_PROC &&
		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
			/* This is an internal creation of a process tracking
			 * note.  Don't attempt to coalesce this with an
			 * existing note.
			 */
			;
		} else if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && kn_in_flux(kn)) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
			kn->kn_status = KN_DETACHED;
			kn_enter_flux(kn);

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop_detached(kn, td);
				goto done;
			}
			knl = kn_list_lock(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		knote_drop(kn, td);
		goto done;
	}

	if (kev->flags & EV_FORCEONESHOT) {
		kn->kn_flags |= EV_ONESHOT;
		KNOTE_ACTIVATE(kn, 1);
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_SCAN;
	kn_enter_flux(kn);
	KQ_UNLOCK(kq);
	knl = kn_list_lock(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, i.e. filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	if ((kev->flags & EV_ENABLE) != 0)
		kn->kn_status &= ~KN_DISABLED;
	else if ((kev->flags & EV_DISABLE) != 0)
		kn->kn_status |= KN_DISABLED;

	if ((kn->kn_status & KN_DISABLED) == 0)
		event = kn->kn_fop->f_event(kn, 0);
	else
		event = 0;

	KQ_LOCK(kq);
	if (event)
		kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
	    KN_ACTIVE)
		knote_enqueue(kn);
	kn->kn_status &= ~KN_SCAN;
	kn_leave_flux(kn);
	kn_list_unlock(knl);
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
    int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes who weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}

static void
kqueue_drain(struct kqueue *kq, struct thread *td)
{
	struct knote *kn;
	int i;

	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			if (kn_in_flux(kn)) {
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
				continue;
			}
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				if (kn_in_flux(kn)) {
					kq->kq_state |= KQ_FLUXWAIT;
					msleep(kq, &kq->kq_lock, PSOCK,
					    "kqclo2", 0);
					continue;
				}
				kn_enter_flux(kn);
				KQ_UNLOCK(kq);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}

	KQ_UNLOCK(kq);
}

static void
kqueue_destroy(struct kqueue *kq)
{

	KASSERT(kq->kq_fdp == NULL,
	    ("kqueue still attached to a file descriptor"));
	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	int error;
	int filedesc_unlock;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;
	kqueue_drain(kq, td);

	/*
	 * We could be called due to the knote_drop() doing fdrop(),
	 * called from kqueue_register().  In this case the global
	 * lock is owned, and filedesc sx is locked before, to not
	 * take the sleepable lock after non-sleepable.
	 */
	fdp = kq->kq_fdp;
	kq->kq_fdp = NULL;
	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
		FILEDESC_XLOCK(fdp);
		filedesc_unlock = 1;
	} else
		filedesc_unlock = 0;
	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(fdp);

	kqueue_destroy(kq);
	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
	crfree(kq->kq_cred);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static int
kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{

	kif->kf_type = KF_TYPE_KQUEUE;
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to wake up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int lockflags)
{
	struct kqueue *kq;
	struct knote *kn, *tkn;
	int error;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);

	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and enter influx), we can
	 * eliminate the kqueue scheduling, but this will introduce
	 * four lock/unlock's for each knote to test.  Also, marker
	 * would be needed to keep iteration position, since filters
	 * or other threads could remove events.
	 */
	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			/*
			 * Do not process the influx notes, except for
			 * the influx coming from the kq unlock in the
			 * kqueue_scan().  In the latter case, we do
			 * not interfere with the scan, since the code
			 * fragment in kqueue_scan() locks the knlist,
			 * and cannot proceed until we finished.
			 */
			KQ_UNLOCK(kq);
		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			error = kn->kn_fop->f_event(kn, hint);
			KQ_LOCK(kq);
			kn_leave_flux(kn);
			if (error)
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK_FLUX(kq);
		} else {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, hint))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
		}
	}
	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_unlock(list->kl_lockarg);
}
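
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * event sources normally reach this function through the KNOTE_LOCKED()
 * and KNOTE_UNLOCKED() wrappers from <sys/event.h>, chosen by whether the
 * knlist lock is already held.  For instance, a socket buffer wakeup path
 * holding the sockbuf lock would do roughly:
 *
 *	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
 *
 * kqueue_task() above does the same for a kqueue that is itself being
 * watched by another kqueue.
 */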

/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{

	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
	KASSERT((kn->kn_status & KN_DETACHED) != 0,
	    ("knote %p was not detached", kn));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
    int kqislocked)
{

	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
	KASSERT((kn->kn_status & KN_DETACHED) == 0,
	    ("knote %p was already detached", kn));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		kn_list_unlock(knl);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * remove knote from the specified knlist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

int
knlist_empty(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	return (SLIST_EMPTY(&knl->kl_list));
}

static struct mtx knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
    MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);

static void
knlist_mtx_lock(void *arg)
{

	mtx_lock((struct mtx *)arg);
}

static void
knlist_mtx_unlock(void *arg)
{

	mtx_unlock((struct mtx *)arg);
}

static void
knlist_mtx_assert_locked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_OWNED);
}

static void
knlist_mtx_assert_unlocked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
}

static void
knlist_rw_rlock(void *arg)
{

	rw_rlock((struct rwlock *)arg);
}

static void
knlist_rw_runlock(void *arg)
{

	rw_runlock((struct rwlock *)arg);
}

static void
knlist_rw_assert_locked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_LOCKED);
}

static void
knlist_rw_assert_unlocked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
}
void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_locked == NULL)
		knl->kl_assert_locked = knlist_mtx_assert_locked;
	else
		knl->kl_assert_locked = kl_assert_locked;
	if (kl_assert_unlocked == NULL)
		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
	else
		knl->kl_assert_unlocked = kl_assert_unlocked;

	knl->kl_autodestroy = 0;
	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
}

struct knlist *
knlist_alloc(struct mtx *lock)
{
	struct knlist *knl;

	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
	knlist_init_mtx(knl, lock);
	return (knl);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
}

void
knlist_destroy(struct knlist *knl)
{

	KASSERT(KNLIST_EMPTY(knl),
	    ("destroying knlist %p with knotes on it", knl));
}

void
knlist_detach(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	knl->kl_autodestroy = 1;
	if (knlist_empty(knl)) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}
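
/*
 * Sketch of the usual lifecycle, for illustration only (the softc names
 * are hypothetical): a driver pairs knlist_init_mtx() at attach time with
 * knlist_clear() and knlist_destroy() at detach time:
 *
 *	knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);	// attach
 *	...
 *	knlist_clear(&sc->sc_rsel.si_note, 0);			// detach
 *	knlist_destroy(&sc->sc_rsel.si_note);
 *	seldrain(&sc->sc_rsel);
 *
 * knlist_clear() is a wrapper around knlist_cleardel(), shown below.
 */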
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop_detached(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still in-flux knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}
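
/*
 * The knlist_clear()/knlist_delete() wrappers map onto the killkn argument
 * here: knlist_clear() passes killkn == 0, leaving each knote marked
 * EV_EOF | EV_ONESHOT so it drains on the next scan, while knlist_delete()
 * passes killkn == 1 and drops the knotes outright.  A hedged sketch of a
 * teardown path (the softc is hypothetical):
 *
 *	knlist_delete(&sc->sc_rsel.si_note, curthread, 0);
 */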
/*
 * Remove all knotes referencing a specified fd.  Must be called with the
 * FILEDESC lock held.  This prevents a race where a new fd comes along
 * and occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn_in_flux(kn)) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			influx = 1;
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return (ENOMEM);
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return (ENOMEM);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	return (0);
}

static void
knote_drop(struct knote *kn, struct thread *td)
{

	if ((kn->kn_status & KN_DETACHED) == 0)
		kn->kn_fop->f_detach(kn);
	knote_drop_detached(kn, td);
}

static void
knote_drop_detached(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_DETACHED) != 0,
	    ("knote %p still attached", kn));
	KQ_NOTOWNED(kq);

	KQ_LOCK(kq);
	KASSERT(kn->kn_influx == 1,
	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));

	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{

	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
	    M_ZERO));
}

static void
knote_free(struct knote *kn)
{

	uma_zfree(knote_zone, kn);
}
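
/*
 * Note on the allocation contract above: knote_alloc() forwards waitok to
 * UMA, so with waitok == 0 the allocation may fail and return NULL.  A
 * minimal sketch of the expected calling pattern (hypothetical caller):
 *
 *	kn = knote_alloc(waitok);
 *	if (kn == NULL)
 *		return (ENOMEM);
 */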
/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);
	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);
	return (error);
}
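
/*
 * kqfd_register() exists for in-kernel producers that only know the target
 * kqueue by file descriptor.  A hedged sketch of such a caller, loosely
 * modeled on the AIO completion path (identifiers are illustrative):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, (uintptr_t)job, EVFILT_AIO, EV_ADD | EV_FLAG1,
 *	    0, 0, job);
 *	error = kqfd_register(kqfd, &kev, td, 1);
 */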