1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * Copyright (c) 2009 Apple, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_ktrace.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/capability.h> 37 #include <sys/kernel.h> 38 #include <sys/lock.h> 39 #include <sys/mutex.h> 40 #include <sys/rwlock.h> 41 #include <sys/proc.h> 42 #include <sys/malloc.h> 43 #include <sys/unistd.h> 44 #include <sys/file.h> 45 #include <sys/filedesc.h> 46 #include <sys/filio.h> 47 #include <sys/fcntl.h> 48 #include <sys/kthread.h> 49 #include <sys/selinfo.h> 50 #include <sys/stdatomic.h> 51 #include <sys/queue.h> 52 #include <sys/event.h> 53 #include <sys/eventvar.h> 54 #include <sys/poll.h> 55 #include <sys/protosw.h> 56 #include <sys/sigio.h> 57 #include <sys/signalvar.h> 58 #include <sys/socket.h> 59 #include <sys/socketvar.h> 60 #include <sys/stat.h> 61 #include <sys/sysctl.h> 62 #include <sys/sysproto.h> 63 #include <sys/syscallsubr.h> 64 #include <sys/taskqueue.h> 65 #include <sys/uio.h> 66 #ifdef KTRACE 67 #include <sys/ktrace.h> 68 #endif 69 70 #include <vm/uma.h> 71 72 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 73 74 /* 75 * This lock is used if multiple kq locks are required. This possibly 76 * should be made into a per proc lock. 
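 *
 * The KQ_GLOBAL_LOCK()/KQ_GLOBAL_UNLOCK() macros below take a 'haslck'
 * flag so that call sites which may or may not already hold kq_global
 * (e.g. kqueue_register(), kqueue_scan() and kqueue_task()) need not
 * track the state by hand.  A minimal sketch of the intended usage:
 *
 *	int haskqglobal = 0;
 *	...
 *	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);	// locks only if not yet held
 *	...
 *	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);	// unlocks only if held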
77 */ 78 static struct mtx kq_global; 79 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 80 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 81 if (!haslck) \ 82 mtx_lock(lck); \ 83 haslck = 1; \ 84 } while (0) 85 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 86 if (haslck) \ 87 mtx_unlock(lck); \ 88 haslck = 0; \ 89 } while (0) 90 91 TASKQUEUE_DEFINE_THREAD(kqueue); 92 93 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 94 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 95 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 96 struct thread *td, int waitok); 97 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 98 static void kqueue_release(struct kqueue *kq, int locked); 99 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 100 uintptr_t ident, int waitok); 101 static void kqueue_task(void *arg, int pending); 102 static int kqueue_scan(struct kqueue *kq, int maxevents, 103 struct kevent_copyops *k_ops, 104 const struct timespec *timeout, 105 struct kevent *keva, struct thread *td); 106 static void kqueue_wakeup(struct kqueue *kq); 107 static struct filterops *kqueue_fo_find(int filt); 108 static void kqueue_fo_release(int filt); 109 110 static fo_rdwr_t kqueue_read; 111 static fo_rdwr_t kqueue_write; 112 static fo_truncate_t kqueue_truncate; 113 static fo_ioctl_t kqueue_ioctl; 114 static fo_poll_t kqueue_poll; 115 static fo_kqfilter_t kqueue_kqfilter; 116 static fo_stat_t kqueue_stat; 117 static fo_close_t kqueue_close; 118 119 static struct fileops kqueueops = { 120 .fo_read = kqueue_read, 121 .fo_write = kqueue_write, 122 .fo_truncate = kqueue_truncate, 123 .fo_ioctl = kqueue_ioctl, 124 .fo_poll = kqueue_poll, 125 .fo_kqfilter = kqueue_kqfilter, 126 .fo_stat = kqueue_stat, 127 .fo_close = kqueue_close, 128 .fo_chmod = invfo_chmod, 129 .fo_chown = invfo_chown, 130 .fo_sendfile = invfo_sendfile, 131 }; 132 133 static int knote_attach(struct knote *kn, struct kqueue *kq); 134 static void knote_drop(struct knote *kn, struct thread *td); 135 static void knote_enqueue(struct knote *kn); 136 static void knote_dequeue(struct knote *kn); 137 static void knote_init(void); 138 static struct knote *knote_alloc(int waitok); 139 static void knote_free(struct knote *kn); 140 141 static void filt_kqdetach(struct knote *kn); 142 static int filt_kqueue(struct knote *kn, long hint); 143 static int filt_procattach(struct knote *kn); 144 static void filt_procdetach(struct knote *kn); 145 static int filt_proc(struct knote *kn, long hint); 146 static int filt_fileattach(struct knote *kn); 147 static void filt_timerexpire(void *knx); 148 static int filt_timerattach(struct knote *kn); 149 static void filt_timerdetach(struct knote *kn); 150 static int filt_timer(struct knote *kn, long hint); 151 static int filt_userattach(struct knote *kn); 152 static void filt_userdetach(struct knote *kn); 153 static int filt_user(struct knote *kn, long hint); 154 static void filt_usertouch(struct knote *kn, struct kevent *kev, 155 u_long type); 156 157 static struct filterops file_filtops = { 158 .f_isfd = 1, 159 .f_attach = filt_fileattach, 160 }; 161 static struct filterops kqread_filtops = { 162 .f_isfd = 1, 163 .f_detach = filt_kqdetach, 164 .f_event = filt_kqueue, 165 }; 166 /* XXX - move to kern_proc.c? 
 */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do {				\
	if ((islock))						\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);	\
	else							\
		KQ_LOCK((kn)->kn_kq);				\
	(kn)->kn_status |= KN_ACTIVE;				\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));				\
	if (!(islock))						\
		KQ_UNLOCK((kn)->kn_kq);				\
} while(0)
#define KQ_LOCK(kq) do {					\
	mtx_lock(&(kq)->kq_lock);				\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {					\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {	\
		(kq)->kq_state &= ~KQ_FLUXWAIT;			\
		wakeup((kq));					\
	}							\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {					\
	KQ_FLUX_WAKEUP(kq);					\
	mtx_unlock(&(kq)->kq_lock);				\
} while (0)
#define KQ_UNLOCK(kq) do {					\
	mtx_unlock(&(kq)->kq_lock);				\
} while (0)
#define KQ_OWNED(kq) do {					\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);			\
} while (0)
#define KQ_NOTOWNED(kq) do {					\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);		\
} while (0)
#define KN_LIST_LOCK(kn) do {					\
	if (kn->kn_knlist != NULL)				\
		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define KN_LIST_UNLOCK(kn) do {					\
	if (kn->kn_knlist != NULL)				\
		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define	KNL_ASSERT_LOCK(knl, islocked) do {			\
	if (islocked)						\
		KNL_ASSERT_LOCKED(knl);				\
	else							\
		KNL_ASSERT_UNLOCKED(knl);			\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {				\
	knl->kl_assert_locked((knl)->kl_lockarg);		\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {				\
	knl->kl_assert_unlocked((knl)->kl_lockarg);		\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while(0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
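 *
 * System filters are small negative constants (EVFILT_READ == -1 and so on),
 * so the index into this table is the bitwise complement of the filter value,
 * e.g. ~EVFILT_READ == 0.  kqueue_fo_find(), kqueue_fo_release() and
 * kqueue_add_filteropts()/kqueue_del_filteropts() below all use the same
 * ~filt indexing.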
273 */ 274 static struct mtx filterops_lock; 275 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 276 MTX_DEF); 277 static struct { 278 struct filterops *for_fop; 279 int for_refcnt; 280 } sysfilt_ops[EVFILT_SYSCOUNT] = { 281 { &file_filtops }, /* EVFILT_READ */ 282 { &file_filtops }, /* EVFILT_WRITE */ 283 { &null_filtops }, /* EVFILT_AIO */ 284 { &file_filtops }, /* EVFILT_VNODE */ 285 { &proc_filtops }, /* EVFILT_PROC */ 286 { &sig_filtops }, /* EVFILT_SIGNAL */ 287 { &timer_filtops }, /* EVFILT_TIMER */ 288 { &null_filtops }, /* former EVFILT_NETDEV */ 289 { &fs_filtops }, /* EVFILT_FS */ 290 { &null_filtops }, /* EVFILT_LIO */ 291 { &user_filtops }, /* EVFILT_USER */ 292 }; 293 294 /* 295 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 296 * method. 297 */ 298 static int 299 filt_fileattach(struct knote *kn) 300 { 301 302 return (fo_kqfilter(kn->kn_fp, kn)); 303 } 304 305 /*ARGSUSED*/ 306 static int 307 kqueue_kqfilter(struct file *fp, struct knote *kn) 308 { 309 struct kqueue *kq = kn->kn_fp->f_data; 310 311 if (kn->kn_filter != EVFILT_READ) 312 return (EINVAL); 313 314 kn->kn_status |= KN_KQUEUE; 315 kn->kn_fop = &kqread_filtops; 316 knlist_add(&kq->kq_sel.si_note, kn, 0); 317 318 return (0); 319 } 320 321 static void 322 filt_kqdetach(struct knote *kn) 323 { 324 struct kqueue *kq = kn->kn_fp->f_data; 325 326 knlist_remove(&kq->kq_sel.si_note, kn, 0); 327 } 328 329 /*ARGSUSED*/ 330 static int 331 filt_kqueue(struct knote *kn, long hint) 332 { 333 struct kqueue *kq = kn->kn_fp->f_data; 334 335 kn->kn_data = kq->kq_count; 336 return (kn->kn_data > 0); 337 } 338 339 /* XXX - move to kern_proc.c? */ 340 static int 341 filt_procattach(struct knote *kn) 342 { 343 struct proc *p; 344 int immediate; 345 int error; 346 347 immediate = 0; 348 p = pfind(kn->kn_id); 349 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 350 p = zpfind(kn->kn_id); 351 immediate = 1; 352 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 353 immediate = 1; 354 } 355 356 if (p == NULL) 357 return (ESRCH); 358 if ((error = p_cansee(curthread, p))) { 359 PROC_UNLOCK(p); 360 return (error); 361 } 362 363 kn->kn_ptr.p_proc = p; 364 kn->kn_flags |= EV_CLEAR; /* automatically set */ 365 366 /* 367 * internal flag indicating registration done by kernel 368 */ 369 if (kn->kn_flags & EV_FLAG1) { 370 kn->kn_data = kn->kn_sdata; /* ppid */ 371 kn->kn_fflags = NOTE_CHILD; 372 kn->kn_flags &= ~EV_FLAG1; 373 } 374 375 if (immediate == 0) 376 knlist_add(&p->p_klist, kn, 1); 377 378 /* 379 * Immediately activate any exit notes if the target process is a 380 * zombie. This is necessary to handle the case where the target 381 * process, e.g. a child, dies before the kevent is registered. 382 */ 383 if (immediate && filt_proc(kn, NOTE_EXIT)) 384 KNOTE_ACTIVATE(kn, 0); 385 386 PROC_UNLOCK(p); 387 388 return (0); 389 } 390 391 /* 392 * The knote may be attached to a different process, which may exit, 393 * leaving nothing for the knote to be attached to. So when the process 394 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 395 * it will be deleted when read out. However, as part of the knote deletion, 396 * this routine is called, so a check is needed to avoid actually performing 397 * a detach, because the original process does not exist any more. 398 */ 399 /* XXX - move to kern_proc.c? 
*/ 400 static void 401 filt_procdetach(struct knote *kn) 402 { 403 struct proc *p; 404 405 p = kn->kn_ptr.p_proc; 406 knlist_remove(&p->p_klist, kn, 0); 407 kn->kn_ptr.p_proc = NULL; 408 } 409 410 /* XXX - move to kern_proc.c? */ 411 static int 412 filt_proc(struct knote *kn, long hint) 413 { 414 struct proc *p = kn->kn_ptr.p_proc; 415 u_int event; 416 417 /* 418 * mask off extra data 419 */ 420 event = (u_int)hint & NOTE_PCTRLMASK; 421 422 /* 423 * if the user is interested in this event, record it. 424 */ 425 if (kn->kn_sfflags & event) 426 kn->kn_fflags |= event; 427 428 /* 429 * process is gone, so flag the event as finished. 430 */ 431 if (event == NOTE_EXIT) { 432 if (!(kn->kn_status & KN_DETACHED)) 433 knlist_remove_inevent(&p->p_klist, kn); 434 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 435 kn->kn_ptr.p_proc = NULL; 436 if (kn->kn_fflags & NOTE_EXIT) 437 kn->kn_data = p->p_xstat; 438 if (kn->kn_fflags == 0) 439 kn->kn_flags |= EV_DROP; 440 return (1); 441 } 442 443 return (kn->kn_fflags != 0); 444 } 445 446 /* 447 * Called when the process forked. It mostly does the same as the 448 * knote(), activating all knotes registered to be activated when the 449 * process forked. Additionally, for each knote attached to the 450 * parent, check whether user wants to track the new process. If so 451 * attach a new knote to it, and immediately report an event with the 452 * child's pid. 453 */ 454 void 455 knote_fork(struct knlist *list, int pid) 456 { 457 struct kqueue *kq; 458 struct knote *kn; 459 struct kevent kev; 460 int error; 461 462 if (list == NULL) 463 return; 464 list->kl_lock(list->kl_lockarg); 465 466 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 467 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 468 continue; 469 kq = kn->kn_kq; 470 KQ_LOCK(kq); 471 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 472 KQ_UNLOCK(kq); 473 continue; 474 } 475 476 /* 477 * The same as knote(), activate the event. 478 */ 479 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 480 kn->kn_status |= KN_HASKQLOCK; 481 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 482 KNOTE_ACTIVATE(kn, 1); 483 kn->kn_status &= ~KN_HASKQLOCK; 484 KQ_UNLOCK(kq); 485 continue; 486 } 487 488 /* 489 * The NOTE_TRACK case. In addition to the activation 490 * of the event, we need to register new event to 491 * track the child. Drop the locks in preparation for 492 * the call to kqueue_register(). 493 */ 494 kn->kn_status |= KN_INFLUX; 495 KQ_UNLOCK(kq); 496 list->kl_unlock(list->kl_lockarg); 497 498 /* 499 * Activate existing knote and register a knote with 500 * new process. 501 */ 502 kev.ident = pid; 503 kev.filter = kn->kn_filter; 504 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 505 kev.fflags = kn->kn_sfflags; 506 kev.data = kn->kn_id; /* parent */ 507 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 508 error = kqueue_register(kq, &kev, NULL, 0); 509 if (error) 510 kn->kn_fflags |= NOTE_TRACKERR; 511 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 512 KNOTE_ACTIVATE(kn, 0); 513 KQ_LOCK(kq); 514 kn->kn_status &= ~KN_INFLUX; 515 KQ_UNLOCK_FLUX(kq); 516 list->kl_lock(list->kl_lockarg); 517 } 518 list->kl_unlock(list->kl_lockarg); 519 } 520 521 /* 522 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 523 * interval timer support code. 
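 *
 * For reference, kn_sdata for EVFILT_TIMER is a period in milliseconds and
 * is converted to sbintime_t by timer2sbintime() below; filt_timerattach()
 * sets EV_CLEAR itself, so the expiration count in kn_data resets on each
 * retrieval.  An illustrative userland registration (standard <sys/event.h>
 * API, not kernel code) might be:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 5000, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	-- fires about every 5 seconds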
524 */ 525 static __inline sbintime_t 526 timer2sbintime(intptr_t data) 527 { 528 529 return (SBT_1MS * data); 530 } 531 532 static void 533 filt_timerexpire(void *knx) 534 { 535 struct callout *calloutp; 536 struct knote *kn; 537 538 kn = knx; 539 kn->kn_data++; 540 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 541 542 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) { 543 calloutp = (struct callout *)kn->kn_hook; 544 callout_reset_sbt_on(calloutp, 545 timer2sbintime(kn->kn_sdata), 0 /* 1ms? */, 546 filt_timerexpire, kn, PCPU_GET(cpuid), 0); 547 } 548 } 549 550 /* 551 * data contains amount of time to sleep, in milliseconds 552 */ 553 static int 554 filt_timerattach(struct knote *kn) 555 { 556 struct callout *calloutp; 557 unsigned int ncallouts; 558 559 ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed); 560 do { 561 if (ncallouts >= kq_calloutmax) 562 return (ENOMEM); 563 } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts, 564 &ncallouts, ncallouts + 1, memory_order_relaxed, 565 memory_order_relaxed)); 566 567 kn->kn_flags |= EV_CLEAR; /* automatically set */ 568 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 569 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 570 callout_init(calloutp, CALLOUT_MPSAFE); 571 kn->kn_hook = calloutp; 572 callout_reset_sbt_on(calloutp, 573 timer2sbintime(kn->kn_sdata), 0 /* 1ms? */, 574 filt_timerexpire, kn, PCPU_GET(cpuid), 0); 575 576 return (0); 577 } 578 579 static void 580 filt_timerdetach(struct knote *kn) 581 { 582 struct callout *calloutp; 583 unsigned int old; 584 585 calloutp = (struct callout *)kn->kn_hook; 586 callout_drain(calloutp); 587 free(calloutp, M_KQUEUE); 588 old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed); 589 KASSERT(old > 0, ("Number of callouts cannot become negative")); 590 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 591 } 592 593 static int 594 filt_timer(struct knote *kn, long hint) 595 { 596 597 return (kn->kn_data != 0); 598 } 599 600 static int 601 filt_userattach(struct knote *kn) 602 { 603 604 /* 605 * EVFILT_USER knotes are not attached to anything in the kernel. 606 */ 607 kn->kn_hook = NULL; 608 if (kn->kn_fflags & NOTE_TRIGGER) 609 kn->kn_hookid = 1; 610 else 611 kn->kn_hookid = 0; 612 return (0); 613 } 614 615 static void 616 filt_userdetach(__unused struct knote *kn) 617 { 618 619 /* 620 * EVFILT_USER knotes are not attached to anything in the kernel. 621 */ 622 } 623 624 static int 625 filt_user(struct knote *kn, __unused long hint) 626 { 627 628 return (kn->kn_hookid); 629 } 630 631 static void 632 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 633 { 634 u_int ffctrl; 635 636 switch (type) { 637 case EVENT_REGISTER: 638 if (kev->fflags & NOTE_TRIGGER) 639 kn->kn_hookid = 1; 640 641 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 642 kev->fflags &= NOTE_FFLAGSMASK; 643 switch (ffctrl) { 644 case NOTE_FFNOP: 645 break; 646 647 case NOTE_FFAND: 648 kn->kn_sfflags &= kev->fflags; 649 break; 650 651 case NOTE_FFOR: 652 kn->kn_sfflags |= kev->fflags; 653 break; 654 655 case NOTE_FFCOPY: 656 kn->kn_sfflags = kev->fflags; 657 break; 658 659 default: 660 /* XXX Return error? 
*/ 661 break; 662 } 663 kn->kn_sdata = kev->data; 664 if (kev->flags & EV_CLEAR) { 665 kn->kn_hookid = 0; 666 kn->kn_data = 0; 667 kn->kn_fflags = 0; 668 } 669 break; 670 671 case EVENT_PROCESS: 672 *kev = kn->kn_kevent; 673 kev->fflags = kn->kn_sfflags; 674 kev->data = kn->kn_sdata; 675 if (kn->kn_flags & EV_CLEAR) { 676 kn->kn_hookid = 0; 677 kn->kn_data = 0; 678 kn->kn_fflags = 0; 679 } 680 break; 681 682 default: 683 panic("filt_usertouch() - invalid type (%ld)", type); 684 break; 685 } 686 } 687 688 int 689 sys_kqueue(struct thread *td, struct kqueue_args *uap) 690 { 691 struct filedesc *fdp; 692 struct kqueue *kq; 693 struct file *fp; 694 int fd, error; 695 696 fdp = td->td_proc->p_fd; 697 error = falloc(td, &fp, &fd, 0); 698 if (error) 699 goto done2; 700 701 /* An extra reference on `fp' has been held for us by falloc(). */ 702 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 703 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK); 704 TAILQ_INIT(&kq->kq_head); 705 kq->kq_fdp = fdp; 706 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 707 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 708 709 FILEDESC_XLOCK(fdp); 710 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 711 FILEDESC_XUNLOCK(fdp); 712 713 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 714 fdrop(fp, td); 715 716 td->td_retval[0] = fd; 717 done2: 718 return (error); 719 } 720 721 #ifndef _SYS_SYSPROTO_H_ 722 struct kevent_args { 723 int fd; 724 const struct kevent *changelist; 725 int nchanges; 726 struct kevent *eventlist; 727 int nevents; 728 const struct timespec *timeout; 729 }; 730 #endif 731 int 732 sys_kevent(struct thread *td, struct kevent_args *uap) 733 { 734 struct timespec ts, *tsp; 735 struct kevent_copyops k_ops = { uap, 736 kevent_copyout, 737 kevent_copyin}; 738 int error; 739 #ifdef KTRACE 740 struct uio ktruio; 741 struct iovec ktriov; 742 struct uio *ktruioin = NULL; 743 struct uio *ktruioout = NULL; 744 #endif 745 746 if (uap->timeout != NULL) { 747 error = copyin(uap->timeout, &ts, sizeof(ts)); 748 if (error) 749 return (error); 750 tsp = &ts; 751 } else 752 tsp = NULL; 753 754 #ifdef KTRACE 755 if (KTRPOINT(td, KTR_GENIO)) { 756 ktriov.iov_base = uap->changelist; 757 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 758 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 759 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 760 .uio_td = td }; 761 ktruioin = cloneuio(&ktruio); 762 ktriov.iov_base = uap->eventlist; 763 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 764 ktruioout = cloneuio(&ktruio); 765 } 766 #endif 767 768 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 769 &k_ops, tsp); 770 771 #ifdef KTRACE 772 if (ktruioin != NULL) { 773 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 774 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 775 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 776 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 777 } 778 #endif 779 780 return (error); 781 } 782 783 /* 784 * Copy 'count' items into the destination list pointed to by uap->eventlist. 
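 *
 * uap->eventlist (and uap->changelist in kevent_copyin() below) is advanced
 * after each successful copy, so kqueue_scan() and kern_kevent() can call
 * these helpers repeatedly in chunks of at most KQ_NEVENTS entries and the
 * user buffers are consumed in order.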
785 */ 786 static int 787 kevent_copyout(void *arg, struct kevent *kevp, int count) 788 { 789 struct kevent_args *uap; 790 int error; 791 792 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 793 uap = (struct kevent_args *)arg; 794 795 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 796 if (error == 0) 797 uap->eventlist += count; 798 return (error); 799 } 800 801 /* 802 * Copy 'count' items from the list pointed to by uap->changelist. 803 */ 804 static int 805 kevent_copyin(void *arg, struct kevent *kevp, int count) 806 { 807 struct kevent_args *uap; 808 int error; 809 810 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 811 uap = (struct kevent_args *)arg; 812 813 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 814 if (error == 0) 815 uap->changelist += count; 816 return (error); 817 } 818 819 int 820 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 821 struct kevent_copyops *k_ops, const struct timespec *timeout) 822 { 823 struct kevent keva[KQ_NEVENTS]; 824 struct kevent *kevp, *changes; 825 struct kqueue *kq; 826 struct file *fp; 827 cap_rights_t rights; 828 int i, n, nerrors, error; 829 830 error = fget(td, fd, cap_rights_init(&rights, CAP_POST_EVENT), &fp); 831 if (error != 0) 832 return (error); 833 if ((error = kqueue_acquire(fp, &kq)) != 0) 834 goto done_norel; 835 836 nerrors = 0; 837 838 while (nchanges > 0) { 839 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 840 error = k_ops->k_copyin(k_ops->arg, keva, n); 841 if (error) 842 goto done; 843 changes = keva; 844 for (i = 0; i < n; i++) { 845 kevp = &changes[i]; 846 if (!kevp->filter) 847 continue; 848 kevp->flags &= ~EV_SYSFLAGS; 849 error = kqueue_register(kq, kevp, td, 1); 850 if (error || (kevp->flags & EV_RECEIPT)) { 851 if (nevents != 0) { 852 kevp->flags = EV_ERROR; 853 kevp->data = error; 854 (void) k_ops->k_copyout(k_ops->arg, 855 kevp, 1); 856 nevents--; 857 nerrors++; 858 } else { 859 goto done; 860 } 861 } 862 } 863 nchanges -= n; 864 } 865 if (nerrors) { 866 td->td_retval[0] = nerrors; 867 error = 0; 868 goto done; 869 } 870 871 error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td); 872 done: 873 kqueue_release(kq, 0); 874 done_norel: 875 fdrop(fp, td); 876 return (error); 877 } 878 879 int 880 kqueue_add_filteropts(int filt, struct filterops *filtops) 881 { 882 int error; 883 884 error = 0; 885 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 886 printf( 887 "trying to add a filterop that is out of range: %d is beyond %d\n", 888 ~filt, EVFILT_SYSCOUNT); 889 return EINVAL; 890 } 891 mtx_lock(&filterops_lock); 892 if (sysfilt_ops[~filt].for_fop != &null_filtops && 893 sysfilt_ops[~filt].for_fop != NULL) 894 error = EEXIST; 895 else { 896 sysfilt_ops[~filt].for_fop = filtops; 897 sysfilt_ops[~filt].for_refcnt = 0; 898 } 899 mtx_unlock(&filterops_lock); 900 901 return (error); 902 } 903 904 int 905 kqueue_del_filteropts(int filt) 906 { 907 int error; 908 909 error = 0; 910 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 911 return EINVAL; 912 913 mtx_lock(&filterops_lock); 914 if (sysfilt_ops[~filt].for_fop == &null_filtops || 915 sysfilt_ops[~filt].for_fop == NULL) 916 error = EINVAL; 917 else if (sysfilt_ops[~filt].for_refcnt != 0) 918 error = EBUSY; 919 else { 920 sysfilt_ops[~filt].for_fop = &null_filtops; 921 sysfilt_ops[~filt].for_refcnt = 0; 922 } 923 mtx_unlock(&filterops_lock); 924 925 return error; 926 } 927 928 static struct filterops * 929 kqueue_fo_find(int filt) 930 { 931 932 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 933 
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok determines
 * whether memory allocations may sleep; make sure it is 0 if you hold any
 * mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);		/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident,
		    cap_rights_init(&rights, CAP_POLL_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize.
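	 * A knote being attached, modified or dropped is marked KN_INFLUX by
	 * its owner; everyone else sets KQ_FLUXWAIT and sleeps on the kqueue
	 * until KQ_FLUX_WAKEUP() (see KQ_UNLOCK_FLUX()) is issued, then
	 * restarts the lookup from findkn.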
*/ 1052 if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1053 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1054 if (filedesc_unlock) { 1055 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1056 filedesc_unlock = 0; 1057 } 1058 kq->kq_state |= KQ_FLUXWAIT; 1059 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1060 if (fp != NULL) { 1061 fdrop(fp, td); 1062 fp = NULL; 1063 } 1064 goto findkn; 1065 } 1066 1067 /* 1068 * kn now contains the matching knote, or NULL if no match 1069 */ 1070 if (kn == NULL) { 1071 if (kev->flags & EV_ADD) { 1072 kn = tkn; 1073 tkn = NULL; 1074 if (kn == NULL) { 1075 KQ_UNLOCK(kq); 1076 error = ENOMEM; 1077 goto done; 1078 } 1079 kn->kn_fp = fp; 1080 kn->kn_kq = kq; 1081 kn->kn_fop = fops; 1082 /* 1083 * apply reference counts to knote structure, and 1084 * do not release it at the end of this routine. 1085 */ 1086 fops = NULL; 1087 fp = NULL; 1088 1089 kn->kn_sfflags = kev->fflags; 1090 kn->kn_sdata = kev->data; 1091 kev->fflags = 0; 1092 kev->data = 0; 1093 kn->kn_kevent = *kev; 1094 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1095 EV_ENABLE | EV_DISABLE); 1096 kn->kn_status = KN_INFLUX|KN_DETACHED; 1097 1098 error = knote_attach(kn, kq); 1099 KQ_UNLOCK(kq); 1100 if (error != 0) { 1101 tkn = kn; 1102 goto done; 1103 } 1104 1105 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1106 knote_drop(kn, td); 1107 goto done; 1108 } 1109 KN_LIST_LOCK(kn); 1110 goto done_ev_add; 1111 } else { 1112 /* No matching knote and the EV_ADD flag is not set. */ 1113 KQ_UNLOCK(kq); 1114 error = ENOENT; 1115 goto done; 1116 } 1117 } 1118 1119 if (kev->flags & EV_DELETE) { 1120 kn->kn_status |= KN_INFLUX; 1121 KQ_UNLOCK(kq); 1122 if (!(kn->kn_status & KN_DETACHED)) 1123 kn->kn_fop->f_detach(kn); 1124 knote_drop(kn, td); 1125 goto done; 1126 } 1127 1128 /* 1129 * The user may change some filter values after the initial EV_ADD, 1130 * but doing so will not reset any filter which has already been 1131 * triggered. 1132 */ 1133 kn->kn_status |= KN_INFLUX; 1134 KQ_UNLOCK(kq); 1135 KN_LIST_LOCK(kn); 1136 kn->kn_kevent.udata = kev->udata; 1137 if (!fops->f_isfd && fops->f_touch != NULL) { 1138 fops->f_touch(kn, kev, EVENT_REGISTER); 1139 } else { 1140 kn->kn_sfflags = kev->fflags; 1141 kn->kn_sdata = kev->data; 1142 } 1143 1144 /* 1145 * We can get here with kn->kn_knlist == NULL. This can happen when 1146 * the initial attach event decides that the event is "completed" 1147 * already. i.e. filt_procattach is called on a zombie process. It 1148 * will call filt_proc which will remove it from the list, and NULL 1149 * kn_knlist. 
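	 * That case is harmless here because KN_LIST_LOCK()/KN_LIST_UNLOCK()
	 * are defined above to be no-ops when kn_knlist is NULL.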
1150 */ 1151 done_ev_add: 1152 event = kn->kn_fop->f_event(kn, 0); 1153 KQ_LOCK(kq); 1154 if (event) 1155 KNOTE_ACTIVATE(kn, 1); 1156 kn->kn_status &= ~KN_INFLUX; 1157 KN_LIST_UNLOCK(kn); 1158 1159 if ((kev->flags & EV_DISABLE) && 1160 ((kn->kn_status & KN_DISABLED) == 0)) { 1161 kn->kn_status |= KN_DISABLED; 1162 } 1163 1164 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1165 kn->kn_status &= ~KN_DISABLED; 1166 if ((kn->kn_status & KN_ACTIVE) && 1167 ((kn->kn_status & KN_QUEUED) == 0)) 1168 knote_enqueue(kn); 1169 } 1170 KQ_UNLOCK_FLUX(kq); 1171 1172 done: 1173 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1174 if (filedesc_unlock) 1175 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1176 if (fp != NULL) 1177 fdrop(fp, td); 1178 if (tkn != NULL) 1179 knote_free(tkn); 1180 if (fops != NULL) 1181 kqueue_fo_release(filt); 1182 return (error); 1183 } 1184 1185 static int 1186 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1187 { 1188 int error; 1189 struct kqueue *kq; 1190 1191 error = 0; 1192 1193 kq = fp->f_data; 1194 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1195 return (EBADF); 1196 *kqp = kq; 1197 KQ_LOCK(kq); 1198 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1199 KQ_UNLOCK(kq); 1200 return (EBADF); 1201 } 1202 kq->kq_refcnt++; 1203 KQ_UNLOCK(kq); 1204 1205 return error; 1206 } 1207 1208 static void 1209 kqueue_release(struct kqueue *kq, int locked) 1210 { 1211 if (locked) 1212 KQ_OWNED(kq); 1213 else 1214 KQ_LOCK(kq); 1215 kq->kq_refcnt--; 1216 if (kq->kq_refcnt == 1) 1217 wakeup(&kq->kq_refcnt); 1218 if (!locked) 1219 KQ_UNLOCK(kq); 1220 } 1221 1222 static void 1223 kqueue_schedtask(struct kqueue *kq) 1224 { 1225 1226 KQ_OWNED(kq); 1227 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1228 ("scheduling kqueue task while draining")); 1229 1230 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1231 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1232 kq->kq_state |= KQ_TASKSCHED; 1233 } 1234 } 1235 1236 /* 1237 * Expand the kq to make sure we have storage for fops/ident pair. 1238 * 1239 * Return 0 on success (or no work necessary), return errno on failure. 1240 * 1241 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 1242 * If kqueue_register is called from a non-fd context, there usually/should 1243 * be no locks held. 1244 */ 1245 static int 1246 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1247 int waitok) 1248 { 1249 struct klist *list, *tmp_knhash, *to_free; 1250 u_long tmp_knhashmask; 1251 int size; 1252 int fd; 1253 int mflag = waitok ? 
M_WAITOK : M_NOWAIT; 1254 1255 KQ_NOTOWNED(kq); 1256 1257 to_free = NULL; 1258 if (fops->f_isfd) { 1259 fd = ident; 1260 if (kq->kq_knlistsize <= fd) { 1261 size = kq->kq_knlistsize; 1262 while (size <= fd) 1263 size += KQEXTENT; 1264 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1265 if (list == NULL) 1266 return ENOMEM; 1267 KQ_LOCK(kq); 1268 if (kq->kq_knlistsize > fd) { 1269 to_free = list; 1270 list = NULL; 1271 } else { 1272 if (kq->kq_knlist != NULL) { 1273 bcopy(kq->kq_knlist, list, 1274 kq->kq_knlistsize * sizeof(*list)); 1275 to_free = kq->kq_knlist; 1276 kq->kq_knlist = NULL; 1277 } 1278 bzero((caddr_t)list + 1279 kq->kq_knlistsize * sizeof(*list), 1280 (size - kq->kq_knlistsize) * sizeof(*list)); 1281 kq->kq_knlistsize = size; 1282 kq->kq_knlist = list; 1283 } 1284 KQ_UNLOCK(kq); 1285 } 1286 } else { 1287 if (kq->kq_knhashmask == 0) { 1288 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1289 &tmp_knhashmask); 1290 if (tmp_knhash == NULL) 1291 return ENOMEM; 1292 KQ_LOCK(kq); 1293 if (kq->kq_knhashmask == 0) { 1294 kq->kq_knhash = tmp_knhash; 1295 kq->kq_knhashmask = tmp_knhashmask; 1296 } else { 1297 to_free = tmp_knhash; 1298 } 1299 KQ_UNLOCK(kq); 1300 } 1301 } 1302 free(to_free, M_KQUEUE); 1303 1304 KQ_NOTOWNED(kq); 1305 return 0; 1306 } 1307 1308 static void 1309 kqueue_task(void *arg, int pending) 1310 { 1311 struct kqueue *kq; 1312 int haskqglobal; 1313 1314 haskqglobal = 0; 1315 kq = arg; 1316 1317 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1318 KQ_LOCK(kq); 1319 1320 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1321 1322 kq->kq_state &= ~KQ_TASKSCHED; 1323 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1324 wakeup(&kq->kq_state); 1325 } 1326 KQ_UNLOCK(kq); 1327 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1328 } 1329 1330 /* 1331 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1332 * We treat KN_MARKER knotes as if they are INFLUX. 1333 */ 1334 static int 1335 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1336 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1337 { 1338 struct kevent *kevp; 1339 struct knote *kn, *marker; 1340 sbintime_t asbt, rsbt; 1341 int count, error, haskqglobal, influx, nkev, touch; 1342 1343 count = maxevents; 1344 nkev = 0; 1345 error = 0; 1346 haskqglobal = 0; 1347 1348 if (maxevents == 0) 1349 goto done_nl; 1350 1351 rsbt = 0; 1352 if (tsp != NULL) { 1353 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1354 tsp->tv_nsec >= 1000000000) { 1355 error = EINVAL; 1356 goto done_nl; 1357 } 1358 if (timespecisset(tsp)) { 1359 if (tsp->tv_sec <= INT32_MAX) { 1360 rsbt = tstosbt(*tsp); 1361 if (TIMESEL(&asbt, rsbt)) 1362 asbt += tc_tick_sbt; 1363 if (asbt <= INT64_MAX - rsbt) 1364 asbt += rsbt; 1365 else 1366 asbt = 0; 1367 rsbt >>= tc_precexp; 1368 } else 1369 asbt = 0; 1370 } else 1371 asbt = -1; 1372 } else 1373 asbt = 0; 1374 marker = knote_alloc(1); 1375 if (marker == NULL) { 1376 error = ENOMEM; 1377 goto done_nl; 1378 } 1379 marker->kn_status = KN_MARKER; 1380 KQ_LOCK(kq); 1381 1382 retry: 1383 kevp = keva; 1384 if (kq->kq_count == 0) { 1385 if (asbt == -1) { 1386 error = EWOULDBLOCK; 1387 } else { 1388 kq->kq_state |= KQ_SLEEP; 1389 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1390 "kqread", asbt, rsbt, C_ABSOLUTE); 1391 } 1392 if (error == 0) 1393 goto retry; 1394 /* don't restart after signals... 
		 */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	struct thread *td)
{

	return (EINVAL);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
1589 */ 1590 #if 0 1591 struct kqueue *kq; 1592 1593 kq = fp->f_data; 1594 switch (cmd) { 1595 case FIOASYNC: 1596 if (*(int *)data) { 1597 kq->kq_state |= KQ_ASYNC; 1598 } else { 1599 kq->kq_state &= ~KQ_ASYNC; 1600 } 1601 return (0); 1602 1603 case FIOSETOWN: 1604 return (fsetown(*(int *)data, &kq->kq_sigio)); 1605 1606 case FIOGETOWN: 1607 *(int *)data = fgetown(&kq->kq_sigio); 1608 return (0); 1609 } 1610 #endif 1611 1612 return (ENOTTY); 1613 } 1614 1615 /*ARGSUSED*/ 1616 static int 1617 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 1618 struct thread *td) 1619 { 1620 struct kqueue *kq; 1621 int revents = 0; 1622 int error; 1623 1624 if ((error = kqueue_acquire(fp, &kq))) 1625 return POLLERR; 1626 1627 KQ_LOCK(kq); 1628 if (events & (POLLIN | POLLRDNORM)) { 1629 if (kq->kq_count) { 1630 revents |= events & (POLLIN | POLLRDNORM); 1631 } else { 1632 selrecord(td, &kq->kq_sel); 1633 if (SEL_WAITING(&kq->kq_sel)) 1634 kq->kq_state |= KQ_SEL; 1635 } 1636 } 1637 kqueue_release(kq, 1); 1638 KQ_UNLOCK(kq); 1639 return (revents); 1640 } 1641 1642 /*ARGSUSED*/ 1643 static int 1644 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 1645 struct thread *td) 1646 { 1647 1648 bzero((void *)st, sizeof *st); 1649 /* 1650 * We no longer return kq_count because the unlocked value is useless. 1651 * If you spent all this time getting the count, why not spend your 1652 * syscall better by calling kevent? 1653 * 1654 * XXX - This is needed for libc_r. 1655 */ 1656 st->st_mode = S_IFIFO; 1657 return (0); 1658 } 1659 1660 /*ARGSUSED*/ 1661 static int 1662 kqueue_close(struct file *fp, struct thread *td) 1663 { 1664 struct kqueue *kq = fp->f_data; 1665 struct filedesc *fdp; 1666 struct knote *kn; 1667 int i; 1668 int error; 1669 int filedesc_unlock; 1670 1671 if ((error = kqueue_acquire(fp, &kq))) 1672 return error; 1673 1674 filedesc_unlock = 0; 1675 KQ_LOCK(kq); 1676 1677 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1678 ("kqueue already closing")); 1679 kq->kq_state |= KQ_CLOSING; 1680 if (kq->kq_refcnt > 1) 1681 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1682 1683 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1684 fdp = kq->kq_fdp; 1685 1686 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1687 ("kqueue's knlist not empty")); 1688 1689 for (i = 0; i < kq->kq_knlistsize; i++) { 1690 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1691 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1692 kq->kq_state |= KQ_FLUXWAIT; 1693 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1694 continue; 1695 } 1696 kn->kn_status |= KN_INFLUX; 1697 KQ_UNLOCK(kq); 1698 if (!(kn->kn_status & KN_DETACHED)) 1699 kn->kn_fop->f_detach(kn); 1700 knote_drop(kn, td); 1701 KQ_LOCK(kq); 1702 } 1703 } 1704 if (kq->kq_knhashmask != 0) { 1705 for (i = 0; i <= kq->kq_knhashmask; i++) { 1706 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1707 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1708 kq->kq_state |= KQ_FLUXWAIT; 1709 msleep(kq, &kq->kq_lock, PSOCK, 1710 "kqclo2", 0); 1711 continue; 1712 } 1713 kn->kn_status |= KN_INFLUX; 1714 KQ_UNLOCK(kq); 1715 if (!(kn->kn_status & KN_DETACHED)) 1716 kn->kn_fop->f_detach(kn); 1717 knote_drop(kn, td); 1718 KQ_LOCK(kq); 1719 } 1720 } 1721 } 1722 1723 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1724 kq->kq_state |= KQ_TASKDRAIN; 1725 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1726 } 1727 1728 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1729 selwakeuppri(&kq->kq_sel, PSOCK); 1730 if 
(!SEL_WAITING(&kq->kq_sel)) 1731 kq->kq_state &= ~KQ_SEL; 1732 } 1733 1734 KQ_UNLOCK(kq); 1735 1736 /* 1737 * We could be called due to the knote_drop() doing fdrop(), 1738 * called from kqueue_register(). In this case the global 1739 * lock is owned, and filedesc sx is locked before, to not 1740 * take the sleepable lock after non-sleepable. 1741 */ 1742 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 1743 FILEDESC_XLOCK(fdp); 1744 filedesc_unlock = 1; 1745 } else 1746 filedesc_unlock = 0; 1747 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 1748 if (filedesc_unlock) 1749 FILEDESC_XUNLOCK(fdp); 1750 1751 seldrain(&kq->kq_sel); 1752 knlist_destroy(&kq->kq_sel.si_note); 1753 mtx_destroy(&kq->kq_lock); 1754 kq->kq_fdp = NULL; 1755 1756 if (kq->kq_knhash != NULL) 1757 free(kq->kq_knhash, M_KQUEUE); 1758 if (kq->kq_knlist != NULL) 1759 free(kq->kq_knlist, M_KQUEUE); 1760 1761 funsetown(&kq->kq_sigio); 1762 free(kq, M_KQUEUE); 1763 fp->f_data = NULL; 1764 1765 return (0); 1766 } 1767 1768 static void 1769 kqueue_wakeup(struct kqueue *kq) 1770 { 1771 KQ_OWNED(kq); 1772 1773 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1774 kq->kq_state &= ~KQ_SLEEP; 1775 wakeup(kq); 1776 } 1777 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1778 selwakeuppri(&kq->kq_sel, PSOCK); 1779 if (!SEL_WAITING(&kq->kq_sel)) 1780 kq->kq_state &= ~KQ_SEL; 1781 } 1782 if (!knlist_empty(&kq->kq_sel.si_note)) 1783 kqueue_schedtask(kq); 1784 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1785 pgsigio(&kq->kq_sigio, SIGIO, 0); 1786 } 1787 } 1788 1789 /* 1790 * Walk down a list of knotes, activating them if their event has triggered. 1791 * 1792 * There is a possibility to optimize in the case of one kq watching another. 1793 * Instead of scheduling a task to wake it up, you could pass enough state 1794 * down the chain to make up the parent kqueue. Make this code functional 1795 * first. 1796 */ 1797 void 1798 knote(struct knlist *list, long hint, int lockflags) 1799 { 1800 struct kqueue *kq; 1801 struct knote *kn; 1802 int error; 1803 1804 if (list == NULL) 1805 return; 1806 1807 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 1808 1809 if ((lockflags & KNF_LISTLOCKED) == 0) 1810 list->kl_lock(list->kl_lockarg); 1811 1812 /* 1813 * If we unlock the list lock (and set KN_INFLUX), we can eliminate 1814 * the kqueue scheduling, but this will introduce four 1815 * lock/unlock's for each knote to test. If we do, continue to use 1816 * SLIST_FOREACH, SLIST_FOREACH_SAFE is not safe in our case, it is 1817 * only safe if you want to remove the current item, which we are 1818 * not doing. 
1819 */ 1820 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 1821 kq = kn->kn_kq; 1822 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) { 1823 KQ_LOCK(kq); 1824 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1825 KQ_UNLOCK(kq); 1826 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 1827 kn->kn_status |= KN_INFLUX; 1828 KQ_UNLOCK(kq); 1829 error = kn->kn_fop->f_event(kn, hint); 1830 KQ_LOCK(kq); 1831 kn->kn_status &= ~KN_INFLUX; 1832 if (error) 1833 KNOTE_ACTIVATE(kn, 1); 1834 KQ_UNLOCK_FLUX(kq); 1835 } else { 1836 kn->kn_status |= KN_HASKQLOCK; 1837 if (kn->kn_fop->f_event(kn, hint)) 1838 KNOTE_ACTIVATE(kn, 1); 1839 kn->kn_status &= ~KN_HASKQLOCK; 1840 KQ_UNLOCK(kq); 1841 } 1842 } 1843 kq = NULL; 1844 } 1845 if ((lockflags & KNF_LISTLOCKED) == 0) 1846 list->kl_unlock(list->kl_lockarg); 1847 } 1848 1849 /* 1850 * add a knote to a knlist 1851 */ 1852 void 1853 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 1854 { 1855 KNL_ASSERT_LOCK(knl, islocked); 1856 KQ_NOTOWNED(kn->kn_kq); 1857 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == 1858 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED")); 1859 if (!islocked) 1860 knl->kl_lock(knl->kl_lockarg); 1861 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 1862 if (!islocked) 1863 knl->kl_unlock(knl->kl_lockarg); 1864 KQ_LOCK(kn->kn_kq); 1865 kn->kn_knlist = knl; 1866 kn->kn_status &= ~KN_DETACHED; 1867 KQ_UNLOCK(kn->kn_kq); 1868 } 1869 1870 static void 1871 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) 1872 { 1873 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked")); 1874 KNL_ASSERT_LOCK(knl, knlislocked); 1875 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 1876 if (!kqislocked) 1877 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX, 1878 ("knlist_remove called w/o knote being KN_INFLUX or already removed")); 1879 if (!knlislocked) 1880 knl->kl_lock(knl->kl_lockarg); 1881 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 1882 kn->kn_knlist = NULL; 1883 if (!knlislocked) 1884 knl->kl_unlock(knl->kl_lockarg); 1885 if (!kqislocked) 1886 KQ_LOCK(kn->kn_kq); 1887 kn->kn_status |= KN_DETACHED; 1888 if (!kqislocked) 1889 KQ_UNLOCK(kn->kn_kq); 1890 } 1891 1892 /* 1893 * remove knote from the specified knlist 1894 */ 1895 void 1896 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 1897 { 1898 1899 knlist_remove_kq(knl, kn, islocked, 0); 1900 } 1901 1902 /* 1903 * remove knote from the specified knlist while in f_event handler. 
1904 */ 1905 void 1906 knlist_remove_inevent(struct knlist *knl, struct knote *kn) 1907 { 1908 1909 knlist_remove_kq(knl, kn, 1, 1910 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 1911 } 1912 1913 int 1914 knlist_empty(struct knlist *knl) 1915 { 1916 1917 KNL_ASSERT_LOCKED(knl); 1918 return SLIST_EMPTY(&knl->kl_list); 1919 } 1920 1921 static struct mtx knlist_lock; 1922 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 1923 MTX_DEF); 1924 static void knlist_mtx_lock(void *arg); 1925 static void knlist_mtx_unlock(void *arg); 1926 1927 static void 1928 knlist_mtx_lock(void *arg) 1929 { 1930 1931 mtx_lock((struct mtx *)arg); 1932 } 1933 1934 static void 1935 knlist_mtx_unlock(void *arg) 1936 { 1937 1938 mtx_unlock((struct mtx *)arg); 1939 } 1940 1941 static void 1942 knlist_mtx_assert_locked(void *arg) 1943 { 1944 1945 mtx_assert((struct mtx *)arg, MA_OWNED); 1946 } 1947 1948 static void 1949 knlist_mtx_assert_unlocked(void *arg) 1950 { 1951 1952 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 1953 } 1954 1955 static void 1956 knlist_rw_rlock(void *arg) 1957 { 1958 1959 rw_rlock((struct rwlock *)arg); 1960 } 1961 1962 static void 1963 knlist_rw_runlock(void *arg) 1964 { 1965 1966 rw_runlock((struct rwlock *)arg); 1967 } 1968 1969 static void 1970 knlist_rw_assert_locked(void *arg) 1971 { 1972 1973 rw_assert((struct rwlock *)arg, RA_LOCKED); 1974 } 1975 1976 static void 1977 knlist_rw_assert_unlocked(void *arg) 1978 { 1979 1980 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 1981 } 1982 1983 void 1984 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 1985 void (*kl_unlock)(void *), 1986 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 1987 { 1988 1989 if (lock == NULL) 1990 knl->kl_lockarg = &knlist_lock; 1991 else 1992 knl->kl_lockarg = lock; 1993 1994 if (kl_lock == NULL) 1995 knl->kl_lock = knlist_mtx_lock; 1996 else 1997 knl->kl_lock = kl_lock; 1998 if (kl_unlock == NULL) 1999 knl->kl_unlock = knlist_mtx_unlock; 2000 else 2001 knl->kl_unlock = kl_unlock; 2002 if (kl_assert_locked == NULL) 2003 knl->kl_assert_locked = knlist_mtx_assert_locked; 2004 else 2005 knl->kl_assert_locked = kl_assert_locked; 2006 if (kl_assert_unlocked == NULL) 2007 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2008 else 2009 knl->kl_assert_unlocked = kl_assert_unlocked; 2010 2011 SLIST_INIT(&knl->kl_list); 2012 } 2013 2014 void 2015 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2016 { 2017 2018 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2019 } 2020 2021 void 2022 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2023 { 2024 2025 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2026 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2027 } 2028 2029 void 2030 knlist_destroy(struct knlist *knl) 2031 { 2032 2033 #ifdef INVARIANTS 2034 /* 2035 * if we run across this error, we need to find the offending 2036 * driver and have it call knlist_clear or knlist_delete. 2037 */ 2038 if (!SLIST_EMPTY(&knl->kl_list)) 2039 printf("WARNING: destroying knlist w/ knotes on it!\n"); 2040 #endif 2041 2042 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 2043 SLIST_INIT(&knl->kl_list); 2044 } 2045 2046 /* 2047 * Even if we are locked, we may need to drop the lock to allow any influx 2048 * knotes time to "settle". 
2049 */ 2050 void 2051 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2052 { 2053 struct knote *kn, *kn2; 2054 struct kqueue *kq; 2055 2056 if (islocked) 2057 KNL_ASSERT_LOCKED(knl); 2058 else { 2059 KNL_ASSERT_UNLOCKED(knl); 2060 again: /* need to reacquire lock since we have dropped it */ 2061 knl->kl_lock(knl->kl_lockarg); 2062 } 2063 2064 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2065 kq = kn->kn_kq; 2066 KQ_LOCK(kq); 2067 if ((kn->kn_status & KN_INFLUX)) { 2068 KQ_UNLOCK(kq); 2069 continue; 2070 } 2071 knlist_remove_kq(knl, kn, 1, 1); 2072 if (killkn) { 2073 kn->kn_status |= KN_INFLUX | KN_DETACHED; 2074 KQ_UNLOCK(kq); 2075 knote_drop(kn, td); 2076 } else { 2077 /* Make sure cleared knotes disappear soon */ 2078 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 2079 KQ_UNLOCK(kq); 2080 } 2081 kq = NULL; 2082 } 2083 2084 if (!SLIST_EMPTY(&knl->kl_list)) { 2085 /* there are still KN_INFLUX remaining */ 2086 kn = SLIST_FIRST(&knl->kl_list); 2087 kq = kn->kn_kq; 2088 KQ_LOCK(kq); 2089 KASSERT(kn->kn_status & KN_INFLUX, 2090 ("knote removed w/o list lock")); 2091 knl->kl_unlock(knl->kl_lockarg); 2092 kq->kq_state |= KQ_FLUXWAIT; 2093 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2094 kq = NULL; 2095 goto again; 2096 } 2097 2098 if (islocked) 2099 KNL_ASSERT_LOCKED(knl); 2100 else { 2101 knl->kl_unlock(knl->kl_lockarg); 2102 KNL_ASSERT_UNLOCKED(knl); 2103 } 2104 } 2105 2106 /* 2107 * Remove all knotes referencing a specified fd must be called with FILEDESC 2108 * lock. This prevents a race where a new fd comes along and occupies the 2109 * entry and we attach a knote to the fd. 2110 */ 2111 void 2112 knote_fdclose(struct thread *td, int fd) 2113 { 2114 struct filedesc *fdp = td->td_proc->p_fd; 2115 struct kqueue *kq; 2116 struct knote *kn; 2117 int influx; 2118 2119 FILEDESC_XLOCK_ASSERT(fdp); 2120 2121 /* 2122 * We shouldn't have to worry about new kevents appearing on fd 2123 * since filedesc is locked. 2124 */ 2125 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2126 KQ_LOCK(kq); 2127 2128 again: 2129 influx = 0; 2130 while (kq->kq_knlistsize > fd && 2131 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2132 if (kn->kn_status & KN_INFLUX) { 2133 /* someone else might be waiting on our knote */ 2134 if (influx) 2135 wakeup(kq); 2136 kq->kq_state |= KQ_FLUXWAIT; 2137 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2138 goto again; 2139 } 2140 kn->kn_status |= KN_INFLUX; 2141 KQ_UNLOCK(kq); 2142 if (!(kn->kn_status & KN_DETACHED)) 2143 kn->kn_fop->f_detach(kn); 2144 knote_drop(kn, td); 2145 influx = 1; 2146 KQ_LOCK(kq); 2147 } 2148 KQ_UNLOCK_FLUX(kq); 2149 } 2150 } 2151 2152 static int 2153 knote_attach(struct knote *kn, struct kqueue *kq) 2154 { 2155 struct klist *list; 2156 2157 KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX")); 2158 KQ_OWNED(kq); 2159 2160 if (kn->kn_fop->f_isfd) { 2161 if (kn->kn_id >= kq->kq_knlistsize) 2162 return ENOMEM; 2163 list = &kq->kq_knlist[kn->kn_id]; 2164 } else { 2165 if (kq->kq_knhash == NULL) 2166 return ENOMEM; 2167 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2168 } 2169 2170 SLIST_INSERT_HEAD(list, kn, kn_link); 2171 2172 return 0; 2173 } 2174 2175 /* 2176 * knote must already have been detached using the f_detach method. 2177 * no lock need to be held, it is assumed that the KN_INFLUX flag is set 2178 * to prevent other removal. 
2179 */ 2180 static void 2181 knote_drop(struct knote *kn, struct thread *td) 2182 { 2183 struct kqueue *kq; 2184 struct klist *list; 2185 2186 kq = kn->kn_kq; 2187 2188 KQ_NOTOWNED(kq); 2189 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX, 2190 ("knote_drop called without KN_INFLUX set in kn_status")); 2191 2192 KQ_LOCK(kq); 2193 if (kn->kn_fop->f_isfd) 2194 list = &kq->kq_knlist[kn->kn_id]; 2195 else 2196 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2197 2198 if (!SLIST_EMPTY(list)) 2199 SLIST_REMOVE(list, kn, knote, kn_link); 2200 if (kn->kn_status & KN_QUEUED) 2201 knote_dequeue(kn); 2202 KQ_UNLOCK_FLUX(kq); 2203 2204 if (kn->kn_fop->f_isfd) { 2205 fdrop(kn->kn_fp, td); 2206 kn->kn_fp = NULL; 2207 } 2208 kqueue_fo_release(kn->kn_kevent.filter); 2209 kn->kn_fop = NULL; 2210 knote_free(kn); 2211 } 2212 2213 static void 2214 knote_enqueue(struct knote *kn) 2215 { 2216 struct kqueue *kq = kn->kn_kq; 2217 2218 KQ_OWNED(kn->kn_kq); 2219 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2220 2221 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2222 kn->kn_status |= KN_QUEUED; 2223 kq->kq_count++; 2224 kqueue_wakeup(kq); 2225 } 2226 2227 static void 2228 knote_dequeue(struct knote *kn) 2229 { 2230 struct kqueue *kq = kn->kn_kq; 2231 2232 KQ_OWNED(kn->kn_kq); 2233 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2234 2235 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2236 kn->kn_status &= ~KN_QUEUED; 2237 kq->kq_count--; 2238 } 2239 2240 static void 2241 knote_init(void) 2242 { 2243 2244 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2245 NULL, NULL, UMA_ALIGN_PTR, 0); 2246 } 2247 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2248 2249 static struct knote * 2250 knote_alloc(int waitok) 2251 { 2252 return ((struct knote *)uma_zalloc(knote_zone, 2253 (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO)); 2254 } 2255 2256 static void 2257 knote_free(struct knote *kn) 2258 { 2259 if (kn != NULL) 2260 uma_zfree(knote_zone, kn); 2261 } 2262 2263 /* 2264 * Register the kev w/ the kq specified by fd. 2265 */ 2266 int 2267 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2268 { 2269 struct kqueue *kq; 2270 struct file *fp; 2271 cap_rights_t rights; 2272 int error; 2273 2274 error = fget(td, fd, cap_rights_init(&rights, CAP_POST_EVENT), &fp); 2275 if (error != 0) 2276 return (error); 2277 if ((error = kqueue_acquire(fp, &kq)) != 0) 2278 goto noacquire; 2279 2280 error = kqueue_register(kq, kev, td, waitok); 2281 2282 kqueue_release(kq, 0); 2283 2284 noacquire: 2285 fdrop(fp, td); 2286 2287 return error; 2288 } 2289