/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capability.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/stdatomic.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);

static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_truncate_t	kqueue_truncate;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_truncate = kqueue_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while(0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
#define KN_LIST_LOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define KN_LIST_UNLOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while(0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#define	KN_HASHSIZE	64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops },			/* EVFILT_READ */
	{ &file_filtops },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops },			/* EVFILT_VNODE */
	{ &proc_filtops },			/* EVFILT_PROC */
	{ &sig_filtops },			/* EVFILT_SIGNAL */
	{ &timer_filtops },			/* EVFILT_TIMER */
	{ &null_filtops },			/* former EVFILT_NETDEV */
	{ &fs_filtops },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops },			/* EVFILT_USER */
};

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		immediate = 1;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	if (immediate == 0)
		knlist_add(&p->p_klist, kn, 1);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	p = kn->kn_ptr.p_proc;
	knlist_remove(&p->p_klist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p = kn->kn_ptr.p_proc;
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		if (!(kn->kn_status & KN_DETACHED))
			knlist_remove_inevent(&p->p_klist, kn);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_data = p->p_xstat;
		kn->kn_ptr.p_proc = NULL;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  Mostly does the same as knote(), activating
 * all knotes registered to be activated when the process forks.  In
 * addition, for each knote attached to the parent, check whether the user
 * wants to track the new process.  If so, attach a new knote to the child
 * and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
			continue;
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new event to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register a knote with
		 * new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
			KNOTE_ACTIVATE(kn, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		KQ_LOCK(kq);
		kn->kn_status &= ~KN_INFLUX;
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */
static __inline sbintime_t
timer2sbintime(intptr_t data)
{

	return (SBT_1MS * data);
}

static void
filt_timerexpire(void *knx)
{
	struct callout *calloutp;
	struct knote *kn;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset_sbt_on(calloutp,
		    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
		    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	unsigned int ncallouts;

	ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
	do {
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
	    &ncallouts, ncallouts + 1, memory_order_relaxed,
	    memory_order_relaxed));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add usually sets it */
	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp, CALLOUT_MPSAFE);
	kn->kn_hook = calloutp;
	callout_reset_sbt_on(calloutp,
	    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
	    filt_timerexpire, kn, PCPU_GET(cpuid), 0);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;
	unsigned int old;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	free(calloutp, M_KQUEUE);
	old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd, 0);
	if (error)
		goto done2;

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	kq->kq_fdp = fdp;
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);

	FILEDESC_XLOCK(fdp);
	SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
done2:
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct timespec ts, *tsp;
	struct kevent_copyops k_ops = { uap,
					kevent_copyout,
					kevent_copyin};
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    &k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	struct kqueue *kq;
	struct file *fp;
	int i, n, nerrors, error;

	if ((error = fget(td, fd, CAP_POST_EVENT, &fp)) != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto done_norel;

	nerrors = 0;

	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			goto done;
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) k_ops->k_copyout(k_ops->arg,
					    kevp, 1);
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
done:
	kqueue_release(kq, 0);
done_norel:
	fdrop(fp, td);
	return (error);
}
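
/*
 * Example (userland, for illustration only): a minimal caller of the
 * kevent(2) interface implemented by kern_kevent() above might register
 * a read filter and wait for it like this (fd 0 is just an example):
 *
 *	struct kevent change, event;
 *	int kq = kqueue();
 *	EV_SET(&change, 0, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &change, 1, &event, 1, NULL) > 0)
 *		printf("%jd bytes ready on fd %d\n",
 *		    (intmax_t)event.data, (int)event.ident);
 */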

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence if memory allocation should wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	int error, filt, event;
	int haskqglobal;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);		/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident, CAP_POLL_EVENT, &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE);
			kn->kn_status = KN_INFLUX|KN_DETACHED;

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
			KN_LIST_LOCK(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		if (!(kn->kn_status & KN_DETACHED))
			kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_INFLUX;
	KQ_UNLOCK(kq);
	KN_LIST_LOCK(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, i.e. filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	event = kn->kn_fop->f_event(kn, 0);
	KQ_LOCK(kq);
	if (event)
		KNOTE_ACTIVATE(kn, 1);
	kn->kn_status &= ~KN_INFLUX;
	KN_LIST_UNLOCK(kn);

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (fp != NULL)
		fdrop(fp, td);
	if (tkn != NULL)
		knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}
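
/*
 * Arrange for kqueue_task() to run on the kqueue taskqueue.  This is how
 * knotes hanging off kq_sel.si_note (e.g. another kqueue monitoring this
 * one) are activated from kqueue_wakeup() without recursing into their
 * locks; KQ_TASKSCHED ensures at most one callback is outstanding.
 */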

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
	int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are INFLUX.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= INT64_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	if (marker == NULL) {
		error = ENOMEM;
		goto done_nl;
	}
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	struct thread *td)
{

	return (EINVAL);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and have FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}
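
/*
 * Release the resources backing a kqueue descriptor on close: wait for
 * other references to drain, detach and drop every attached knote, drain
 * the notification task and any selecters, then unhook the kqueue from the
 * process's fd table and free its storage.
 */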

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	struct knote *kn;
	int i;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;

	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
	fdp = kq->kq_fdp;

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
				continue;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
					kq->kq_state |= KQ_FLUXWAIT;
					msleep(kq, &kq->kq_lock, PSOCK,
					    "kqclo2", 0);
					continue;
				}
				kn->kn_status |= KN_INFLUX;
				KQ_UNLOCK(kq);
				if (!(kn->kn_status & KN_DETACHED))
					kn->kn_fop->f_detach(kn);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}

	KQ_UNLOCK(kq);

	FILEDESC_XLOCK(fdp);
	SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
	FILEDESC_XUNLOCK(fdp);

	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);
	kq->kq_fdp = NULL;

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to make up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int lockflags)
{
	struct kqueue *kq;
	struct knote *kn;
	int error;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);

	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
	 * the kqueue scheduling, but this will introduce four
	 * lock/unlock's for each knote to test.  If we do, continue to use
	 * SLIST_FOREACH, SLIST_FOREACH_SAFE is not safe in our case, it is
	 * only safe if you want to remove the current item, which we are
	 * not doing.
	 */
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
			KQ_LOCK(kq);
			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
				KQ_UNLOCK(kq);
			} else if ((lockflags & KNF_NOKQLOCK) != 0) {
				kn->kn_status |= KN_INFLUX;
				KQ_UNLOCK(kq);
				error = kn->kn_fop->f_event(kn, hint);
				KQ_LOCK(kq);
				kn->kn_status &= ~KN_INFLUX;
				if (error)
					KNOTE_ACTIVATE(kn, 1);
				KQ_UNLOCK_FLUX(kq);
			} else {
				kn->kn_status |= KN_HASKQLOCK;
				if (kn->kn_fop->f_event(kn, hint))
					KNOTE_ACTIVATE(kn, 1);
				kn->kn_status &= ~KN_HASKQLOCK;
				KQ_UNLOCK(kq);
			}
		}
		kq = NULL;
	}
	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_unlock(list->kl_lockarg);
}

/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{
	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
{
	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	if (!kqislocked)
		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
		    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		knl->kl_unlock(knl->kl_lockarg);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * remove a knote from a specified knlist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

/*
 * remove a knote from a specified knlist while in f_event handler
 */
void
knlist_remove_inevent(struct knlist *knl, struct knote *kn)
{

	knlist_remove_kq(knl, kn, 1,
	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
}

int
knlist_empty(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	return SLIST_EMPTY(&knl->kl_list);
}

static struct mtx	knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
	MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);

static void
knlist_mtx_lock(void *arg)
{

	mtx_lock((struct mtx *)arg);
}

static void
knlist_mtx_unlock(void *arg)
{

	mtx_unlock((struct mtx *)arg);
}

static void
knlist_mtx_assert_locked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_OWNED);
}

static void
knlist_mtx_assert_unlocked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
}

static void
knlist_rw_rlock(void *arg)
{

	rw_rlock((struct rwlock *)arg);
}

static void
knlist_rw_runlock(void *arg)
{

	rw_runlock((struct rwlock *)arg);
}

static void
knlist_rw_assert_locked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_LOCKED);
}

static void
knlist_rw_assert_unlocked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
}

void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_locked == NULL)
		knl->kl_assert_locked = knlist_mtx_assert_locked;
	else
		knl->kl_assert_locked = kl_assert_locked;
	if (kl_assert_unlocked == NULL)
		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
	else
		knl->kl_assert_unlocked = kl_assert_unlocked;

	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
}

void
knlist_destroy(struct knlist *knl)
{

#ifdef INVARIANTS
	/*
	 * if we run across this error, we need to find the offending
	 * driver and have it call knlist_clear.
	 */
	if (!SLIST_EMPTY(&knl->kl_list))
		printf("WARNING: destroying knlist w/ knotes on it!\n");
#endif

	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
	SLIST_INIT(&knl->kl_list);
}

/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd.  Must be called with the
 * FILEDESC lock held.  This prevents a race where a new fd comes along
 * and occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}

/*
 * knote must already have been detached using the f_detach method.
 * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 * to prevent other removal.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KQ_NOTOWNED(kq);
	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
	    ("knote_drop called without KN_INFLUX set in kn_status"));

	KQ_LOCK(kq);
	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{
	return ((struct knote *)uma_zalloc(knote_zone,
	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
}

static void
knote_free(struct knote *kn)
{
	if (kn != NULL)
		uma_zfree(knote_zone, kn);
}

/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	int error;

	if ((error = fget(td, fd, CAP_POST_EVENT, &fp)) != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);

	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);

	return error;
}