1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * Copyright (c) 2009 Apple, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_ktrace.h" 33 #include "opt_kqueue.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/capability.h> 38 #include <sys/kernel.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/rwlock.h> 42 #include <sys/proc.h> 43 #include <sys/malloc.h> 44 #include <sys/unistd.h> 45 #include <sys/file.h> 46 #include <sys/filedesc.h> 47 #include <sys/filio.h> 48 #include <sys/fcntl.h> 49 #include <sys/kthread.h> 50 #include <sys/selinfo.h> 51 #include <sys/stdatomic.h> 52 #include <sys/queue.h> 53 #include <sys/event.h> 54 #include <sys/eventvar.h> 55 #include <sys/poll.h> 56 #include <sys/protosw.h> 57 #include <sys/resourcevar.h> 58 #include <sys/sigio.h> 59 #include <sys/signalvar.h> 60 #include <sys/socket.h> 61 #include <sys/socketvar.h> 62 #include <sys/stat.h> 63 #include <sys/sysctl.h> 64 #include <sys/sysproto.h> 65 #include <sys/syscallsubr.h> 66 #include <sys/taskqueue.h> 67 #include <sys/uio.h> 68 #ifdef KTRACE 69 #include <sys/ktrace.h> 70 #endif 71 72 #include <vm/uma.h> 73 74 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 75 76 /* 77 * This lock is used if multiple kq locks are required. This possibly 78 * should be made into a per proc lock. 
79 */ 80 static struct mtx kq_global; 81 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 82 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 83 if (!haslck) \ 84 mtx_lock(lck); \ 85 haslck = 1; \ 86 } while (0) 87 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 88 if (haslck) \ 89 mtx_unlock(lck); \ 90 haslck = 0; \ 91 } while (0) 92 93 TASKQUEUE_DEFINE_THREAD(kqueue); 94 95 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 96 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 97 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 98 struct thread *td, int waitok); 99 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 100 static void kqueue_release(struct kqueue *kq, int locked); 101 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 102 uintptr_t ident, int waitok); 103 static void kqueue_task(void *arg, int pending); 104 static int kqueue_scan(struct kqueue *kq, int maxevents, 105 struct kevent_copyops *k_ops, 106 const struct timespec *timeout, 107 struct kevent *keva, struct thread *td); 108 static void kqueue_wakeup(struct kqueue *kq); 109 static struct filterops *kqueue_fo_find(int filt); 110 static void kqueue_fo_release(int filt); 111 112 static fo_rdwr_t kqueue_read; 113 static fo_rdwr_t kqueue_write; 114 static fo_truncate_t kqueue_truncate; 115 static fo_ioctl_t kqueue_ioctl; 116 static fo_poll_t kqueue_poll; 117 static fo_kqfilter_t kqueue_kqfilter; 118 static fo_stat_t kqueue_stat; 119 static fo_close_t kqueue_close; 120 121 static struct fileops kqueueops = { 122 .fo_read = kqueue_read, 123 .fo_write = kqueue_write, 124 .fo_truncate = kqueue_truncate, 125 .fo_ioctl = kqueue_ioctl, 126 .fo_poll = kqueue_poll, 127 .fo_kqfilter = kqueue_kqfilter, 128 .fo_stat = kqueue_stat, 129 .fo_close = kqueue_close, 130 .fo_chmod = invfo_chmod, 131 .fo_chown = invfo_chown, 132 .fo_sendfile = invfo_sendfile, 133 }; 134 135 static int knote_attach(struct knote *kn, struct kqueue *kq); 136 static void knote_drop(struct knote *kn, struct thread *td); 137 static void knote_enqueue(struct knote *kn); 138 static void knote_dequeue(struct knote *kn); 139 static void knote_init(void); 140 static struct knote *knote_alloc(int waitok); 141 static void knote_free(struct knote *kn); 142 143 static void filt_kqdetach(struct knote *kn); 144 static int filt_kqueue(struct knote *kn, long hint); 145 static int filt_procattach(struct knote *kn); 146 static void filt_procdetach(struct knote *kn); 147 static int filt_proc(struct knote *kn, long hint); 148 static int filt_fileattach(struct knote *kn); 149 static void filt_timerexpire(void *knx); 150 static int filt_timerattach(struct knote *kn); 151 static void filt_timerdetach(struct knote *kn); 152 static int filt_timer(struct knote *kn, long hint); 153 static int filt_userattach(struct knote *kn); 154 static void filt_userdetach(struct knote *kn); 155 static int filt_user(struct knote *kn, long hint); 156 static void filt_usertouch(struct knote *kn, struct kevent *kev, 157 u_long type); 158 159 static struct filterops file_filtops = { 160 .f_isfd = 1, 161 .f_attach = filt_fileattach, 162 }; 163 static struct filterops kqread_filtops = { 164 .f_isfd = 1, 165 .f_detach = filt_kqdetach, 166 .f_event = filt_kqueue, 167 }; 168 /* XXX - move to kern_proc.c? 
*/ 169 static struct filterops proc_filtops = { 170 .f_isfd = 0, 171 .f_attach = filt_procattach, 172 .f_detach = filt_procdetach, 173 .f_event = filt_proc, 174 }; 175 static struct filterops timer_filtops = { 176 .f_isfd = 0, 177 .f_attach = filt_timerattach, 178 .f_detach = filt_timerdetach, 179 .f_event = filt_timer, 180 }; 181 static struct filterops user_filtops = { 182 .f_attach = filt_userattach, 183 .f_detach = filt_userdetach, 184 .f_event = filt_user, 185 .f_touch = filt_usertouch, 186 }; 187 188 static uma_zone_t knote_zone; 189 static atomic_uint kq_ncallouts = ATOMIC_VAR_INIT(0); 190 static unsigned int kq_calloutmax = 4 * 1024; 191 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 192 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 193 194 /* XXX - ensure not KN_INFLUX?? */ 195 #define KNOTE_ACTIVATE(kn, islock) do { \ 196 if ((islock)) \ 197 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 198 else \ 199 KQ_LOCK((kn)->kn_kq); \ 200 (kn)->kn_status |= KN_ACTIVE; \ 201 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 202 knote_enqueue((kn)); \ 203 if (!(islock)) \ 204 KQ_UNLOCK((kn)->kn_kq); \ 205 } while(0) 206 #define KQ_LOCK(kq) do { \ 207 mtx_lock(&(kq)->kq_lock); \ 208 } while (0) 209 #define KQ_FLUX_WAKEUP(kq) do { \ 210 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 211 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 212 wakeup((kq)); \ 213 } \ 214 } while (0) 215 #define KQ_UNLOCK_FLUX(kq) do { \ 216 KQ_FLUX_WAKEUP(kq); \ 217 mtx_unlock(&(kq)->kq_lock); \ 218 } while (0) 219 #define KQ_UNLOCK(kq) do { \ 220 mtx_unlock(&(kq)->kq_lock); \ 221 } while (0) 222 #define KQ_OWNED(kq) do { \ 223 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 224 } while (0) 225 #define KQ_NOTOWNED(kq) do { \ 226 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 227 } while (0) 228 #define KN_LIST_LOCK(kn) do { \ 229 if (kn->kn_knlist != NULL) \ 230 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg); \ 231 } while (0) 232 #define KN_LIST_UNLOCK(kn) do { \ 233 if (kn->kn_knlist != NULL) \ 234 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg); \ 235 } while (0) 236 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 237 if (islocked) \ 238 KNL_ASSERT_LOCKED(knl); \ 239 else \ 240 KNL_ASSERT_UNLOCKED(knl); \ 241 } while (0) 242 #ifdef INVARIANTS 243 #define KNL_ASSERT_LOCKED(knl) do { \ 244 knl->kl_assert_locked((knl)->kl_lockarg); \ 245 } while (0) 246 #define KNL_ASSERT_UNLOCKED(knl) do { \ 247 knl->kl_assert_unlocked((knl)->kl_lockarg); \ 248 } while (0) 249 #else /* !INVARIANTS */ 250 #define KNL_ASSERT_LOCKED(knl) do {} while(0) 251 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 252 #endif /* INVARIANTS */ 253 254 #ifndef KN_HASHSIZE 255 #define KN_HASHSIZE 64 /* XXX should be tunable */ 256 #endif 257 258 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 259 260 static int 261 filt_nullattach(struct knote *kn) 262 { 263 264 return (ENXIO); 265 }; 266 267 struct filterops null_filtops = { 268 .f_isfd = 0, 269 .f_attach = filt_nullattach, 270 }; 271 272 /* XXX - make SYSINIT to add these, and move into respective modules. */ 273 extern struct filterops sig_filtops; 274 extern struct filterops fs_filtops; 275 276 /* 277 * Table for for all system-defined filters. 
278 */ 279 static struct mtx filterops_lock; 280 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 281 MTX_DEF); 282 static struct { 283 struct filterops *for_fop; 284 int for_refcnt; 285 } sysfilt_ops[EVFILT_SYSCOUNT] = { 286 { &file_filtops }, /* EVFILT_READ */ 287 { &file_filtops }, /* EVFILT_WRITE */ 288 { &null_filtops }, /* EVFILT_AIO */ 289 { &file_filtops }, /* EVFILT_VNODE */ 290 { &proc_filtops }, /* EVFILT_PROC */ 291 { &sig_filtops }, /* EVFILT_SIGNAL */ 292 { &timer_filtops }, /* EVFILT_TIMER */ 293 { &null_filtops }, /* former EVFILT_NETDEV */ 294 { &fs_filtops }, /* EVFILT_FS */ 295 { &null_filtops }, /* EVFILT_LIO */ 296 { &user_filtops }, /* EVFILT_USER */ 297 }; 298 299 /* 300 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 301 * method. 302 */ 303 static int 304 filt_fileattach(struct knote *kn) 305 { 306 307 return (fo_kqfilter(kn->kn_fp, kn)); 308 } 309 310 /*ARGSUSED*/ 311 static int 312 kqueue_kqfilter(struct file *fp, struct knote *kn) 313 { 314 struct kqueue *kq = kn->kn_fp->f_data; 315 316 if (kn->kn_filter != EVFILT_READ) 317 return (EINVAL); 318 319 kn->kn_status |= KN_KQUEUE; 320 kn->kn_fop = &kqread_filtops; 321 knlist_add(&kq->kq_sel.si_note, kn, 0); 322 323 return (0); 324 } 325 326 static void 327 filt_kqdetach(struct knote *kn) 328 { 329 struct kqueue *kq = kn->kn_fp->f_data; 330 331 knlist_remove(&kq->kq_sel.si_note, kn, 0); 332 } 333 334 /*ARGSUSED*/ 335 static int 336 filt_kqueue(struct knote *kn, long hint) 337 { 338 struct kqueue *kq = kn->kn_fp->f_data; 339 340 kn->kn_data = kq->kq_count; 341 return (kn->kn_data > 0); 342 } 343 344 /* XXX - move to kern_proc.c? */ 345 static int 346 filt_procattach(struct knote *kn) 347 { 348 struct proc *p; 349 int immediate; 350 int error; 351 352 immediate = 0; 353 p = pfind(kn->kn_id); 354 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 355 p = zpfind(kn->kn_id); 356 immediate = 1; 357 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 358 immediate = 1; 359 } 360 361 if (p == NULL) 362 return (ESRCH); 363 if ((error = p_cansee(curthread, p))) { 364 PROC_UNLOCK(p); 365 return (error); 366 } 367 368 kn->kn_ptr.p_proc = p; 369 kn->kn_flags |= EV_CLEAR; /* automatically set */ 370 371 /* 372 * internal flag indicating registration done by kernel 373 */ 374 if (kn->kn_flags & EV_FLAG1) { 375 kn->kn_data = kn->kn_sdata; /* ppid */ 376 kn->kn_fflags = NOTE_CHILD; 377 kn->kn_flags &= ~EV_FLAG1; 378 } 379 380 if (immediate == 0) 381 knlist_add(&p->p_klist, kn, 1); 382 383 /* 384 * Immediately activate any exit notes if the target process is a 385 * zombie. This is necessary to handle the case where the target 386 * process, e.g. a child, dies before the kevent is registered. 387 */ 388 if (immediate && filt_proc(kn, NOTE_EXIT)) 389 KNOTE_ACTIVATE(kn, 0); 390 391 PROC_UNLOCK(p); 392 393 return (0); 394 } 395 396 /* 397 * The knote may be attached to a different process, which may exit, 398 * leaving nothing for the knote to be attached to. So when the process 399 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 400 * it will be deleted when read out. However, as part of the knote deletion, 401 * this routine is called, so a check is needed to avoid actually performing 402 * a detach, because the original process does not exist any more. 403 */ 404 /* XXX - move to kern_proc.c? 
*/ 405 static void 406 filt_procdetach(struct knote *kn) 407 { 408 struct proc *p; 409 410 p = kn->kn_ptr.p_proc; 411 knlist_remove(&p->p_klist, kn, 0); 412 kn->kn_ptr.p_proc = NULL; 413 } 414 415 /* XXX - move to kern_proc.c? */ 416 static int 417 filt_proc(struct knote *kn, long hint) 418 { 419 struct proc *p = kn->kn_ptr.p_proc; 420 u_int event; 421 422 /* 423 * mask off extra data 424 */ 425 event = (u_int)hint & NOTE_PCTRLMASK; 426 427 /* 428 * if the user is interested in this event, record it. 429 */ 430 if (kn->kn_sfflags & event) 431 kn->kn_fflags |= event; 432 433 /* 434 * process is gone, so flag the event as finished. 435 */ 436 if (event == NOTE_EXIT) { 437 if (!(kn->kn_status & KN_DETACHED)) 438 knlist_remove_inevent(&p->p_klist, kn); 439 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 440 kn->kn_ptr.p_proc = NULL; 441 if (kn->kn_fflags & NOTE_EXIT) 442 kn->kn_data = p->p_xstat; 443 if (kn->kn_fflags == 0) 444 kn->kn_flags |= EV_DROP; 445 return (1); 446 } 447 448 return (kn->kn_fflags != 0); 449 } 450 451 /* 452 * Called when the process forked. It mostly does the same as the 453 * knote(), activating all knotes registered to be activated when the 454 * process forked. Additionally, for each knote attached to the 455 * parent, check whether user wants to track the new process. If so 456 * attach a new knote to it, and immediately report an event with the 457 * child's pid. 458 */ 459 void 460 knote_fork(struct knlist *list, int pid) 461 { 462 struct kqueue *kq; 463 struct knote *kn; 464 struct kevent kev; 465 int error; 466 467 if (list == NULL) 468 return; 469 list->kl_lock(list->kl_lockarg); 470 471 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 472 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 473 continue; 474 kq = kn->kn_kq; 475 KQ_LOCK(kq); 476 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 477 KQ_UNLOCK(kq); 478 continue; 479 } 480 481 /* 482 * The same as knote(), activate the event. 483 */ 484 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 485 kn->kn_status |= KN_HASKQLOCK; 486 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 487 KNOTE_ACTIVATE(kn, 1); 488 kn->kn_status &= ~KN_HASKQLOCK; 489 KQ_UNLOCK(kq); 490 continue; 491 } 492 493 /* 494 * The NOTE_TRACK case. In addition to the activation 495 * of the event, we need to register new event to 496 * track the child. Drop the locks in preparation for 497 * the call to kqueue_register(). 498 */ 499 kn->kn_status |= KN_INFLUX; 500 KQ_UNLOCK(kq); 501 list->kl_unlock(list->kl_lockarg); 502 503 /* 504 * Activate existing knote and register a knote with 505 * new process. 506 */ 507 kev.ident = pid; 508 kev.filter = kn->kn_filter; 509 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 510 kev.fflags = kn->kn_sfflags; 511 kev.data = kn->kn_id; /* parent */ 512 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 513 error = kqueue_register(kq, &kev, NULL, 0); 514 if (error) 515 kn->kn_fflags |= NOTE_TRACKERR; 516 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 517 KNOTE_ACTIVATE(kn, 0); 518 KQ_LOCK(kq); 519 kn->kn_status &= ~KN_INFLUX; 520 KQ_UNLOCK_FLUX(kq); 521 list->kl_lock(list->kl_lockarg); 522 } 523 list->kl_unlock(list->kl_lockarg); 524 } 525 526 /* 527 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 528 * interval timer support code. 
529 */ 530 static __inline sbintime_t 531 timer2sbintime(intptr_t data) 532 { 533 534 #ifdef __LP64__ 535 if (data > INT64_MAX / SBT_1MS) 536 return INT64_MAX; 537 #endif 538 return (SBT_1MS * data); 539 } 540 541 static void 542 filt_timerexpire(void *knx) 543 { 544 struct callout *calloutp; 545 struct knote *kn; 546 547 kn = knx; 548 kn->kn_data++; 549 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 550 551 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) { 552 calloutp = (struct callout *)kn->kn_hook; 553 callout_reset_sbt_on(calloutp, 554 timer2sbintime(kn->kn_sdata), 0 /* 1ms? */, 555 filt_timerexpire, kn, PCPU_GET(cpuid), 0); 556 } 557 } 558 559 /* 560 * data contains amount of time to sleep, in milliseconds 561 */ 562 static int 563 filt_timerattach(struct knote *kn) 564 { 565 struct callout *calloutp; 566 sbintime_t to; 567 unsigned int ncallouts; 568 569 if ((intptr_t)kn->kn_sdata < 0) 570 return (EINVAL); 571 if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 572 kn->kn_sdata = 1; 573 to = timer2sbintime(kn->kn_sdata); 574 if (to < 0) 575 return (EINVAL); 576 577 ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed); 578 do { 579 if (ncallouts >= kq_calloutmax) 580 return (ENOMEM); 581 } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts, 582 &ncallouts, ncallouts + 1, memory_order_relaxed, 583 memory_order_relaxed)); 584 585 kn->kn_flags |= EV_CLEAR; /* automatically set */ 586 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 587 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 588 callout_init(calloutp, CALLOUT_MPSAFE); 589 kn->kn_hook = calloutp; 590 callout_reset_sbt_on(calloutp, to, 0 /* 1ms? */, 591 filt_timerexpire, kn, PCPU_GET(cpuid), 0); 592 593 return (0); 594 } 595 596 static void 597 filt_timerdetach(struct knote *kn) 598 { 599 struct callout *calloutp; 600 unsigned int old; 601 602 calloutp = (struct callout *)kn->kn_hook; 603 callout_drain(calloutp); 604 free(calloutp, M_KQUEUE); 605 old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed); 606 KASSERT(old > 0, ("Number of callouts cannot become negative")); 607 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 608 } 609 610 static int 611 filt_timer(struct knote *kn, long hint) 612 { 613 614 return (kn->kn_data != 0); 615 } 616 617 static int 618 filt_userattach(struct knote *kn) 619 { 620 621 /* 622 * EVFILT_USER knotes are not attached to anything in the kernel. 623 */ 624 kn->kn_hook = NULL; 625 if (kn->kn_fflags & NOTE_TRIGGER) 626 kn->kn_hookid = 1; 627 else 628 kn->kn_hookid = 0; 629 return (0); 630 } 631 632 static void 633 filt_userdetach(__unused struct knote *kn) 634 { 635 636 /* 637 * EVFILT_USER knotes are not attached to anything in the kernel. 638 */ 639 } 640 641 static int 642 filt_user(struct knote *kn, __unused long hint) 643 { 644 645 return (kn->kn_hookid); 646 } 647 648 static void 649 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 650 { 651 u_int ffctrl; 652 653 switch (type) { 654 case EVENT_REGISTER: 655 if (kev->fflags & NOTE_TRIGGER) 656 kn->kn_hookid = 1; 657 658 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 659 kev->fflags &= NOTE_FFLAGSMASK; 660 switch (ffctrl) { 661 case NOTE_FFNOP: 662 break; 663 664 case NOTE_FFAND: 665 kn->kn_sfflags &= kev->fflags; 666 break; 667 668 case NOTE_FFOR: 669 kn->kn_sfflags |= kev->fflags; 670 break; 671 672 case NOTE_FFCOPY: 673 kn->kn_sfflags = kev->fflags; 674 break; 675 676 default: 677 /* XXX Return error? 
*/ 678 break; 679 } 680 kn->kn_sdata = kev->data; 681 if (kev->flags & EV_CLEAR) { 682 kn->kn_hookid = 0; 683 kn->kn_data = 0; 684 kn->kn_fflags = 0; 685 } 686 break; 687 688 case EVENT_PROCESS: 689 *kev = kn->kn_kevent; 690 kev->fflags = kn->kn_sfflags; 691 kev->data = kn->kn_sdata; 692 if (kn->kn_flags & EV_CLEAR) { 693 kn->kn_hookid = 0; 694 kn->kn_data = 0; 695 kn->kn_fflags = 0; 696 } 697 break; 698 699 default: 700 panic("filt_usertouch() - invalid type (%ld)", type); 701 break; 702 } 703 } 704 705 int 706 sys_kqueue(struct thread *td, struct kqueue_args *uap) 707 { 708 struct filedesc *fdp; 709 struct kqueue *kq; 710 struct file *fp; 711 struct proc *p; 712 struct ucred *cred; 713 int fd, error; 714 715 p = td->td_proc; 716 cred = td->td_ucred; 717 crhold(cred); 718 PROC_LOCK(p); 719 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td->td_proc, 720 RLIMIT_KQUEUES))) { 721 PROC_UNLOCK(p); 722 crfree(cred); 723 return (ENOMEM); 724 } 725 PROC_UNLOCK(p); 726 727 fdp = p->p_fd; 728 error = falloc(td, &fp, &fd, 0); 729 if (error) 730 goto done2; 731 732 /* An extra reference on `fp' has been held for us by falloc(). */ 733 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 734 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK); 735 TAILQ_INIT(&kq->kq_head); 736 kq->kq_fdp = fdp; 737 kq->kq_cred = cred; 738 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 739 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 740 741 FILEDESC_XLOCK(fdp); 742 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 743 FILEDESC_XUNLOCK(fdp); 744 745 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 746 fdrop(fp, td); 747 748 td->td_retval[0] = fd; 749 done2: 750 if (error != 0) { 751 chgkqcnt(cred->cr_ruidinfo, -1, 0); 752 crfree(cred); 753 } 754 return (error); 755 } 756 757 #ifndef _SYS_SYSPROTO_H_ 758 struct kevent_args { 759 int fd; 760 const struct kevent *changelist; 761 int nchanges; 762 struct kevent *eventlist; 763 int nevents; 764 const struct timespec *timeout; 765 }; 766 #endif 767 int 768 sys_kevent(struct thread *td, struct kevent_args *uap) 769 { 770 struct timespec ts, *tsp; 771 struct kevent_copyops k_ops = { uap, 772 kevent_copyout, 773 kevent_copyin}; 774 int error; 775 #ifdef KTRACE 776 struct uio ktruio; 777 struct iovec ktriov; 778 struct uio *ktruioin = NULL; 779 struct uio *ktruioout = NULL; 780 #endif 781 782 if (uap->timeout != NULL) { 783 error = copyin(uap->timeout, &ts, sizeof(ts)); 784 if (error) 785 return (error); 786 tsp = &ts; 787 } else 788 tsp = NULL; 789 790 #ifdef KTRACE 791 if (KTRPOINT(td, KTR_GENIO)) { 792 ktriov.iov_base = uap->changelist; 793 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 794 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 795 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 796 .uio_td = td }; 797 ktruioin = cloneuio(&ktruio); 798 ktriov.iov_base = uap->eventlist; 799 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 800 ktruioout = cloneuio(&ktruio); 801 } 802 #endif 803 804 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 805 &k_ops, tsp); 806 807 #ifdef KTRACE 808 if (ktruioin != NULL) { 809 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 810 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 811 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 812 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 813 } 814 #endif 815 816 return (error); 817 } 818 819 /* 820 * Copy 'count' items into the destination list pointed to by uap->eventlist. 
821 */ 822 static int 823 kevent_copyout(void *arg, struct kevent *kevp, int count) 824 { 825 struct kevent_args *uap; 826 int error; 827 828 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 829 uap = (struct kevent_args *)arg; 830 831 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 832 if (error == 0) 833 uap->eventlist += count; 834 return (error); 835 } 836 837 /* 838 * Copy 'count' items from the list pointed to by uap->changelist. 839 */ 840 static int 841 kevent_copyin(void *arg, struct kevent *kevp, int count) 842 { 843 struct kevent_args *uap; 844 int error; 845 846 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 847 uap = (struct kevent_args *)arg; 848 849 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 850 if (error == 0) 851 uap->changelist += count; 852 return (error); 853 } 854 855 int 856 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 857 struct kevent_copyops *k_ops, const struct timespec *timeout) 858 { 859 struct kevent keva[KQ_NEVENTS]; 860 struct kevent *kevp, *changes; 861 struct kqueue *kq; 862 struct file *fp; 863 cap_rights_t rights; 864 int i, n, nerrors, error; 865 866 cap_rights_init(&rights); 867 if (nchanges > 0) 868 cap_rights_set(&rights, CAP_KQUEUE_CHANGE); 869 if (nevents > 0) 870 cap_rights_set(&rights, CAP_KQUEUE_EVENT); 871 error = fget(td, fd, &rights, &fp); 872 if (error != 0) 873 return (error); 874 875 error = kqueue_acquire(fp, &kq); 876 if (error != 0) 877 goto done_norel; 878 879 nerrors = 0; 880 881 while (nchanges > 0) { 882 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 883 error = k_ops->k_copyin(k_ops->arg, keva, n); 884 if (error) 885 goto done; 886 changes = keva; 887 for (i = 0; i < n; i++) { 888 kevp = &changes[i]; 889 if (!kevp->filter) 890 continue; 891 kevp->flags &= ~EV_SYSFLAGS; 892 error = kqueue_register(kq, kevp, td, 1); 893 if (error || (kevp->flags & EV_RECEIPT)) { 894 if (nevents != 0) { 895 kevp->flags = EV_ERROR; 896 kevp->data = error; 897 (void) k_ops->k_copyout(k_ops->arg, 898 kevp, 1); 899 nevents--; 900 nerrors++; 901 } else { 902 goto done; 903 } 904 } 905 } 906 nchanges -= n; 907 } 908 if (nerrors) { 909 td->td_retval[0] = nerrors; 910 error = 0; 911 goto done; 912 } 913 914 error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td); 915 done: 916 kqueue_release(kq, 0); 917 done_norel: 918 fdrop(fp, td); 919 return (error); 920 } 921 922 int 923 kqueue_add_filteropts(int filt, struct filterops *filtops) 924 { 925 int error; 926 927 error = 0; 928 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 929 printf( 930 "trying to add a filterop that is out of range: %d is beyond %d\n", 931 ~filt, EVFILT_SYSCOUNT); 932 return EINVAL; 933 } 934 mtx_lock(&filterops_lock); 935 if (sysfilt_ops[~filt].for_fop != &null_filtops && 936 sysfilt_ops[~filt].for_fop != NULL) 937 error = EEXIST; 938 else { 939 sysfilt_ops[~filt].for_fop = filtops; 940 sysfilt_ops[~filt].for_refcnt = 0; 941 } 942 mtx_unlock(&filterops_lock); 943 944 return (error); 945 } 946 947 int 948 kqueue_del_filteropts(int filt) 949 { 950 int error; 951 952 error = 0; 953 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 954 return EINVAL; 955 956 mtx_lock(&filterops_lock); 957 if (sysfilt_ops[~filt].for_fop == &null_filtops || 958 sysfilt_ops[~filt].for_fop == NULL) 959 error = EINVAL; 960 else if (sysfilt_ops[~filt].for_refcnt != 0) 961 error = EBUSY; 962 else { 963 sysfilt_ops[~filt].for_fop = &null_filtops; 964 sysfilt_ops[~filt].for_refcnt = 0; 965 } 966 mtx_unlock(&filterops_lock); 967 968 
	return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence whether memory allocation should wait.  Make sure it is 0 if
 * you hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);		/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident,
		    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* The knote is in the process of changing, wait for it to stabilize.
*/ 1095 if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1096 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1097 if (filedesc_unlock) { 1098 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1099 filedesc_unlock = 0; 1100 } 1101 kq->kq_state |= KQ_FLUXWAIT; 1102 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1103 if (fp != NULL) { 1104 fdrop(fp, td); 1105 fp = NULL; 1106 } 1107 goto findkn; 1108 } 1109 1110 /* 1111 * kn now contains the matching knote, or NULL if no match 1112 */ 1113 if (kn == NULL) { 1114 if (kev->flags & EV_ADD) { 1115 kn = tkn; 1116 tkn = NULL; 1117 if (kn == NULL) { 1118 KQ_UNLOCK(kq); 1119 error = ENOMEM; 1120 goto done; 1121 } 1122 kn->kn_fp = fp; 1123 kn->kn_kq = kq; 1124 kn->kn_fop = fops; 1125 /* 1126 * apply reference counts to knote structure, and 1127 * do not release it at the end of this routine. 1128 */ 1129 fops = NULL; 1130 fp = NULL; 1131 1132 kn->kn_sfflags = kev->fflags; 1133 kn->kn_sdata = kev->data; 1134 kev->fflags = 0; 1135 kev->data = 0; 1136 kn->kn_kevent = *kev; 1137 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1138 EV_ENABLE | EV_DISABLE); 1139 kn->kn_status = KN_INFLUX|KN_DETACHED; 1140 1141 error = knote_attach(kn, kq); 1142 KQ_UNLOCK(kq); 1143 if (error != 0) { 1144 tkn = kn; 1145 goto done; 1146 } 1147 1148 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1149 knote_drop(kn, td); 1150 goto done; 1151 } 1152 KN_LIST_LOCK(kn); 1153 goto done_ev_add; 1154 } else { 1155 /* No matching knote and the EV_ADD flag is not set. */ 1156 KQ_UNLOCK(kq); 1157 error = ENOENT; 1158 goto done; 1159 } 1160 } 1161 1162 if (kev->flags & EV_DELETE) { 1163 kn->kn_status |= KN_INFLUX; 1164 KQ_UNLOCK(kq); 1165 if (!(kn->kn_status & KN_DETACHED)) 1166 kn->kn_fop->f_detach(kn); 1167 knote_drop(kn, td); 1168 goto done; 1169 } 1170 1171 /* 1172 * The user may change some filter values after the initial EV_ADD, 1173 * but doing so will not reset any filter which has already been 1174 * triggered. 1175 */ 1176 kn->kn_status |= KN_INFLUX; 1177 KQ_UNLOCK(kq); 1178 KN_LIST_LOCK(kn); 1179 kn->kn_kevent.udata = kev->udata; 1180 if (!fops->f_isfd && fops->f_touch != NULL) { 1181 fops->f_touch(kn, kev, EVENT_REGISTER); 1182 } else { 1183 kn->kn_sfflags = kev->fflags; 1184 kn->kn_sdata = kev->data; 1185 } 1186 1187 /* 1188 * We can get here with kn->kn_knlist == NULL. This can happen when 1189 * the initial attach event decides that the event is "completed" 1190 * already. i.e. filt_procattach is called on a zombie process. It 1191 * will call filt_proc which will remove it from the list, and NULL 1192 * kn_knlist. 
1193 */ 1194 done_ev_add: 1195 event = kn->kn_fop->f_event(kn, 0); 1196 KQ_LOCK(kq); 1197 if (event) 1198 KNOTE_ACTIVATE(kn, 1); 1199 kn->kn_status &= ~KN_INFLUX; 1200 KN_LIST_UNLOCK(kn); 1201 1202 if ((kev->flags & EV_DISABLE) && 1203 ((kn->kn_status & KN_DISABLED) == 0)) { 1204 kn->kn_status |= KN_DISABLED; 1205 } 1206 1207 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1208 kn->kn_status &= ~KN_DISABLED; 1209 if ((kn->kn_status & KN_ACTIVE) && 1210 ((kn->kn_status & KN_QUEUED) == 0)) 1211 knote_enqueue(kn); 1212 } 1213 KQ_UNLOCK_FLUX(kq); 1214 1215 done: 1216 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1217 if (filedesc_unlock) 1218 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1219 if (fp != NULL) 1220 fdrop(fp, td); 1221 if (tkn != NULL) 1222 knote_free(tkn); 1223 if (fops != NULL) 1224 kqueue_fo_release(filt); 1225 return (error); 1226 } 1227 1228 static int 1229 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1230 { 1231 int error; 1232 struct kqueue *kq; 1233 1234 error = 0; 1235 1236 kq = fp->f_data; 1237 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1238 return (EBADF); 1239 *kqp = kq; 1240 KQ_LOCK(kq); 1241 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1242 KQ_UNLOCK(kq); 1243 return (EBADF); 1244 } 1245 kq->kq_refcnt++; 1246 KQ_UNLOCK(kq); 1247 1248 return error; 1249 } 1250 1251 static void 1252 kqueue_release(struct kqueue *kq, int locked) 1253 { 1254 if (locked) 1255 KQ_OWNED(kq); 1256 else 1257 KQ_LOCK(kq); 1258 kq->kq_refcnt--; 1259 if (kq->kq_refcnt == 1) 1260 wakeup(&kq->kq_refcnt); 1261 if (!locked) 1262 KQ_UNLOCK(kq); 1263 } 1264 1265 static void 1266 kqueue_schedtask(struct kqueue *kq) 1267 { 1268 1269 KQ_OWNED(kq); 1270 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1271 ("scheduling kqueue task while draining")); 1272 1273 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1274 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1275 kq->kq_state |= KQ_TASKSCHED; 1276 } 1277 } 1278 1279 /* 1280 * Expand the kq to make sure we have storage for fops/ident pair. 1281 * 1282 * Return 0 on success (or no work necessary), return errno on failure. 1283 * 1284 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 1285 * If kqueue_register is called from a non-fd context, there usually/should 1286 * be no locks held. 1287 */ 1288 static int 1289 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1290 int waitok) 1291 { 1292 struct klist *list, *tmp_knhash, *to_free; 1293 u_long tmp_knhashmask; 1294 int size; 1295 int fd; 1296 int mflag = waitok ? 
M_WAITOK : M_NOWAIT; 1297 1298 KQ_NOTOWNED(kq); 1299 1300 to_free = NULL; 1301 if (fops->f_isfd) { 1302 fd = ident; 1303 if (kq->kq_knlistsize <= fd) { 1304 size = kq->kq_knlistsize; 1305 while (size <= fd) 1306 size += KQEXTENT; 1307 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1308 if (list == NULL) 1309 return ENOMEM; 1310 KQ_LOCK(kq); 1311 if (kq->kq_knlistsize > fd) { 1312 to_free = list; 1313 list = NULL; 1314 } else { 1315 if (kq->kq_knlist != NULL) { 1316 bcopy(kq->kq_knlist, list, 1317 kq->kq_knlistsize * sizeof(*list)); 1318 to_free = kq->kq_knlist; 1319 kq->kq_knlist = NULL; 1320 } 1321 bzero((caddr_t)list + 1322 kq->kq_knlistsize * sizeof(*list), 1323 (size - kq->kq_knlistsize) * sizeof(*list)); 1324 kq->kq_knlistsize = size; 1325 kq->kq_knlist = list; 1326 } 1327 KQ_UNLOCK(kq); 1328 } 1329 } else { 1330 if (kq->kq_knhashmask == 0) { 1331 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1332 &tmp_knhashmask); 1333 if (tmp_knhash == NULL) 1334 return ENOMEM; 1335 KQ_LOCK(kq); 1336 if (kq->kq_knhashmask == 0) { 1337 kq->kq_knhash = tmp_knhash; 1338 kq->kq_knhashmask = tmp_knhashmask; 1339 } else { 1340 to_free = tmp_knhash; 1341 } 1342 KQ_UNLOCK(kq); 1343 } 1344 } 1345 free(to_free, M_KQUEUE); 1346 1347 KQ_NOTOWNED(kq); 1348 return 0; 1349 } 1350 1351 static void 1352 kqueue_task(void *arg, int pending) 1353 { 1354 struct kqueue *kq; 1355 int haskqglobal; 1356 1357 haskqglobal = 0; 1358 kq = arg; 1359 1360 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1361 KQ_LOCK(kq); 1362 1363 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1364 1365 kq->kq_state &= ~KQ_TASKSCHED; 1366 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1367 wakeup(&kq->kq_state); 1368 } 1369 KQ_UNLOCK(kq); 1370 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1371 } 1372 1373 /* 1374 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1375 * We treat KN_MARKER knotes as if they are INFLUX. 1376 */ 1377 static int 1378 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1379 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1380 { 1381 struct kevent *kevp; 1382 struct knote *kn, *marker; 1383 sbintime_t asbt, rsbt; 1384 int count, error, haskqglobal, influx, nkev, touch; 1385 1386 count = maxevents; 1387 nkev = 0; 1388 error = 0; 1389 haskqglobal = 0; 1390 1391 if (maxevents == 0) 1392 goto done_nl; 1393 1394 rsbt = 0; 1395 if (tsp != NULL) { 1396 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1397 tsp->tv_nsec >= 1000000000) { 1398 error = EINVAL; 1399 goto done_nl; 1400 } 1401 if (timespecisset(tsp)) { 1402 if (tsp->tv_sec <= INT32_MAX) { 1403 rsbt = tstosbt(*tsp); 1404 if (TIMESEL(&asbt, rsbt)) 1405 asbt += tc_tick_sbt; 1406 if (asbt <= INT64_MAX - rsbt) 1407 asbt += rsbt; 1408 else 1409 asbt = 0; 1410 rsbt >>= tc_precexp; 1411 } else 1412 asbt = 0; 1413 } else 1414 asbt = -1; 1415 } else 1416 asbt = 0; 1417 marker = knote_alloc(1); 1418 if (marker == NULL) { 1419 error = ENOMEM; 1420 goto done_nl; 1421 } 1422 marker->kn_status = KN_MARKER; 1423 KQ_LOCK(kq); 1424 1425 retry: 1426 kevp = keva; 1427 if (kq->kq_count == 0) { 1428 if (asbt == -1) { 1429 error = EWOULDBLOCK; 1430 } else { 1431 kq->kq_state |= KQ_SLEEP; 1432 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1433 "kqread", asbt, rsbt, C_ABSOLUTE); 1434 } 1435 if (error == 0) 1436 goto retry; 1437 /* don't restart after signals... 
		 */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{

	return (EINVAL);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
1632 */ 1633 #if 0 1634 struct kqueue *kq; 1635 1636 kq = fp->f_data; 1637 switch (cmd) { 1638 case FIOASYNC: 1639 if (*(int *)data) { 1640 kq->kq_state |= KQ_ASYNC; 1641 } else { 1642 kq->kq_state &= ~KQ_ASYNC; 1643 } 1644 return (0); 1645 1646 case FIOSETOWN: 1647 return (fsetown(*(int *)data, &kq->kq_sigio)); 1648 1649 case FIOGETOWN: 1650 *(int *)data = fgetown(&kq->kq_sigio); 1651 return (0); 1652 } 1653 #endif 1654 1655 return (ENOTTY); 1656 } 1657 1658 /*ARGSUSED*/ 1659 static int 1660 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 1661 struct thread *td) 1662 { 1663 struct kqueue *kq; 1664 int revents = 0; 1665 int error; 1666 1667 if ((error = kqueue_acquire(fp, &kq))) 1668 return POLLERR; 1669 1670 KQ_LOCK(kq); 1671 if (events & (POLLIN | POLLRDNORM)) { 1672 if (kq->kq_count) { 1673 revents |= events & (POLLIN | POLLRDNORM); 1674 } else { 1675 selrecord(td, &kq->kq_sel); 1676 if (SEL_WAITING(&kq->kq_sel)) 1677 kq->kq_state |= KQ_SEL; 1678 } 1679 } 1680 kqueue_release(kq, 1); 1681 KQ_UNLOCK(kq); 1682 return (revents); 1683 } 1684 1685 /*ARGSUSED*/ 1686 static int 1687 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 1688 struct thread *td) 1689 { 1690 1691 bzero((void *)st, sizeof *st); 1692 /* 1693 * We no longer return kq_count because the unlocked value is useless. 1694 * If you spent all this time getting the count, why not spend your 1695 * syscall better by calling kevent? 1696 * 1697 * XXX - This is needed for libc_r. 1698 */ 1699 st->st_mode = S_IFIFO; 1700 return (0); 1701 } 1702 1703 /*ARGSUSED*/ 1704 static int 1705 kqueue_close(struct file *fp, struct thread *td) 1706 { 1707 struct kqueue *kq = fp->f_data; 1708 struct filedesc *fdp; 1709 struct knote *kn; 1710 int i; 1711 int error; 1712 int filedesc_unlock; 1713 1714 if ((error = kqueue_acquire(fp, &kq))) 1715 return error; 1716 1717 filedesc_unlock = 0; 1718 KQ_LOCK(kq); 1719 1720 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1721 ("kqueue already closing")); 1722 kq->kq_state |= KQ_CLOSING; 1723 if (kq->kq_refcnt > 1) 1724 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1725 1726 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1727 fdp = kq->kq_fdp; 1728 1729 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1730 ("kqueue's knlist not empty")); 1731 1732 for (i = 0; i < kq->kq_knlistsize; i++) { 1733 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1734 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1735 kq->kq_state |= KQ_FLUXWAIT; 1736 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1737 continue; 1738 } 1739 kn->kn_status |= KN_INFLUX; 1740 KQ_UNLOCK(kq); 1741 if (!(kn->kn_status & KN_DETACHED)) 1742 kn->kn_fop->f_detach(kn); 1743 knote_drop(kn, td); 1744 KQ_LOCK(kq); 1745 } 1746 } 1747 if (kq->kq_knhashmask != 0) { 1748 for (i = 0; i <= kq->kq_knhashmask; i++) { 1749 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1750 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1751 kq->kq_state |= KQ_FLUXWAIT; 1752 msleep(kq, &kq->kq_lock, PSOCK, 1753 "kqclo2", 0); 1754 continue; 1755 } 1756 kn->kn_status |= KN_INFLUX; 1757 KQ_UNLOCK(kq); 1758 if (!(kn->kn_status & KN_DETACHED)) 1759 kn->kn_fop->f_detach(kn); 1760 knote_drop(kn, td); 1761 KQ_LOCK(kq); 1762 } 1763 } 1764 } 1765 1766 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1767 kq->kq_state |= KQ_TASKDRAIN; 1768 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1769 } 1770 1771 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1772 selwakeuppri(&kq->kq_sel, PSOCK); 1773 if 
(!SEL_WAITING(&kq->kq_sel)) 1774 kq->kq_state &= ~KQ_SEL; 1775 } 1776 1777 KQ_UNLOCK(kq); 1778 1779 /* 1780 * We could be called due to the knote_drop() doing fdrop(), 1781 * called from kqueue_register(). In this case the global 1782 * lock is owned, and filedesc sx is locked before, to not 1783 * take the sleepable lock after non-sleepable. 1784 */ 1785 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 1786 FILEDESC_XLOCK(fdp); 1787 filedesc_unlock = 1; 1788 } else 1789 filedesc_unlock = 0; 1790 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 1791 if (filedesc_unlock) 1792 FILEDESC_XUNLOCK(fdp); 1793 1794 seldrain(&kq->kq_sel); 1795 knlist_destroy(&kq->kq_sel.si_note); 1796 mtx_destroy(&kq->kq_lock); 1797 kq->kq_fdp = NULL; 1798 1799 if (kq->kq_knhash != NULL) 1800 free(kq->kq_knhash, M_KQUEUE); 1801 if (kq->kq_knlist != NULL) 1802 free(kq->kq_knlist, M_KQUEUE); 1803 1804 funsetown(&kq->kq_sigio); 1805 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 1806 crfree(kq->kq_cred); 1807 free(kq, M_KQUEUE); 1808 fp->f_data = NULL; 1809 1810 return (0); 1811 } 1812 1813 static void 1814 kqueue_wakeup(struct kqueue *kq) 1815 { 1816 KQ_OWNED(kq); 1817 1818 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1819 kq->kq_state &= ~KQ_SLEEP; 1820 wakeup(kq); 1821 } 1822 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1823 selwakeuppri(&kq->kq_sel, PSOCK); 1824 if (!SEL_WAITING(&kq->kq_sel)) 1825 kq->kq_state &= ~KQ_SEL; 1826 } 1827 if (!knlist_empty(&kq->kq_sel.si_note)) 1828 kqueue_schedtask(kq); 1829 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1830 pgsigio(&kq->kq_sigio, SIGIO, 0); 1831 } 1832 } 1833 1834 /* 1835 * Walk down a list of knotes, activating them if their event has triggered. 1836 * 1837 * There is a possibility to optimize in the case of one kq watching another. 1838 * Instead of scheduling a task to wake it up, you could pass enough state 1839 * down the chain to make up the parent kqueue. Make this code functional 1840 * first. 1841 */ 1842 void 1843 knote(struct knlist *list, long hint, int lockflags) 1844 { 1845 struct kqueue *kq; 1846 struct knote *kn; 1847 int error; 1848 1849 if (list == NULL) 1850 return; 1851 1852 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 1853 1854 if ((lockflags & KNF_LISTLOCKED) == 0) 1855 list->kl_lock(list->kl_lockarg); 1856 1857 /* 1858 * If we unlock the list lock (and set KN_INFLUX), we can eliminate 1859 * the kqueue scheduling, but this will introduce four 1860 * lock/unlock's for each knote to test. If we do, continue to use 1861 * SLIST_FOREACH, SLIST_FOREACH_SAFE is not safe in our case, it is 1862 * only safe if you want to remove the current item, which we are 1863 * not doing. 
1864 */ 1865 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 1866 kq = kn->kn_kq; 1867 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) { 1868 KQ_LOCK(kq); 1869 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1870 KQ_UNLOCK(kq); 1871 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 1872 kn->kn_status |= KN_INFLUX; 1873 KQ_UNLOCK(kq); 1874 error = kn->kn_fop->f_event(kn, hint); 1875 KQ_LOCK(kq); 1876 kn->kn_status &= ~KN_INFLUX; 1877 if (error) 1878 KNOTE_ACTIVATE(kn, 1); 1879 KQ_UNLOCK_FLUX(kq); 1880 } else { 1881 kn->kn_status |= KN_HASKQLOCK; 1882 if (kn->kn_fop->f_event(kn, hint)) 1883 KNOTE_ACTIVATE(kn, 1); 1884 kn->kn_status &= ~KN_HASKQLOCK; 1885 KQ_UNLOCK(kq); 1886 } 1887 } 1888 kq = NULL; 1889 } 1890 if ((lockflags & KNF_LISTLOCKED) == 0) 1891 list->kl_unlock(list->kl_lockarg); 1892 } 1893 1894 /* 1895 * add a knote to a knlist 1896 */ 1897 void 1898 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 1899 { 1900 KNL_ASSERT_LOCK(knl, islocked); 1901 KQ_NOTOWNED(kn->kn_kq); 1902 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == 1903 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED")); 1904 if (!islocked) 1905 knl->kl_lock(knl->kl_lockarg); 1906 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 1907 if (!islocked) 1908 knl->kl_unlock(knl->kl_lockarg); 1909 KQ_LOCK(kn->kn_kq); 1910 kn->kn_knlist = knl; 1911 kn->kn_status &= ~KN_DETACHED; 1912 KQ_UNLOCK(kn->kn_kq); 1913 } 1914 1915 static void 1916 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) 1917 { 1918 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked")); 1919 KNL_ASSERT_LOCK(knl, knlislocked); 1920 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 1921 if (!kqislocked) 1922 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX, 1923 ("knlist_remove called w/o knote being KN_INFLUX or already removed")); 1924 if (!knlislocked) 1925 knl->kl_lock(knl->kl_lockarg); 1926 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 1927 kn->kn_knlist = NULL; 1928 if (!knlislocked) 1929 knl->kl_unlock(knl->kl_lockarg); 1930 if (!kqislocked) 1931 KQ_LOCK(kn->kn_kq); 1932 kn->kn_status |= KN_DETACHED; 1933 if (!kqislocked) 1934 KQ_UNLOCK(kn->kn_kq); 1935 } 1936 1937 /* 1938 * remove knote from the specified knlist 1939 */ 1940 void 1941 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 1942 { 1943 1944 knlist_remove_kq(knl, kn, islocked, 0); 1945 } 1946 1947 /* 1948 * remove knote from the specified knlist while in f_event handler. 
1949 */ 1950 void 1951 knlist_remove_inevent(struct knlist *knl, struct knote *kn) 1952 { 1953 1954 knlist_remove_kq(knl, kn, 1, 1955 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 1956 } 1957 1958 int 1959 knlist_empty(struct knlist *knl) 1960 { 1961 1962 KNL_ASSERT_LOCKED(knl); 1963 return SLIST_EMPTY(&knl->kl_list); 1964 } 1965 1966 static struct mtx knlist_lock; 1967 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 1968 MTX_DEF); 1969 static void knlist_mtx_lock(void *arg); 1970 static void knlist_mtx_unlock(void *arg); 1971 1972 static void 1973 knlist_mtx_lock(void *arg) 1974 { 1975 1976 mtx_lock((struct mtx *)arg); 1977 } 1978 1979 static void 1980 knlist_mtx_unlock(void *arg) 1981 { 1982 1983 mtx_unlock((struct mtx *)arg); 1984 } 1985 1986 static void 1987 knlist_mtx_assert_locked(void *arg) 1988 { 1989 1990 mtx_assert((struct mtx *)arg, MA_OWNED); 1991 } 1992 1993 static void 1994 knlist_mtx_assert_unlocked(void *arg) 1995 { 1996 1997 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 1998 } 1999 2000 static void 2001 knlist_rw_rlock(void *arg) 2002 { 2003 2004 rw_rlock((struct rwlock *)arg); 2005 } 2006 2007 static void 2008 knlist_rw_runlock(void *arg) 2009 { 2010 2011 rw_runlock((struct rwlock *)arg); 2012 } 2013 2014 static void 2015 knlist_rw_assert_locked(void *arg) 2016 { 2017 2018 rw_assert((struct rwlock *)arg, RA_LOCKED); 2019 } 2020 2021 static void 2022 knlist_rw_assert_unlocked(void *arg) 2023 { 2024 2025 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2026 } 2027 2028 void 2029 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2030 void (*kl_unlock)(void *), 2031 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 2032 { 2033 2034 if (lock == NULL) 2035 knl->kl_lockarg = &knlist_lock; 2036 else 2037 knl->kl_lockarg = lock; 2038 2039 if (kl_lock == NULL) 2040 knl->kl_lock = knlist_mtx_lock; 2041 else 2042 knl->kl_lock = kl_lock; 2043 if (kl_unlock == NULL) 2044 knl->kl_unlock = knlist_mtx_unlock; 2045 else 2046 knl->kl_unlock = kl_unlock; 2047 if (kl_assert_locked == NULL) 2048 knl->kl_assert_locked = knlist_mtx_assert_locked; 2049 else 2050 knl->kl_assert_locked = kl_assert_locked; 2051 if (kl_assert_unlocked == NULL) 2052 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2053 else 2054 knl->kl_assert_unlocked = kl_assert_unlocked; 2055 2056 SLIST_INIT(&knl->kl_list); 2057 } 2058 2059 void 2060 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2061 { 2062 2063 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2064 } 2065 2066 void 2067 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2068 { 2069 2070 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2071 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2072 } 2073 2074 void 2075 knlist_destroy(struct knlist *knl) 2076 { 2077 2078 #ifdef INVARIANTS 2079 /* 2080 * if we run across this error, we need to find the offending 2081 * driver and have it call knlist_clear or knlist_delete. 2082 */ 2083 if (!SLIST_EMPTY(&knl->kl_list)) 2084 printf("WARNING: destroying knlist w/ knotes on it!\n"); 2085 #endif 2086 2087 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 2088 SLIST_INIT(&knl->kl_list); 2089 } 2090 2091 /* 2092 * Even if we are locked, we may need to drop the lock to allow any influx 2093 * knotes time to "settle". 
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd; this must be called with the
 * FILEDESC lock held.  This prevents a race where a new fd comes along and
 * occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}

/*
 * The knote must already have been detached using the f_detach method.
 * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 * to prevent other removal.
2224 */ 2225 static void 2226 knote_drop(struct knote *kn, struct thread *td) 2227 { 2228 struct kqueue *kq; 2229 struct klist *list; 2230 2231 kq = kn->kn_kq; 2232 2233 KQ_NOTOWNED(kq); 2234 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX, 2235 ("knote_drop called without KN_INFLUX set in kn_status")); 2236 2237 KQ_LOCK(kq); 2238 if (kn->kn_fop->f_isfd) 2239 list = &kq->kq_knlist[kn->kn_id]; 2240 else 2241 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2242 2243 if (!SLIST_EMPTY(list)) 2244 SLIST_REMOVE(list, kn, knote, kn_link); 2245 if (kn->kn_status & KN_QUEUED) 2246 knote_dequeue(kn); 2247 KQ_UNLOCK_FLUX(kq); 2248 2249 if (kn->kn_fop->f_isfd) { 2250 fdrop(kn->kn_fp, td); 2251 kn->kn_fp = NULL; 2252 } 2253 kqueue_fo_release(kn->kn_kevent.filter); 2254 kn->kn_fop = NULL; 2255 knote_free(kn); 2256 } 2257 2258 static void 2259 knote_enqueue(struct knote *kn) 2260 { 2261 struct kqueue *kq = kn->kn_kq; 2262 2263 KQ_OWNED(kn->kn_kq); 2264 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2265 2266 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2267 kn->kn_status |= KN_QUEUED; 2268 kq->kq_count++; 2269 kqueue_wakeup(kq); 2270 } 2271 2272 static void 2273 knote_dequeue(struct knote *kn) 2274 { 2275 struct kqueue *kq = kn->kn_kq; 2276 2277 KQ_OWNED(kn->kn_kq); 2278 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2279 2280 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2281 kn->kn_status &= ~KN_QUEUED; 2282 kq->kq_count--; 2283 } 2284 2285 static void 2286 knote_init(void) 2287 { 2288 2289 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2290 NULL, NULL, UMA_ALIGN_PTR, 0); 2291 } 2292 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2293 2294 static struct knote * 2295 knote_alloc(int waitok) 2296 { 2297 return ((struct knote *)uma_zalloc(knote_zone, 2298 (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO)); 2299 } 2300 2301 static void 2302 knote_free(struct knote *kn) 2303 { 2304 if (kn != NULL) 2305 uma_zfree(knote_zone, kn); 2306 } 2307 2308 /* 2309 * Register the kev w/ the kq specified by fd. 2310 */ 2311 int 2312 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2313 { 2314 struct kqueue *kq; 2315 struct file *fp; 2316 cap_rights_t rights; 2317 int error; 2318 2319 error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); 2320 if (error != 0) 2321 return (error); 2322 if ((error = kqueue_acquire(fp, &kq)) != 0) 2323 goto noacquire; 2324 2325 error = kqueue_register(kq, kev, td, waitok); 2326 2327 kqueue_release(kq, 0); 2328 2329 noacquire: 2330 fdrop(fp, td); 2331 2332 return error; 2333 } 2334