1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * Copyright (c) 2009 Apple, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_ktrace.h" 33 #include "opt_kqueue.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/capsicum.h> 38 #include <sys/kernel.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/rwlock.h> 42 #include <sys/proc.h> 43 #include <sys/malloc.h> 44 #include <sys/unistd.h> 45 #include <sys/file.h> 46 #include <sys/filedesc.h> 47 #include <sys/filio.h> 48 #include <sys/fcntl.h> 49 #include <sys/kthread.h> 50 #include <sys/selinfo.h> 51 #include <sys/stdatomic.h> 52 #include <sys/queue.h> 53 #include <sys/event.h> 54 #include <sys/eventvar.h> 55 #include <sys/poll.h> 56 #include <sys/protosw.h> 57 #include <sys/resourcevar.h> 58 #include <sys/sigio.h> 59 #include <sys/signalvar.h> 60 #include <sys/socket.h> 61 #include <sys/socketvar.h> 62 #include <sys/stat.h> 63 #include <sys/sysctl.h> 64 #include <sys/sysproto.h> 65 #include <sys/syscallsubr.h> 66 #include <sys/taskqueue.h> 67 #include <sys/uio.h> 68 #include <sys/user.h> 69 #ifdef KTRACE 70 #include <sys/ktrace.h> 71 #endif 72 73 #include <vm/uma.h> 74 75 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 76 77 /* 78 * This lock is used if multiple kq locks are required. This possibly 79 * should be made into a per proc lock. 
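 * (In this file it is taken via KQ_GLOBAL_LOCK() when two kq locks may be
 * needed at once, i.e. when one kqueue is registered on another; see the
 * callers in kqueue_register(), kqueue_scan() and kqueue_task() below.)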
80 */ 81 static struct mtx kq_global; 82 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 83 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 84 if (!haslck) \ 85 mtx_lock(lck); \ 86 haslck = 1; \ 87 } while (0) 88 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 89 if (haslck) \ 90 mtx_unlock(lck); \ 91 haslck = 0; \ 92 } while (0) 93 94 TASKQUEUE_DEFINE_THREAD(kqueue); 95 96 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 97 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 98 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 99 struct thread *td, int waitok); 100 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 101 static void kqueue_release(struct kqueue *kq, int locked); 102 static void kqueue_destroy(struct kqueue *kq); 103 static void kqueue_drain(struct kqueue *kq, struct thread *td); 104 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 105 uintptr_t ident, int waitok); 106 static void kqueue_task(void *arg, int pending); 107 static int kqueue_scan(struct kqueue *kq, int maxevents, 108 struct kevent_copyops *k_ops, 109 const struct timespec *timeout, 110 struct kevent *keva, struct thread *td); 111 static void kqueue_wakeup(struct kqueue *kq); 112 static struct filterops *kqueue_fo_find(int filt); 113 static void kqueue_fo_release(int filt); 114 115 static fo_ioctl_t kqueue_ioctl; 116 static fo_poll_t kqueue_poll; 117 static fo_kqfilter_t kqueue_kqfilter; 118 static fo_stat_t kqueue_stat; 119 static fo_close_t kqueue_close; 120 static fo_fill_kinfo_t kqueue_fill_kinfo; 121 122 static struct fileops kqueueops = { 123 .fo_read = invfo_rdwr, 124 .fo_write = invfo_rdwr, 125 .fo_truncate = invfo_truncate, 126 .fo_ioctl = kqueue_ioctl, 127 .fo_poll = kqueue_poll, 128 .fo_kqfilter = kqueue_kqfilter, 129 .fo_stat = kqueue_stat, 130 .fo_close = kqueue_close, 131 .fo_chmod = invfo_chmod, 132 .fo_chown = invfo_chown, 133 .fo_sendfile = invfo_sendfile, 134 .fo_fill_kinfo = kqueue_fill_kinfo, 135 }; 136 137 static int knote_attach(struct knote *kn, struct kqueue *kq); 138 static void knote_drop(struct knote *kn, struct thread *td); 139 static void knote_enqueue(struct knote *kn); 140 static void knote_dequeue(struct knote *kn); 141 static void knote_init(void); 142 static struct knote *knote_alloc(int waitok); 143 static void knote_free(struct knote *kn); 144 145 static void filt_kqdetach(struct knote *kn); 146 static int filt_kqueue(struct knote *kn, long hint); 147 static int filt_procattach(struct knote *kn); 148 static void filt_procdetach(struct knote *kn); 149 static int filt_proc(struct knote *kn, long hint); 150 static int filt_fileattach(struct knote *kn); 151 static void filt_timerexpire(void *knx); 152 static int filt_timerattach(struct knote *kn); 153 static void filt_timerdetach(struct knote *kn); 154 static int filt_timer(struct knote *kn, long hint); 155 static int filt_userattach(struct knote *kn); 156 static void filt_userdetach(struct knote *kn); 157 static int filt_user(struct knote *kn, long hint); 158 static void filt_usertouch(struct knote *kn, struct kevent *kev, 159 u_long type); 160 161 static struct filterops file_filtops = { 162 .f_isfd = 1, 163 .f_attach = filt_fileattach, 164 }; 165 static struct filterops kqread_filtops = { 166 .f_isfd = 1, 167 .f_detach = filt_kqdetach, 168 .f_event = filt_kqueue, 169 }; 170 /* XXX - move to kern_proc.c? 
 */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while(0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
#define KN_LIST_LOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define KN_LIST_UNLOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while(0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64		/* XXX should be tunable */
#endif

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
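 *
 * EVFILT_* identifiers are negative, so a filter 'filt' lives in slot
 * ~filt (i.e. -filt - 1); EVFILT_READ (-1), for example, maps to slot 0.
 * Entries with for_nolock set are used without taking filterops_lock and
 * without reference counting; the others are counted by kqueue_fo_find()
 * and kqueue_fo_release().  Slots currently holding null_filtops can be
 * claimed with kqueue_add_filteropts() and released again with
 * kqueue_del_filteropts().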
280 */ 281 static struct mtx filterops_lock; 282 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 283 MTX_DEF); 284 static struct { 285 struct filterops *for_fop; 286 int for_nolock; 287 int for_refcnt; 288 } sysfilt_ops[EVFILT_SYSCOUNT] = { 289 { &file_filtops, 1 }, /* EVFILT_READ */ 290 { &file_filtops, 1 }, /* EVFILT_WRITE */ 291 { &null_filtops }, /* EVFILT_AIO */ 292 { &file_filtops, 1 }, /* EVFILT_VNODE */ 293 { &proc_filtops, 1 }, /* EVFILT_PROC */ 294 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 295 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 296 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 297 { &fs_filtops, 1 }, /* EVFILT_FS */ 298 { &null_filtops }, /* EVFILT_LIO */ 299 { &user_filtops, 1 }, /* EVFILT_USER */ 300 { &null_filtops }, /* EVFILT_SENDFILE */ 301 }; 302 303 /* 304 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 305 * method. 306 */ 307 static int 308 filt_fileattach(struct knote *kn) 309 { 310 311 return (fo_kqfilter(kn->kn_fp, kn)); 312 } 313 314 /*ARGSUSED*/ 315 static int 316 kqueue_kqfilter(struct file *fp, struct knote *kn) 317 { 318 struct kqueue *kq = kn->kn_fp->f_data; 319 320 if (kn->kn_filter != EVFILT_READ) 321 return (EINVAL); 322 323 kn->kn_status |= KN_KQUEUE; 324 kn->kn_fop = &kqread_filtops; 325 knlist_add(&kq->kq_sel.si_note, kn, 0); 326 327 return (0); 328 } 329 330 static void 331 filt_kqdetach(struct knote *kn) 332 { 333 struct kqueue *kq = kn->kn_fp->f_data; 334 335 knlist_remove(&kq->kq_sel.si_note, kn, 0); 336 } 337 338 /*ARGSUSED*/ 339 static int 340 filt_kqueue(struct knote *kn, long hint) 341 { 342 struct kqueue *kq = kn->kn_fp->f_data; 343 344 kn->kn_data = kq->kq_count; 345 return (kn->kn_data > 0); 346 } 347 348 /* XXX - move to kern_proc.c? */ 349 static int 350 filt_procattach(struct knote *kn) 351 { 352 struct proc *p; 353 int immediate; 354 int error; 355 356 immediate = 0; 357 p = pfind(kn->kn_id); 358 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 359 p = zpfind(kn->kn_id); 360 immediate = 1; 361 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 362 immediate = 1; 363 } 364 365 if (p == NULL) 366 return (ESRCH); 367 if ((error = p_cansee(curthread, p))) { 368 PROC_UNLOCK(p); 369 return (error); 370 } 371 372 kn->kn_ptr.p_proc = p; 373 kn->kn_flags |= EV_CLEAR; /* automatically set */ 374 375 /* 376 * internal flag indicating registration done by kernel 377 */ 378 if (kn->kn_flags & EV_FLAG1) { 379 kn->kn_data = kn->kn_sdata; /* ppid */ 380 kn->kn_fflags = NOTE_CHILD; 381 kn->kn_flags &= ~EV_FLAG1; 382 } 383 384 if (immediate == 0) 385 knlist_add(&p->p_klist, kn, 1); 386 387 /* 388 * Immediately activate any exit notes if the target process is a 389 * zombie. This is necessary to handle the case where the target 390 * process, e.g. a child, dies before the kevent is registered. 391 */ 392 if (immediate && filt_proc(kn, NOTE_EXIT)) 393 KNOTE_ACTIVATE(kn, 0); 394 395 PROC_UNLOCK(p); 396 397 return (0); 398 } 399 400 /* 401 * The knote may be attached to a different process, which may exit, 402 * leaving nothing for the knote to be attached to. So when the process 403 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 404 * it will be deleted when read out. However, as part of the knote deletion, 405 * this routine is called, so a check is needed to avoid actually performing 406 * a detach, because the original process does not exist any more. 407 */ 408 /* XXX - move to kern_proc.c? 
*/ 409 static void 410 filt_procdetach(struct knote *kn) 411 { 412 struct proc *p; 413 414 p = kn->kn_ptr.p_proc; 415 knlist_remove(&p->p_klist, kn, 0); 416 kn->kn_ptr.p_proc = NULL; 417 } 418 419 /* XXX - move to kern_proc.c? */ 420 static int 421 filt_proc(struct knote *kn, long hint) 422 { 423 struct proc *p; 424 u_int event; 425 426 p = kn->kn_ptr.p_proc; 427 /* Mask off extra data. */ 428 event = (u_int)hint & NOTE_PCTRLMASK; 429 430 /* If the user is interested in this event, record it. */ 431 if (kn->kn_sfflags & event) 432 kn->kn_fflags |= event; 433 434 /* Process is gone, so flag the event as finished. */ 435 if (event == NOTE_EXIT) { 436 if (!(kn->kn_status & KN_DETACHED)) 437 knlist_remove_inevent(&p->p_klist, kn); 438 kn->kn_flags |= EV_EOF | EV_ONESHOT; 439 kn->kn_ptr.p_proc = NULL; 440 if (kn->kn_fflags & NOTE_EXIT) 441 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 442 if (kn->kn_fflags == 0) 443 kn->kn_flags |= EV_DROP; 444 return (1); 445 } 446 447 return (kn->kn_fflags != 0); 448 } 449 450 /* 451 * Called when the process forked. It mostly does the same as the 452 * knote(), activating all knotes registered to be activated when the 453 * process forked. Additionally, for each knote attached to the 454 * parent, check whether user wants to track the new process. If so 455 * attach a new knote to it, and immediately report an event with the 456 * child's pid. 457 */ 458 void 459 knote_fork(struct knlist *list, int pid) 460 { 461 struct kqueue *kq; 462 struct knote *kn; 463 struct kevent kev; 464 int error; 465 466 if (list == NULL) 467 return; 468 list->kl_lock(list->kl_lockarg); 469 470 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 471 /* 472 * XXX - Why do we skip the kn if it is _INFLUX? Does this 473 * mean we will not properly wake up some notes? 474 */ 475 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 476 continue; 477 kq = kn->kn_kq; 478 KQ_LOCK(kq); 479 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 480 KQ_UNLOCK(kq); 481 continue; 482 } 483 484 /* 485 * The same as knote(), activate the event. 486 */ 487 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 488 kn->kn_status |= KN_HASKQLOCK; 489 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 490 KNOTE_ACTIVATE(kn, 1); 491 kn->kn_status &= ~KN_HASKQLOCK; 492 KQ_UNLOCK(kq); 493 continue; 494 } 495 496 /* 497 * The NOTE_TRACK case. In addition to the activation 498 * of the event, we need to register new event to 499 * track the child. Drop the locks in preparation for 500 * the call to kqueue_register(). 501 */ 502 kn->kn_status |= KN_INFLUX; 503 KQ_UNLOCK(kq); 504 list->kl_unlock(list->kl_lockarg); 505 506 /* 507 * Activate existing knote and register a knote with 508 * new process. 509 */ 510 kev.ident = pid; 511 kev.filter = kn->kn_filter; 512 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 513 kev.fflags = kn->kn_sfflags; 514 kev.data = kn->kn_id; /* parent */ 515 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 516 error = kqueue_register(kq, &kev, NULL, 0); 517 if (error) 518 kn->kn_fflags |= NOTE_TRACKERR; 519 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 520 KNOTE_ACTIVATE(kn, 0); 521 KQ_LOCK(kq); 522 kn->kn_status &= ~KN_INFLUX; 523 KQ_UNLOCK_FLUX(kq); 524 list->kl_lock(list->kl_lockarg); 525 } 526 list->kl_unlock(list->kl_lockarg); 527 } 528 529 /* 530 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 531 * interval timer support code. 
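 *
 * Userland usage sketch (not part of this file): a 500 ms periodic timer
 * is typically registered as
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 * where kqfd is a descriptor from kqueue(2).  The period ends up in
 * kn_sdata and the precision flag in kn_sfflags; timer2sbintime() below
 * converts that pair into an sbintime_t.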
 */

#define	NOTE_TIMER_PRECMASK	(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
				NOTE_NSECONDS)

static __inline sbintime_t
timer2sbintime(intptr_t data, int flags)
{
	sbintime_t modifier;

	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
		modifier = SBT_1S;
		break;
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		modifier = SBT_1MS;
		break;
	case NOTE_USECONDS:
		modifier = SBT_1US;
		break;
	case NOTE_NSECONDS:
		modifier = SBT_1NS;
		break;
	default:
		return (-1);
	}

#ifdef __LP64__
	if (data > SBT_MAX / modifier)
		return (SBT_MAX);
#endif
	return (modifier * data);
}

static void
filt_timerexpire(void *knx)
{
	struct callout *calloutp;
	struct knote *kn;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
		*kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata,
		    kn->kn_sfflags);
		callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
		    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
	}
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	sbintime_t to;
	unsigned int ncallouts;

	if ((intptr_t)kn->kn_sdata < 0)
		return (EINVAL);
	if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far */
	if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (to < 0)
		return (EINVAL);

	ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
	do {
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
	    &ncallouts, ncallouts + 1, memory_order_relaxed,
	    memory_order_relaxed));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK);
	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp, 1);
	kn->kn_hook = calloutp;
	*kn->kn_ptr.p_nexttime = to + sbinuptime();
	callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
	    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;
	unsigned int old;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	free(calloutp, M_KQUEUE);
	free(kn->kn_ptr.p_nexttime, M_KQUEUE);
	old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
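	 * They fire only when a kevent(2) call (the registering one or a
	 * later one) carries NOTE_TRIGGER in fflags, e.g. (userland sketch):
	 *
	 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	 *	...
	 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	 *
	 * kn_hookid serves as the "triggered" flag; see filt_usertouch().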
657 */ 658 kn->kn_hook = NULL; 659 if (kn->kn_fflags & NOTE_TRIGGER) 660 kn->kn_hookid = 1; 661 else 662 kn->kn_hookid = 0; 663 return (0); 664 } 665 666 static void 667 filt_userdetach(__unused struct knote *kn) 668 { 669 670 /* 671 * EVFILT_USER knotes are not attached to anything in the kernel. 672 */ 673 } 674 675 static int 676 filt_user(struct knote *kn, __unused long hint) 677 { 678 679 return (kn->kn_hookid); 680 } 681 682 static void 683 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 684 { 685 u_int ffctrl; 686 687 switch (type) { 688 case EVENT_REGISTER: 689 if (kev->fflags & NOTE_TRIGGER) 690 kn->kn_hookid = 1; 691 692 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 693 kev->fflags &= NOTE_FFLAGSMASK; 694 switch (ffctrl) { 695 case NOTE_FFNOP: 696 break; 697 698 case NOTE_FFAND: 699 kn->kn_sfflags &= kev->fflags; 700 break; 701 702 case NOTE_FFOR: 703 kn->kn_sfflags |= kev->fflags; 704 break; 705 706 case NOTE_FFCOPY: 707 kn->kn_sfflags = kev->fflags; 708 break; 709 710 default: 711 /* XXX Return error? */ 712 break; 713 } 714 kn->kn_sdata = kev->data; 715 if (kev->flags & EV_CLEAR) { 716 kn->kn_hookid = 0; 717 kn->kn_data = 0; 718 kn->kn_fflags = 0; 719 } 720 break; 721 722 case EVENT_PROCESS: 723 *kev = kn->kn_kevent; 724 kev->fflags = kn->kn_sfflags; 725 kev->data = kn->kn_sdata; 726 if (kn->kn_flags & EV_CLEAR) { 727 kn->kn_hookid = 0; 728 kn->kn_data = 0; 729 kn->kn_fflags = 0; 730 } 731 break; 732 733 default: 734 panic("filt_usertouch() - invalid type (%ld)", type); 735 break; 736 } 737 } 738 739 int 740 sys_kqueue(struct thread *td, struct kqueue_args *uap) 741 { 742 743 return (kern_kqueue(td, 0, NULL)); 744 } 745 746 static void 747 kqueue_init(struct kqueue *kq) 748 { 749 750 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 751 TAILQ_INIT(&kq->kq_head); 752 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 753 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 754 } 755 756 int 757 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 758 { 759 struct filedesc *fdp; 760 struct kqueue *kq; 761 struct file *fp; 762 struct ucred *cred; 763 int fd, error; 764 765 fdp = td->td_proc->p_fd; 766 cred = td->td_ucred; 767 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 768 return (ENOMEM); 769 770 error = falloc_caps(td, &fp, &fd, flags, fcaps); 771 if (error != 0) { 772 chgkqcnt(cred->cr_ruidinfo, -1, 0); 773 return (error); 774 } 775 776 /* An extra reference on `fp' has been held for us by falloc(). 
*/ 777 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 778 kqueue_init(kq); 779 kq->kq_fdp = fdp; 780 kq->kq_cred = crhold(cred); 781 782 FILEDESC_XLOCK(fdp); 783 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 784 FILEDESC_XUNLOCK(fdp); 785 786 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 787 fdrop(fp, td); 788 789 td->td_retval[0] = fd; 790 return (0); 791 } 792 793 #ifndef _SYS_SYSPROTO_H_ 794 struct kevent_args { 795 int fd; 796 const struct kevent *changelist; 797 int nchanges; 798 struct kevent *eventlist; 799 int nevents; 800 const struct timespec *timeout; 801 }; 802 #endif 803 int 804 sys_kevent(struct thread *td, struct kevent_args *uap) 805 { 806 struct timespec ts, *tsp; 807 struct kevent_copyops k_ops = { uap, 808 kevent_copyout, 809 kevent_copyin}; 810 int error; 811 #ifdef KTRACE 812 struct uio ktruio; 813 struct iovec ktriov; 814 struct uio *ktruioin = NULL; 815 struct uio *ktruioout = NULL; 816 #endif 817 818 if (uap->timeout != NULL) { 819 error = copyin(uap->timeout, &ts, sizeof(ts)); 820 if (error) 821 return (error); 822 tsp = &ts; 823 } else 824 tsp = NULL; 825 826 #ifdef KTRACE 827 if (KTRPOINT(td, KTR_GENIO)) { 828 ktriov.iov_base = uap->changelist; 829 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 830 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 831 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 832 .uio_td = td }; 833 ktruioin = cloneuio(&ktruio); 834 ktriov.iov_base = uap->eventlist; 835 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 836 ktruioout = cloneuio(&ktruio); 837 } 838 #endif 839 840 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 841 &k_ops, tsp); 842 843 #ifdef KTRACE 844 if (ktruioin != NULL) { 845 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 846 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 847 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 848 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 849 } 850 #endif 851 852 return (error); 853 } 854 855 /* 856 * Copy 'count' items into the destination list pointed to by uap->eventlist. 857 */ 858 static int 859 kevent_copyout(void *arg, struct kevent *kevp, int count) 860 { 861 struct kevent_args *uap; 862 int error; 863 864 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 865 uap = (struct kevent_args *)arg; 866 867 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 868 if (error == 0) 869 uap->eventlist += count; 870 return (error); 871 } 872 873 /* 874 * Copy 'count' items from the list pointed to by uap->changelist. 
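 * Like kevent_copyout() above, this is only reached through the
 * kevent_copyops vector (k_ops->k_copyin), so kern_kevent() never needs to
 * know whether the changelist lives in user or kernel space; chunks are at
 * most KQ_NEVENTS entries at a time.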
875 */ 876 static int 877 kevent_copyin(void *arg, struct kevent *kevp, int count) 878 { 879 struct kevent_args *uap; 880 int error; 881 882 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 883 uap = (struct kevent_args *)arg; 884 885 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 886 if (error == 0) 887 uap->changelist += count; 888 return (error); 889 } 890 891 int 892 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 893 struct kevent_copyops *k_ops, const struct timespec *timeout) 894 { 895 cap_rights_t rights; 896 struct file *fp; 897 int error; 898 899 cap_rights_init(&rights); 900 if (nchanges > 0) 901 cap_rights_set(&rights, CAP_KQUEUE_CHANGE); 902 if (nevents > 0) 903 cap_rights_set(&rights, CAP_KQUEUE_EVENT); 904 error = fget(td, fd, &rights, &fp); 905 if (error != 0) 906 return (error); 907 908 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 909 fdrop(fp, td); 910 911 return (error); 912 } 913 914 static int 915 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 916 struct kevent_copyops *k_ops, const struct timespec *timeout) 917 { 918 struct kevent keva[KQ_NEVENTS]; 919 struct kevent *kevp, *changes; 920 int i, n, nerrors, error; 921 922 nerrors = 0; 923 while (nchanges > 0) { 924 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 925 error = k_ops->k_copyin(k_ops->arg, keva, n); 926 if (error) 927 return (error); 928 changes = keva; 929 for (i = 0; i < n; i++) { 930 kevp = &changes[i]; 931 if (!kevp->filter) 932 continue; 933 kevp->flags &= ~EV_SYSFLAGS; 934 error = kqueue_register(kq, kevp, td, 1); 935 if (error || (kevp->flags & EV_RECEIPT)) { 936 if (nevents == 0) 937 return (error); 938 kevp->flags = EV_ERROR; 939 kevp->data = error; 940 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 941 nevents--; 942 nerrors++; 943 } 944 } 945 nchanges -= n; 946 } 947 if (nerrors) { 948 td->td_retval[0] = nerrors; 949 return (0); 950 } 951 952 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 953 } 954 955 int 956 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 957 struct kevent_copyops *k_ops, const struct timespec *timeout) 958 { 959 struct kqueue *kq; 960 int error; 961 962 error = kqueue_acquire(fp, &kq); 963 if (error != 0) 964 return (error); 965 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 966 kqueue_release(kq, 0); 967 return (error); 968 } 969 970 /* 971 * Performs a kevent() call on a temporarily created kqueue. This can be 972 * used to perform one-shot polling, similar to poll() and select(). 
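 *
 * An in-kernel caller supplies a kevent_copyops vector whose k_copyin and
 * k_copyout simply move kevents between 'arg' and kernel arrays, for
 * instance (hypothetical sketch, names invented for illustration):
 *
 *	static int
 *	my_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		memcpy(kevp, arg, count * sizeof(*kevp));
 *		return (0);
 *	}
 *
 * and passes { .arg = kevs, .k_copyout = my_copyout, .k_copyin = my_copyin }
 * as k_ops; nchanges and nevents are both 'nevents' here.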
973 */ 974 int 975 kern_kevent_anonymous(struct thread *td, int nevents, 976 struct kevent_copyops *k_ops) 977 { 978 struct kqueue kq = {}; 979 int error; 980 981 kqueue_init(&kq); 982 kq.kq_refcnt = 1; 983 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 984 kqueue_drain(&kq, td); 985 kqueue_destroy(&kq); 986 return (error); 987 } 988 989 int 990 kqueue_add_filteropts(int filt, struct filterops *filtops) 991 { 992 int error; 993 994 error = 0; 995 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 996 printf( 997 "trying to add a filterop that is out of range: %d is beyond %d\n", 998 ~filt, EVFILT_SYSCOUNT); 999 return EINVAL; 1000 } 1001 mtx_lock(&filterops_lock); 1002 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1003 sysfilt_ops[~filt].for_fop != NULL) 1004 error = EEXIST; 1005 else { 1006 sysfilt_ops[~filt].for_fop = filtops; 1007 sysfilt_ops[~filt].for_refcnt = 0; 1008 } 1009 mtx_unlock(&filterops_lock); 1010 1011 return (error); 1012 } 1013 1014 int 1015 kqueue_del_filteropts(int filt) 1016 { 1017 int error; 1018 1019 error = 0; 1020 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1021 return EINVAL; 1022 1023 mtx_lock(&filterops_lock); 1024 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1025 sysfilt_ops[~filt].for_fop == NULL) 1026 error = EINVAL; 1027 else if (sysfilt_ops[~filt].for_refcnt != 0) 1028 error = EBUSY; 1029 else { 1030 sysfilt_ops[~filt].for_fop = &null_filtops; 1031 sysfilt_ops[~filt].for_refcnt = 0; 1032 } 1033 mtx_unlock(&filterops_lock); 1034 1035 return error; 1036 } 1037 1038 static struct filterops * 1039 kqueue_fo_find(int filt) 1040 { 1041 1042 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1043 return NULL; 1044 1045 if (sysfilt_ops[~filt].for_nolock) 1046 return sysfilt_ops[~filt].for_fop; 1047 1048 mtx_lock(&filterops_lock); 1049 sysfilt_ops[~filt].for_refcnt++; 1050 if (sysfilt_ops[~filt].for_fop == NULL) 1051 sysfilt_ops[~filt].for_fop = &null_filtops; 1052 mtx_unlock(&filterops_lock); 1053 1054 return sysfilt_ops[~filt].for_fop; 1055 } 1056 1057 static void 1058 kqueue_fo_release(int filt) 1059 { 1060 1061 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1062 return; 1063 1064 if (sysfilt_ops[~filt].for_nolock) 1065 return; 1066 1067 mtx_lock(&filterops_lock); 1068 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1069 ("filter object refcount not valid on release")); 1070 sysfilt_ops[~filt].for_refcnt--; 1071 mtx_unlock(&filterops_lock); 1072 } 1073 1074 /* 1075 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will 1076 * influence if memory allocation should wait. Make sure it is 0 if you 1077 * hold any mutexes. 1078 */ 1079 static int 1080 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) 1081 { 1082 struct filterops *fops; 1083 struct file *fp; 1084 struct knote *kn, *tkn; 1085 cap_rights_t rights; 1086 int error, filt, event; 1087 int haskqglobal, filedesc_unlock; 1088 1089 fp = NULL; 1090 kn = NULL; 1091 error = 0; 1092 haskqglobal = 0; 1093 filedesc_unlock = 0; 1094 1095 filt = kev->filter; 1096 fops = kqueue_fo_find(filt); 1097 if (fops == NULL) 1098 return EINVAL; 1099 1100 if (kev->flags & EV_ADD) { 1101 /* 1102 * Prevent waiting with locks. Non-sleepable 1103 * allocation failures are handled in the loop, only 1104 * if the spare knote appears to be actually required. 
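		 * The spare allocated here is 'tkn'; if it ends up unused it
		 * is freed at the 'done' label below.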
		 */
		tkn = knote_alloc(waitok);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident,
		    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
			kn->kn_status = KN_INFLUX|KN_DETACHED;

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
			KN_LIST_LOCK(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set.
*/ 1238 KQ_UNLOCK(kq); 1239 error = ENOENT; 1240 goto done; 1241 } 1242 } 1243 1244 if (kev->flags & EV_DELETE) { 1245 kn->kn_status |= KN_INFLUX; 1246 KQ_UNLOCK(kq); 1247 if (!(kn->kn_status & KN_DETACHED)) 1248 kn->kn_fop->f_detach(kn); 1249 knote_drop(kn, td); 1250 goto done; 1251 } 1252 1253 if (kev->flags & EV_FORCEONESHOT) { 1254 kn->kn_flags |= EV_ONESHOT; 1255 KNOTE_ACTIVATE(kn, 1); 1256 } 1257 1258 /* 1259 * The user may change some filter values after the initial EV_ADD, 1260 * but doing so will not reset any filter which has already been 1261 * triggered. 1262 */ 1263 kn->kn_status |= KN_INFLUX | KN_SCAN; 1264 KQ_UNLOCK(kq); 1265 KN_LIST_LOCK(kn); 1266 kn->kn_kevent.udata = kev->udata; 1267 if (!fops->f_isfd && fops->f_touch != NULL) { 1268 fops->f_touch(kn, kev, EVENT_REGISTER); 1269 } else { 1270 kn->kn_sfflags = kev->fflags; 1271 kn->kn_sdata = kev->data; 1272 } 1273 1274 /* 1275 * We can get here with kn->kn_knlist == NULL. This can happen when 1276 * the initial attach event decides that the event is "completed" 1277 * already. i.e. filt_procattach is called on a zombie process. It 1278 * will call filt_proc which will remove it from the list, and NULL 1279 * kn_knlist. 1280 */ 1281 done_ev_add: 1282 if ((kev->flags & EV_DISABLE) && 1283 ((kn->kn_status & KN_DISABLED) == 0)) { 1284 kn->kn_status |= KN_DISABLED; 1285 } 1286 1287 if ((kn->kn_status & KN_DISABLED) == 0) 1288 event = kn->kn_fop->f_event(kn, 0); 1289 else 1290 event = 0; 1291 KQ_LOCK(kq); 1292 if (event) 1293 KNOTE_ACTIVATE(kn, 1); 1294 kn->kn_status &= ~(KN_INFLUX | KN_SCAN); 1295 KN_LIST_UNLOCK(kn); 1296 1297 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1298 kn->kn_status &= ~KN_DISABLED; 1299 if ((kn->kn_status & KN_ACTIVE) && 1300 ((kn->kn_status & KN_QUEUED) == 0)) 1301 knote_enqueue(kn); 1302 } 1303 KQ_UNLOCK_FLUX(kq); 1304 1305 done: 1306 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1307 if (filedesc_unlock) 1308 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1309 if (fp != NULL) 1310 fdrop(fp, td); 1311 knote_free(tkn); 1312 if (fops != NULL) 1313 kqueue_fo_release(filt); 1314 return (error); 1315 } 1316 1317 static int 1318 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1319 { 1320 int error; 1321 struct kqueue *kq; 1322 1323 error = 0; 1324 1325 kq = fp->f_data; 1326 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1327 return (EBADF); 1328 *kqp = kq; 1329 KQ_LOCK(kq); 1330 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1331 KQ_UNLOCK(kq); 1332 return (EBADF); 1333 } 1334 kq->kq_refcnt++; 1335 KQ_UNLOCK(kq); 1336 1337 return error; 1338 } 1339 1340 static void 1341 kqueue_release(struct kqueue *kq, int locked) 1342 { 1343 if (locked) 1344 KQ_OWNED(kq); 1345 else 1346 KQ_LOCK(kq); 1347 kq->kq_refcnt--; 1348 if (kq->kq_refcnt == 1) 1349 wakeup(&kq->kq_refcnt); 1350 if (!locked) 1351 KQ_UNLOCK(kq); 1352 } 1353 1354 static void 1355 kqueue_schedtask(struct kqueue *kq) 1356 { 1357 1358 KQ_OWNED(kq); 1359 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1360 ("scheduling kqueue task while draining")); 1361 1362 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1363 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1364 kq->kq_state |= KQ_TASKSCHED; 1365 } 1366 } 1367 1368 /* 1369 * Expand the kq to make sure we have storage for fops/ident pair. 1370 * 1371 * Return 0 on success (or no work necessary), return errno on failure. 1372 * 1373 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 
1374 * If kqueue_register is called from a non-fd context, there usually/should 1375 * be no locks held. 1376 */ 1377 static int 1378 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1379 int waitok) 1380 { 1381 struct klist *list, *tmp_knhash, *to_free; 1382 u_long tmp_knhashmask; 1383 int size; 1384 int fd; 1385 int mflag = waitok ? M_WAITOK : M_NOWAIT; 1386 1387 KQ_NOTOWNED(kq); 1388 1389 to_free = NULL; 1390 if (fops->f_isfd) { 1391 fd = ident; 1392 if (kq->kq_knlistsize <= fd) { 1393 size = kq->kq_knlistsize; 1394 while (size <= fd) 1395 size += KQEXTENT; 1396 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1397 if (list == NULL) 1398 return ENOMEM; 1399 KQ_LOCK(kq); 1400 if (kq->kq_knlistsize > fd) { 1401 to_free = list; 1402 list = NULL; 1403 } else { 1404 if (kq->kq_knlist != NULL) { 1405 bcopy(kq->kq_knlist, list, 1406 kq->kq_knlistsize * sizeof(*list)); 1407 to_free = kq->kq_knlist; 1408 kq->kq_knlist = NULL; 1409 } 1410 bzero((caddr_t)list + 1411 kq->kq_knlistsize * sizeof(*list), 1412 (size - kq->kq_knlistsize) * sizeof(*list)); 1413 kq->kq_knlistsize = size; 1414 kq->kq_knlist = list; 1415 } 1416 KQ_UNLOCK(kq); 1417 } 1418 } else { 1419 if (kq->kq_knhashmask == 0) { 1420 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1421 &tmp_knhashmask); 1422 if (tmp_knhash == NULL) 1423 return ENOMEM; 1424 KQ_LOCK(kq); 1425 if (kq->kq_knhashmask == 0) { 1426 kq->kq_knhash = tmp_knhash; 1427 kq->kq_knhashmask = tmp_knhashmask; 1428 } else { 1429 to_free = tmp_knhash; 1430 } 1431 KQ_UNLOCK(kq); 1432 } 1433 } 1434 free(to_free, M_KQUEUE); 1435 1436 KQ_NOTOWNED(kq); 1437 return 0; 1438 } 1439 1440 static void 1441 kqueue_task(void *arg, int pending) 1442 { 1443 struct kqueue *kq; 1444 int haskqglobal; 1445 1446 haskqglobal = 0; 1447 kq = arg; 1448 1449 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1450 KQ_LOCK(kq); 1451 1452 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1453 1454 kq->kq_state &= ~KQ_TASKSCHED; 1455 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1456 wakeup(&kq->kq_state); 1457 } 1458 KQ_UNLOCK(kq); 1459 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1460 } 1461 1462 /* 1463 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1464 * We treat KN_MARKER knotes as if they are INFLUX. 
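 *
 * A private KN_MARKER knote is queued at the tail of kq_head before the
 * scan begins.  The kq lock is dropped while filters run and while events
 * are copied out, so newly activated knotes can be appended behind the
 * marker; the scan terminates once it dequeues the marker again, which
 * bounds one call to the events that were pending when it started.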
1465 */ 1466 static int 1467 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1468 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1469 { 1470 struct kevent *kevp; 1471 struct knote *kn, *marker; 1472 sbintime_t asbt, rsbt; 1473 int count, error, haskqglobal, influx, nkev, touch; 1474 1475 count = maxevents; 1476 nkev = 0; 1477 error = 0; 1478 haskqglobal = 0; 1479 1480 if (maxevents == 0) 1481 goto done_nl; 1482 1483 rsbt = 0; 1484 if (tsp != NULL) { 1485 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1486 tsp->tv_nsec >= 1000000000) { 1487 error = EINVAL; 1488 goto done_nl; 1489 } 1490 if (timespecisset(tsp)) { 1491 if (tsp->tv_sec <= INT32_MAX) { 1492 rsbt = tstosbt(*tsp); 1493 if (TIMESEL(&asbt, rsbt)) 1494 asbt += tc_tick_sbt; 1495 if (asbt <= SBT_MAX - rsbt) 1496 asbt += rsbt; 1497 else 1498 asbt = 0; 1499 rsbt >>= tc_precexp; 1500 } else 1501 asbt = 0; 1502 } else 1503 asbt = -1; 1504 } else 1505 asbt = 0; 1506 marker = knote_alloc(1); 1507 marker->kn_status = KN_MARKER; 1508 KQ_LOCK(kq); 1509 1510 retry: 1511 kevp = keva; 1512 if (kq->kq_count == 0) { 1513 if (asbt == -1) { 1514 error = EWOULDBLOCK; 1515 } else { 1516 kq->kq_state |= KQ_SLEEP; 1517 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1518 "kqread", asbt, rsbt, C_ABSOLUTE); 1519 } 1520 if (error == 0) 1521 goto retry; 1522 /* don't restart after signals... */ 1523 if (error == ERESTART) 1524 error = EINTR; 1525 else if (error == EWOULDBLOCK) 1526 error = 0; 1527 goto done; 1528 } 1529 1530 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1531 influx = 0; 1532 while (count) { 1533 KQ_OWNED(kq); 1534 kn = TAILQ_FIRST(&kq->kq_head); 1535 1536 if ((kn->kn_status == KN_MARKER && kn != marker) || 1537 (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1538 if (influx) { 1539 influx = 0; 1540 KQ_FLUX_WAKEUP(kq); 1541 } 1542 kq->kq_state |= KQ_FLUXWAIT; 1543 error = msleep(kq, &kq->kq_lock, PSOCK, 1544 "kqflxwt", 0); 1545 continue; 1546 } 1547 1548 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1549 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1550 kn->kn_status &= ~KN_QUEUED; 1551 kq->kq_count--; 1552 continue; 1553 } 1554 if (kn == marker) { 1555 KQ_FLUX_WAKEUP(kq); 1556 if (count == maxevents) 1557 goto retry; 1558 goto done; 1559 } 1560 KASSERT((kn->kn_status & KN_INFLUX) == 0, 1561 ("KN_INFLUX set when not suppose to be")); 1562 1563 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1564 kn->kn_status &= ~KN_QUEUED; 1565 kn->kn_status |= KN_INFLUX; 1566 kq->kq_count--; 1567 KQ_UNLOCK(kq); 1568 /* 1569 * We don't need to lock the list since we've marked 1570 * it _INFLUX. 1571 */ 1572 if (!(kn->kn_status & KN_DETACHED)) 1573 kn->kn_fop->f_detach(kn); 1574 knote_drop(kn, td); 1575 KQ_LOCK(kq); 1576 continue; 1577 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1578 kn->kn_status &= ~KN_QUEUED; 1579 kn->kn_status |= KN_INFLUX; 1580 kq->kq_count--; 1581 KQ_UNLOCK(kq); 1582 /* 1583 * We don't need to lock the list since we've marked 1584 * it _INFLUX. 
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX | KN_SCAN;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX |
				    KN_SCAN);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
1689 */ 1690 #if 0 1691 struct kqueue *kq; 1692 1693 kq = fp->f_data; 1694 switch (cmd) { 1695 case FIOASYNC: 1696 if (*(int *)data) { 1697 kq->kq_state |= KQ_ASYNC; 1698 } else { 1699 kq->kq_state &= ~KQ_ASYNC; 1700 } 1701 return (0); 1702 1703 case FIOSETOWN: 1704 return (fsetown(*(int *)data, &kq->kq_sigio)); 1705 1706 case FIOGETOWN: 1707 *(int *)data = fgetown(&kq->kq_sigio); 1708 return (0); 1709 } 1710 #endif 1711 1712 return (ENOTTY); 1713 } 1714 1715 /*ARGSUSED*/ 1716 static int 1717 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 1718 struct thread *td) 1719 { 1720 struct kqueue *kq; 1721 int revents = 0; 1722 int error; 1723 1724 if ((error = kqueue_acquire(fp, &kq))) 1725 return POLLERR; 1726 1727 KQ_LOCK(kq); 1728 if (events & (POLLIN | POLLRDNORM)) { 1729 if (kq->kq_count) { 1730 revents |= events & (POLLIN | POLLRDNORM); 1731 } else { 1732 selrecord(td, &kq->kq_sel); 1733 if (SEL_WAITING(&kq->kq_sel)) 1734 kq->kq_state |= KQ_SEL; 1735 } 1736 } 1737 kqueue_release(kq, 1); 1738 KQ_UNLOCK(kq); 1739 return (revents); 1740 } 1741 1742 /*ARGSUSED*/ 1743 static int 1744 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 1745 struct thread *td) 1746 { 1747 1748 bzero((void *)st, sizeof *st); 1749 /* 1750 * We no longer return kq_count because the unlocked value is useless. 1751 * If you spent all this time getting the count, why not spend your 1752 * syscall better by calling kevent? 1753 * 1754 * XXX - This is needed for libc_r. 1755 */ 1756 st->st_mode = S_IFIFO; 1757 return (0); 1758 } 1759 1760 static void 1761 kqueue_drain(struct kqueue *kq, struct thread *td) 1762 { 1763 struct knote *kn; 1764 int i; 1765 1766 KQ_LOCK(kq); 1767 1768 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1769 ("kqueue already closing")); 1770 kq->kq_state |= KQ_CLOSING; 1771 if (kq->kq_refcnt > 1) 1772 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1773 1774 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1775 1776 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1777 ("kqueue's knlist not empty")); 1778 1779 for (i = 0; i < kq->kq_knlistsize; i++) { 1780 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1781 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1782 kq->kq_state |= KQ_FLUXWAIT; 1783 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1784 continue; 1785 } 1786 kn->kn_status |= KN_INFLUX; 1787 KQ_UNLOCK(kq); 1788 if (!(kn->kn_status & KN_DETACHED)) 1789 kn->kn_fop->f_detach(kn); 1790 knote_drop(kn, td); 1791 KQ_LOCK(kq); 1792 } 1793 } 1794 if (kq->kq_knhashmask != 0) { 1795 for (i = 0; i <= kq->kq_knhashmask; i++) { 1796 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1797 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1798 kq->kq_state |= KQ_FLUXWAIT; 1799 msleep(kq, &kq->kq_lock, PSOCK, 1800 "kqclo2", 0); 1801 continue; 1802 } 1803 kn->kn_status |= KN_INFLUX; 1804 KQ_UNLOCK(kq); 1805 if (!(kn->kn_status & KN_DETACHED)) 1806 kn->kn_fop->f_detach(kn); 1807 knote_drop(kn, td); 1808 KQ_LOCK(kq); 1809 } 1810 } 1811 } 1812 1813 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1814 kq->kq_state |= KQ_TASKDRAIN; 1815 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1816 } 1817 1818 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1819 selwakeuppri(&kq->kq_sel, PSOCK); 1820 if (!SEL_WAITING(&kq->kq_sel)) 1821 kq->kq_state &= ~KQ_SEL; 1822 } 1823 1824 KQ_UNLOCK(kq); 1825 } 1826 1827 static void 1828 kqueue_destroy(struct kqueue *kq) 1829 { 1830 1831 KASSERT(kq->kq_fdp == NULL, 1832 ("kqueue still attached to a file 
descriptor")); 1833 seldrain(&kq->kq_sel); 1834 knlist_destroy(&kq->kq_sel.si_note); 1835 mtx_destroy(&kq->kq_lock); 1836 1837 if (kq->kq_knhash != NULL) 1838 free(kq->kq_knhash, M_KQUEUE); 1839 if (kq->kq_knlist != NULL) 1840 free(kq->kq_knlist, M_KQUEUE); 1841 1842 funsetown(&kq->kq_sigio); 1843 } 1844 1845 /*ARGSUSED*/ 1846 static int 1847 kqueue_close(struct file *fp, struct thread *td) 1848 { 1849 struct kqueue *kq = fp->f_data; 1850 struct filedesc *fdp; 1851 int error; 1852 int filedesc_unlock; 1853 1854 if ((error = kqueue_acquire(fp, &kq))) 1855 return error; 1856 kqueue_drain(kq, td); 1857 1858 /* 1859 * We could be called due to the knote_drop() doing fdrop(), 1860 * called from kqueue_register(). In this case the global 1861 * lock is owned, and filedesc sx is locked before, to not 1862 * take the sleepable lock after non-sleepable. 1863 */ 1864 fdp = kq->kq_fdp; 1865 kq->kq_fdp = NULL; 1866 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 1867 FILEDESC_XLOCK(fdp); 1868 filedesc_unlock = 1; 1869 } else 1870 filedesc_unlock = 0; 1871 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 1872 if (filedesc_unlock) 1873 FILEDESC_XUNLOCK(fdp); 1874 1875 kqueue_destroy(kq); 1876 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 1877 crfree(kq->kq_cred); 1878 free(kq, M_KQUEUE); 1879 fp->f_data = NULL; 1880 1881 return (0); 1882 } 1883 1884 static int 1885 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 1886 { 1887 1888 kif->kf_type = KF_TYPE_KQUEUE; 1889 return (0); 1890 } 1891 1892 static void 1893 kqueue_wakeup(struct kqueue *kq) 1894 { 1895 KQ_OWNED(kq); 1896 1897 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1898 kq->kq_state &= ~KQ_SLEEP; 1899 wakeup(kq); 1900 } 1901 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1902 selwakeuppri(&kq->kq_sel, PSOCK); 1903 if (!SEL_WAITING(&kq->kq_sel)) 1904 kq->kq_state &= ~KQ_SEL; 1905 } 1906 if (!knlist_empty(&kq->kq_sel.si_note)) 1907 kqueue_schedtask(kq); 1908 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1909 pgsigio(&kq->kq_sigio, SIGIO, 0); 1910 } 1911 } 1912 1913 /* 1914 * Walk down a list of knotes, activating them if their event has triggered. 1915 * 1916 * There is a possibility to optimize in the case of one kq watching another. 1917 * Instead of scheduling a task to wake it up, you could pass enough state 1918 * down the chain to make up the parent kqueue. Make this code functional 1919 * first. 1920 */ 1921 void 1922 knote(struct knlist *list, long hint, int lockflags) 1923 { 1924 struct kqueue *kq; 1925 struct knote *kn, *tkn; 1926 int error; 1927 1928 if (list == NULL) 1929 return; 1930 1931 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 1932 1933 if ((lockflags & KNF_LISTLOCKED) == 0) 1934 list->kl_lock(list->kl_lockarg); 1935 1936 /* 1937 * If we unlock the list lock (and set KN_INFLUX), we can 1938 * eliminate the kqueue scheduling, but this will introduce 1939 * four lock/unlock's for each knote to test. Also, marker 1940 * would be needed to keep iteration position, since filters 1941 * or other threads could remove events. 1942 */ 1943 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 1944 kq = kn->kn_kq; 1945 KQ_LOCK(kq); 1946 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 1947 /* 1948 * Do not process the influx notes, except for 1949 * the influx coming from the kq unlock in the 1950 * kqueue_scan(). In the later case, we do 1951 * not interfere with the scan, since the code 1952 * fragment in kqueue_scan() locks the knlist, 1953 * and cannot proceed until we finished. 
1954 */ 1955 KQ_UNLOCK(kq); 1956 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 1957 kn->kn_status |= KN_INFLUX; 1958 KQ_UNLOCK(kq); 1959 error = kn->kn_fop->f_event(kn, hint); 1960 KQ_LOCK(kq); 1961 kn->kn_status &= ~KN_INFLUX; 1962 if (error) 1963 KNOTE_ACTIVATE(kn, 1); 1964 KQ_UNLOCK_FLUX(kq); 1965 } else { 1966 kn->kn_status |= KN_HASKQLOCK; 1967 if (kn->kn_fop->f_event(kn, hint)) 1968 KNOTE_ACTIVATE(kn, 1); 1969 kn->kn_status &= ~KN_HASKQLOCK; 1970 KQ_UNLOCK(kq); 1971 } 1972 } 1973 if ((lockflags & KNF_LISTLOCKED) == 0) 1974 list->kl_unlock(list->kl_lockarg); 1975 } 1976 1977 /* 1978 * add a knote to a knlist 1979 */ 1980 void 1981 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 1982 { 1983 KNL_ASSERT_LOCK(knl, islocked); 1984 KQ_NOTOWNED(kn->kn_kq); 1985 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == 1986 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED")); 1987 if (!islocked) 1988 knl->kl_lock(knl->kl_lockarg); 1989 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 1990 if (!islocked) 1991 knl->kl_unlock(knl->kl_lockarg); 1992 KQ_LOCK(kn->kn_kq); 1993 kn->kn_knlist = knl; 1994 kn->kn_status &= ~KN_DETACHED; 1995 KQ_UNLOCK(kn->kn_kq); 1996 } 1997 1998 static void 1999 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) 2000 { 2001 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked")); 2002 KNL_ASSERT_LOCK(knl, knlislocked); 2003 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2004 if (!kqislocked) 2005 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX, 2006 ("knlist_remove called w/o knote being KN_INFLUX or already removed")); 2007 if (!knlislocked) 2008 knl->kl_lock(knl->kl_lockarg); 2009 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2010 kn->kn_knlist = NULL; 2011 if (!knlislocked) 2012 knl->kl_unlock(knl->kl_lockarg); 2013 if (!kqislocked) 2014 KQ_LOCK(kn->kn_kq); 2015 kn->kn_status |= KN_DETACHED; 2016 if (!kqislocked) 2017 KQ_UNLOCK(kn->kn_kq); 2018 } 2019 2020 /* 2021 * remove knote from the specified knlist 2022 */ 2023 void 2024 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2025 { 2026 2027 knlist_remove_kq(knl, kn, islocked, 0); 2028 } 2029 2030 /* 2031 * remove knote from the specified knlist while in f_event handler. 
2032 */ 2033 void 2034 knlist_remove_inevent(struct knlist *knl, struct knote *kn) 2035 { 2036 2037 knlist_remove_kq(knl, kn, 1, 2038 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 2039 } 2040 2041 int 2042 knlist_empty(struct knlist *knl) 2043 { 2044 2045 KNL_ASSERT_LOCKED(knl); 2046 return SLIST_EMPTY(&knl->kl_list); 2047 } 2048 2049 static struct mtx knlist_lock; 2050 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2051 MTX_DEF); 2052 static void knlist_mtx_lock(void *arg); 2053 static void knlist_mtx_unlock(void *arg); 2054 2055 static void 2056 knlist_mtx_lock(void *arg) 2057 { 2058 2059 mtx_lock((struct mtx *)arg); 2060 } 2061 2062 static void 2063 knlist_mtx_unlock(void *arg) 2064 { 2065 2066 mtx_unlock((struct mtx *)arg); 2067 } 2068 2069 static void 2070 knlist_mtx_assert_locked(void *arg) 2071 { 2072 2073 mtx_assert((struct mtx *)arg, MA_OWNED); 2074 } 2075 2076 static void 2077 knlist_mtx_assert_unlocked(void *arg) 2078 { 2079 2080 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2081 } 2082 2083 static void 2084 knlist_rw_rlock(void *arg) 2085 { 2086 2087 rw_rlock((struct rwlock *)arg); 2088 } 2089 2090 static void 2091 knlist_rw_runlock(void *arg) 2092 { 2093 2094 rw_runlock((struct rwlock *)arg); 2095 } 2096 2097 static void 2098 knlist_rw_assert_locked(void *arg) 2099 { 2100 2101 rw_assert((struct rwlock *)arg, RA_LOCKED); 2102 } 2103 2104 static void 2105 knlist_rw_assert_unlocked(void *arg) 2106 { 2107 2108 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2109 } 2110 2111 void 2112 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2113 void (*kl_unlock)(void *), 2114 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 2115 { 2116 2117 if (lock == NULL) 2118 knl->kl_lockarg = &knlist_lock; 2119 else 2120 knl->kl_lockarg = lock; 2121 2122 if (kl_lock == NULL) 2123 knl->kl_lock = knlist_mtx_lock; 2124 else 2125 knl->kl_lock = kl_lock; 2126 if (kl_unlock == NULL) 2127 knl->kl_unlock = knlist_mtx_unlock; 2128 else 2129 knl->kl_unlock = kl_unlock; 2130 if (kl_assert_locked == NULL) 2131 knl->kl_assert_locked = knlist_mtx_assert_locked; 2132 else 2133 knl->kl_assert_locked = kl_assert_locked; 2134 if (kl_assert_unlocked == NULL) 2135 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2136 else 2137 knl->kl_assert_unlocked = kl_assert_unlocked; 2138 2139 SLIST_INIT(&knl->kl_list); 2140 } 2141 2142 void 2143 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2144 { 2145 2146 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2147 } 2148 2149 void 2150 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2151 { 2152 2153 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2154 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2155 } 2156 2157 void 2158 knlist_destroy(struct knlist *knl) 2159 { 2160 2161 #ifdef INVARIANTS 2162 /* 2163 * if we run across this error, we need to find the offending 2164 * driver and have it call knlist_clear or knlist_delete. 2165 */ 2166 if (!SLIST_EMPTY(&knl->kl_list)) 2167 printf("WARNING: destroying knlist w/ knotes on it!\n"); 2168 #endif 2169 2170 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 2171 SLIST_INIT(&knl->kl_list); 2172 } 2173 2174 /* 2175 * Even if we are locked, we may need to drop the lock to allow any influx 2176 * knotes time to "settle". 
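 *
 * Callers typically reach this through the knlist_clear() and
 * knlist_delete() wrappers from sys/event.h, i.e. with killkn of 0 or 1:
 * clearing marks the knotes EV_EOF | EV_ONESHOT so they are reaped on the
 * next scan, while deleting drops them immediately.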
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd; must be called with the
 * FILEDESC lock held.  This prevents a race where a new fd comes along
 * and occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}
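/*
 * Illustrative teardown sketch: before an object carrying a knlist is freed,
 * its owner is expected to flush attached knotes and then destroy the list,
 * roughly as follows (sc_note is a hypothetical field):
 *
 *	knlist_clear(&sc->sc_note, 0);
 *	knlist_destroy(&sc->sc_note);
 *
 * knlist_clear() and knlist_delete() are the <sys/event.h> wrappers around
 * knlist_cleardel() above; skipping this step is what trips the INVARIANTS
 * warning in knlist_destroy().
 */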
/*
 * A knote must already have been detached using the f_detach method.
 * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 * to prevent other threads from removing the knote concurrently.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KQ_NOTOWNED(kq);
	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
	    ("knote_drop called without KN_INFLUX set in kn_status"));

	KQ_LOCK(kq);
	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{

	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
	    M_ZERO));
}

static void
knote_free(struct knote *kn)
{

	uma_zfree(knote_zone, kn);
}

/*
 * Register the given kevent with the kqueue specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);

	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);

	return error;
}
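/*
 * Illustrative sketch of how a kernel subsystem might use kqfd_register() to
 * post a completion event to a kqueue owned by the current process and named
 * by a user-supplied descriptor.  user_kqfd, job_id and job_cookie are
 * hypothetical; EVFILT_USER is used here only as a self-contained example.
 *
 *	struct kevent kev;
 *	int error;
 *
 *	EV_SET(&kev, job_id, EVFILT_USER, EV_ADD | EV_ONESHOT,
 *	    NOTE_TRIGGER, 0, job_cookie);
 *	error = kqfd_register(user_kqfd, &kev, td, 1);
 *
 * The final argument selects whether the registration may sleep for memory
 * (waitok), matching the waitok parameter threaded through kqueue_register()
 * and kqueue_expand() above.
 */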