/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/stdatomic.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct knote	*knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while(0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
#define KN_LIST_LOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define KN_LIST_UNLOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while(0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64		/* XXX should be tunable */
#endif

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops },		/* EVFILT_READ */
	{ &file_filtops },		/* EVFILT_WRITE */
	{ &null_filtops },		/* EVFILT_AIO */
	{ &file_filtops },		/* EVFILT_VNODE */
	{ &proc_filtops },		/* EVFILT_PROC */
	{ &sig_filtops },		/* EVFILT_SIGNAL */
	{ &timer_filtops },		/* EVFILT_TIMER */
	{ &file_filtops },		/* EVFILT_PROCDESC */
	{ &fs_filtops },		/* EVFILT_FS */
	{ &null_filtops },		/* EVFILT_LIO */
	{ &user_filtops },		/* EVFILT_USER */
	{ &null_filtops },		/* EVFILT_SENDFILE */
};

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		immediate = 1;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel.
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	if (immediate == 0)
		knlist_add(&p->p_klist, kn, 1);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}
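
/*
 * Example (an illustrative userspace sketch, not part of this file's
 * build): waiting for a process to exit with EVFILT_PROC/NOTE_EXIT,
 * the path handled by filt_procattach() above.  The function name is
 * invented for illustration and error handling is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	int
 *	wait_for_exit(pid_t pid)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		kq = kqueue();
 *		EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT,
 *		    NOTE_EXIT, 0, NULL);
 *		kevent(kq, &kev, 1, NULL, 0, NULL);
 *		kevent(kq, NULL, 0, &kev, 1, NULL);
 *		return ((int)kev.data);
 *	}
 *
 * On return kev.data holds the exit status that filt_proc() copied
 * from p_xstat; registering against an already-dead (zombie) child
 * still works because of the "immediate" case above.
 */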
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	p = kn->kn_ptr.p_proc;
	knlist_remove(&p->p_klist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		if (!(kn->kn_status & KN_DETACHED))
			knlist_remove_inevent(&p->p_klist, kn);
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = p->p_xstat;
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  Mostly does the same as knote(),
 * activating all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether
 * the user wants to track the new process.  If so, attach a new knote
 * to it, and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
			continue;
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register a new event to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register a knote with
		 * new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn->kn_status &= ~KN_INFLUX;
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */
#define NOTE_TIMER_PRECMASK	(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
				NOTE_NSECONDS)

static __inline sbintime_t
timer2sbintime(intptr_t data, int flags)
{
	sbintime_t modifier;

	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
		modifier = SBT_1S;
		break;
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		modifier = SBT_1MS;
		break;
	case NOTE_USECONDS:
		modifier = SBT_1US;
		break;
	case NOTE_NSECONDS:
		modifier = SBT_1NS;
		break;
	default:
		return (-1);
	}

#ifdef __LP64__
	if (data > SBT_MAX / modifier)
		return (SBT_MAX);
#endif
	return (modifier * data);
}

static void
filt_timerexpire(void *knx)
{
	struct callout *calloutp;
	struct knote *kn;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
		*kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata,
		    kn->kn_sfflags);
		callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
		    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
	}
}

/*
 * data contains the amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	sbintime_t to;
	unsigned int ncallouts;

	if ((intptr_t)kn->kn_sdata < 0)
		return (EINVAL);
	if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far */
	if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (to < 0)
		return (EINVAL);

	ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
	do {
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
	    &ncallouts, ncallouts + 1, memory_order_relaxed,
	    memory_order_relaxed));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK);
	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp, CALLOUT_MPSAFE);
	kn->kn_hook = calloutp;
	*kn->kn_ptr.p_nexttime = to + sbinuptime();
	callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
	    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;
	unsigned int old;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	free(calloutp, M_KQUEUE);
	free(kn->kn_ptr.p_nexttime, M_KQUEUE);
	old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
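
/*
 * Example (an illustrative userspace sketch, not part of this file's
 * build): a periodic 500 ms timer using the precision flags accepted
 * by filt_timerattach() above.  NOTE_MSECONDS is also what an fflags
 * of 0 defaults to in timer2sbintime().
 *
 *	#include <sys/event.h>
 *	#include <stdio.h>
 *
 *	struct kevent kev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	for (;;) {
 *		kevent(kq, NULL, 0, &kev, 1, NULL);
 *		printf("%jd expirations\n", (intmax_t)kev.data);
 *	}
 *
 * Because EV_CLEAR is set automatically, kev.data reports the number
 * of expirations since the event was last read.
 */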
static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
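
/*
 * Example (an illustrative userspace sketch, not part of this file's
 * build): a thread parks in kevent() on an EVFILT_USER knote and
 * another thread wakes it with NOTE_TRIGGER, the path handled by
 * filt_usertouch() above.
 *
 *	struct kevent kev;
 *
 *	(in the waiter)
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *
 *	(in the trigger)
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The NOTE_FF* control bits (NOTE_FFAND, NOTE_FFOR, NOTE_FFCOPY) can
 * be combined with the trigger to update the saved fflags, as done in
 * the EVENT_REGISTER case above.
 */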
int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct proc *p;
	struct ucred *cred;
	int fd, error;

	p = td->td_proc;
	cred = td->td_ucred;
	crhold(cred);
	PROC_LOCK(p);
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td->td_proc,
	    RLIMIT_KQUEUES))) {
		PROC_UNLOCK(p);
		crfree(cred);
		return (ENOMEM);
	}
	PROC_UNLOCK(p);

	fdp = p->p_fd;
	error = falloc(td, &fp, &fd, 0);
	if (error)
		goto done2;

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	kq->kq_fdp = fdp;
	kq->kq_cred = cred;
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
done2:
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		crfree(cred);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct timespec ts, *tsp;
	struct kevent_copyops k_ops = { uap,
					kevent_copyout,
					kevent_copyin};
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    &k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}
/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int i, n, nerrors, error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		goto done_norel;

	nerrors = 0;

	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			goto done;
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) k_ops->k_copyout(k_ops->arg,
					    kevp, 1);
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
done:
	kqueue_release(kq, 0);
done_norel:
	fdrop(fp, td);
	return (error);
}
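
/*
 * Example (a hypothetical in-kernel sketch, not part of this file's
 * build): kern_kevent() is written against struct kevent_copyops, so
 * a kernel consumer can supply copy routines that move kevents to and
 * from a kernel buffer instead of copyin()/copyout().  The structure
 * layout matches the initializer in sys_kevent() above; all names
 * below are invented for illustration.
 *
 *	struct kev_kbuf {
 *		struct kevent *kb_changelist;
 *		struct kevent *kb_eventlist;
 *	};
 *
 *	static int
 *	kev_kcopyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		struct kev_kbuf *kb = arg;
 *
 *		bcopy(kb->kb_changelist, kevp, count * sizeof(*kevp));
 *		kb->kb_changelist += count;
 *		return (0);
 *	}
 *
 *	static int
 *	kev_kcopyout(void *arg, struct kevent *kevp, int count)
 *	{
 *		struct kev_kbuf *kb = arg;
 *
 *		bcopy(kevp, kb->kb_eventlist, count * sizeof(*kevp));
 *		kb->kb_eventlist += count;
 *		return (0);
 *	}
 *
 *	struct kevent_copyops k_ops = { &kb, kev_kcopyout, kev_kcopyin };
 *
 *	error = kern_kevent(td, fd, nchanges, nevents, &k_ops, NULL);
 */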
int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}
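
/*
 * Example (a hypothetical module sketch, not part of this file's
 * build): a subsystem that owns one of the EVFILT_* slots can install
 * its filterops at load time and remove them at unload; EVFILT_FOO
 * and the filt_foo* handlers below are stand-ins, not real symbols.
 *
 *	static struct filterops foo_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = filt_fooattach,
 *		.f_detach = filt_foodetach,
 *		.f_event = filt_foo,
 *	};
 *
 *	error = kqueue_add_filteropts(EVFILT_FOO, &foo_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_FOO);
 *
 * kqueue_del_filteropts() fails with EBUSY while any knote still
 * holds a reference taken by kqueue_fo_find().
 */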
/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence if memory allocation should wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);	/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident,
		    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * Apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE);
			kn->kn_status = KN_INFLUX|KN_DETACHED;

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
			KN_LIST_LOCK(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		if (!(kn->kn_status & KN_DETACHED))
			kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_INFLUX | KN_SCAN;
	KQ_UNLOCK(kq);
	KN_LIST_LOCK(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, i.e. filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	event = kn->kn_fop->f_event(kn, 0);
	KQ_LOCK(kq);
	if (event)
		KNOTE_ACTIVATE(kn, 1);
	kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
	KN_LIST_UNLOCK(kn);

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	if (tkn != NULL)
		knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
	int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are INFLUX.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	if (marker == NULL) {
		error = ENOMEM;
		goto done_nl;
	}
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX | KN_SCAN;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX |
				    KN_SCAN);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}
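
/*
 * Example (an illustrative userspace sketch, not part of this file's
 * build): because kqueue_kqfilter() above accepts EVFILT_READ, one
 * kqueue can be monitored by another (or by poll()); the inner kqueue
 * reports readable while its kq_count is nonzero.
 *
 *	int inner, outer;
 *	struct kevent kev;
 *
 *	inner = kqueue();
 *	outer = kqueue();
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *	kevent(outer, NULL, 0, &kev, 1, NULL);
 *
 * After the second kevent() call returns, kev.data holds the number
 * of events pending on the inner kqueue, as filled in by
 * filt_kqueue() above.
 */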
1695 * 1696 * XXX - This is needed for libc_r. 1697 */ 1698 st->st_mode = S_IFIFO; 1699 return (0); 1700 } 1701 1702 /*ARGSUSED*/ 1703 static int 1704 kqueue_close(struct file *fp, struct thread *td) 1705 { 1706 struct kqueue *kq = fp->f_data; 1707 struct filedesc *fdp; 1708 struct knote *kn; 1709 int i; 1710 int error; 1711 int filedesc_unlock; 1712 1713 if ((error = kqueue_acquire(fp, &kq))) 1714 return error; 1715 1716 filedesc_unlock = 0; 1717 KQ_LOCK(kq); 1718 1719 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1720 ("kqueue already closing")); 1721 kq->kq_state |= KQ_CLOSING; 1722 if (kq->kq_refcnt > 1) 1723 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1724 1725 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1726 fdp = kq->kq_fdp; 1727 1728 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1729 ("kqueue's knlist not empty")); 1730 1731 for (i = 0; i < kq->kq_knlistsize; i++) { 1732 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1733 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1734 kq->kq_state |= KQ_FLUXWAIT; 1735 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1736 continue; 1737 } 1738 kn->kn_status |= KN_INFLUX; 1739 KQ_UNLOCK(kq); 1740 if (!(kn->kn_status & KN_DETACHED)) 1741 kn->kn_fop->f_detach(kn); 1742 knote_drop(kn, td); 1743 KQ_LOCK(kq); 1744 } 1745 } 1746 if (kq->kq_knhashmask != 0) { 1747 for (i = 0; i <= kq->kq_knhashmask; i++) { 1748 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1749 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1750 kq->kq_state |= KQ_FLUXWAIT; 1751 msleep(kq, &kq->kq_lock, PSOCK, 1752 "kqclo2", 0); 1753 continue; 1754 } 1755 kn->kn_status |= KN_INFLUX; 1756 KQ_UNLOCK(kq); 1757 if (!(kn->kn_status & KN_DETACHED)) 1758 kn->kn_fop->f_detach(kn); 1759 knote_drop(kn, td); 1760 KQ_LOCK(kq); 1761 } 1762 } 1763 } 1764 1765 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1766 kq->kq_state |= KQ_TASKDRAIN; 1767 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1768 } 1769 1770 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1771 selwakeuppri(&kq->kq_sel, PSOCK); 1772 if (!SEL_WAITING(&kq->kq_sel)) 1773 kq->kq_state &= ~KQ_SEL; 1774 } 1775 1776 KQ_UNLOCK(kq); 1777 1778 /* 1779 * We could be called due to the knote_drop() doing fdrop(), 1780 * called from kqueue_register(). In this case the global 1781 * lock is owned, and filedesc sx is locked before, to not 1782 * take the sleepable lock after non-sleepable. 
	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
		FILEDESC_XLOCK(fdp);
		filedesc_unlock = 1;
	} else
		filedesc_unlock = 0;
	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(fdp);

	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);
	kq->kq_fdp = NULL;

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
	crfree(kq->kq_cred);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static int
kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{

	kif->kf_type = KF_TYPE_KQUEUE;
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to make up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int lockflags)
{
	struct kqueue *kq;
	struct knote *kn;
	int error;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);

	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and set KN_INFLUX), we can
	 * eliminate the kqueue scheduling, but this will introduce
	 * four lock/unlock's for each knote to test.  If we do,
	 * continue to use SLIST_FOREACH; SLIST_FOREACH_SAFE is not
	 * safe in our case, it is only safe if you want to remove
	 * the current item, which we are not doing.
	 */
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
			/*
			 * Do not process the influx notes, except for
			 * the influx coming from the kq unlock in the
			 * kqueue_scan().  In the latter case, we do
			 * not interfere with the scan, since the code
			 * fragment in kqueue_scan() locks the knlist,
			 * and cannot proceed until we finish.
			 */
			KQ_UNLOCK(kq);
		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			error = kn->kn_fop->f_event(kn, hint);
			KQ_LOCK(kq);
			kn->kn_status &= ~KN_INFLUX;
			if (error)
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK_FLUX(kq);
		} else {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, hint))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
		}
	}
	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_unlock(list->kl_lockarg);
}

/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{
	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
{
	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	if (!kqislocked)
		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
		    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		knl->kl_unlock(knl->kl_lockarg);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * remove knote from the specified knlist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

/*
 * remove knote from the specified knlist while in f_event handler
 */
void
knlist_remove_inevent(struct knlist *knl, struct knote *kn)
{

	knlist_remove_kq(knl, kn, 1,
	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
}

int
knlist_empty(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	return SLIST_EMPTY(&knl->kl_list);
}

static struct mtx	knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
	MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);

static void
knlist_mtx_lock(void *arg)
{

	mtx_lock((struct mtx *)arg);
}

static void
knlist_mtx_unlock(void *arg)
{

	mtx_unlock((struct mtx *)arg);
}

static void
knlist_mtx_assert_locked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_OWNED);
}

static void
knlist_mtx_assert_unlocked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
}

static void
knlist_rw_rlock(void *arg)
{

	rw_rlock((struct rwlock *)arg);
}

static void
knlist_rw_runlock(void *arg)
{

	rw_runlock((struct rwlock *)arg);
}

static void
knlist_rw_assert_locked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_LOCKED);
}

static void
knlist_rw_assert_unlocked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
}

void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_locked == NULL)
		knl->kl_assert_locked = knlist_mtx_assert_locked;
	else
		knl->kl_assert_locked = kl_assert_locked;
	if (kl_assert_unlocked == NULL)
		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
	else
		knl->kl_assert_unlocked = kl_assert_unlocked;

	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
}

void
knlist_destroy(struct knlist *knl)
{

#ifdef INVARIANTS
	/*
	 * if we run across this error, we need to find the offending
	 * driver and have it call knlist_clear or knlist_delete.
	 */
	if (!SLIST_EMPTY(&knl->kl_list))
		printf("WARNING: destroying knlist w/ knotes on it!\n");
#endif

	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
	SLIST_INIT(&knl->kl_list);
}
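
/*
 * Example (a hypothetical driver sketch, not part of this file's
 * build): the usual kernel consumer embeds a knlist protected by its
 * own mutex, initializes it with knlist_init_mtx(), posts events with
 * knote(), and tears it down with knlist_clear()/knlist_destroy().
 * The softc layout and foo_* names are invented for illustration.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct knlist	sc_note;
 *	};
 *
 *	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);	(at attach)
 *
 *	mtx_lock(&sc->sc_mtx);				(event posted)
 *	knote(&sc->sc_note, 0, KNF_LISTLOCKED);
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	knlist_clear(&sc->sc_note, 0);			(at detach)
 *	knlist_destroy(&sc->sc_note);
 */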
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd; must be called with the
 * FILEDESC lock held.  This prevents a race where a new fd comes along
 * and occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}

/*
 * knote must already have been detached using the f_detach method.
 * No lock needs to be held; it is assumed that the KN_INFLUX flag is
 * set to prevent other removal.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KQ_NOTOWNED(kq);
	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
	    ("knote_drop called without KN_INFLUX set in kn_status"));

	KQ_LOCK(kq);
	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{
	return ((struct knote *)uma_zalloc(knote_zone,
	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
}

static void
knote_free(struct knote *kn)
{
	if (kn != NULL)
		uma_zfree(knote_zone, kn);
}

/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);

	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);

	return error;
}