1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * Copyright (c) 2009 Apple, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_ktrace.h" 33 #include "opt_kqueue.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/capsicum.h> 38 #include <sys/kernel.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/rwlock.h> 42 #include <sys/proc.h> 43 #include <sys/malloc.h> 44 #include <sys/unistd.h> 45 #include <sys/file.h> 46 #include <sys/filedesc.h> 47 #include <sys/filio.h> 48 #include <sys/fcntl.h> 49 #include <sys/kthread.h> 50 #include <sys/selinfo.h> 51 #include <sys/stdatomic.h> 52 #include <sys/queue.h> 53 #include <sys/event.h> 54 #include <sys/eventvar.h> 55 #include <sys/poll.h> 56 #include <sys/protosw.h> 57 #include <sys/resourcevar.h> 58 #include <sys/sigio.h> 59 #include <sys/signalvar.h> 60 #include <sys/socket.h> 61 #include <sys/socketvar.h> 62 #include <sys/stat.h> 63 #include <sys/sysctl.h> 64 #include <sys/sysproto.h> 65 #include <sys/syscallsubr.h> 66 #include <sys/taskqueue.h> 67 #include <sys/uio.h> 68 #include <sys/user.h> 69 #ifdef KTRACE 70 #include <sys/ktrace.h> 71 #endif 72 73 #include <vm/uma.h> 74 75 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 76 77 /* 78 * This lock is used if multiple kq locks are required. This possibly 79 * should be made into a per proc lock. 80 */ 81 static struct mtx kq_global; 82 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 83 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 84 if (!haslck) \ 85 mtx_lock(lck); \ 86 haslck = 1; \ 87 } while (0) 88 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 89 if (haslck) \ 90 mtx_unlock(lck); \ 91 haslck = 0; \ 92 } while (0) 93 94 TASKQUEUE_DEFINE_THREAD(kqueue); 95 96 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 97 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 98 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 99 struct thread *td, int waitok); 100 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 101 static void kqueue_release(struct kqueue *kq, int locked); 102 static void kqueue_destroy(struct kqueue *kq); 103 static void kqueue_drain(struct kqueue *kq, struct thread *td); 104 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 105 uintptr_t ident, int waitok); 106 static void kqueue_task(void *arg, int pending); 107 static int kqueue_scan(struct kqueue *kq, int maxevents, 108 struct kevent_copyops *k_ops, 109 const struct timespec *timeout, 110 struct kevent *keva, struct thread *td); 111 static void kqueue_wakeup(struct kqueue *kq); 112 static struct filterops *kqueue_fo_find(int filt); 113 static void kqueue_fo_release(int filt); 114 115 static fo_ioctl_t kqueue_ioctl; 116 static fo_poll_t kqueue_poll; 117 static fo_kqfilter_t kqueue_kqfilter; 118 static fo_stat_t kqueue_stat; 119 static fo_close_t kqueue_close; 120 static fo_fill_kinfo_t kqueue_fill_kinfo; 121 122 static struct fileops kqueueops = { 123 .fo_read = invfo_rdwr, 124 .fo_write = invfo_rdwr, 125 .fo_truncate = invfo_truncate, 126 .fo_ioctl = kqueue_ioctl, 127 .fo_poll = kqueue_poll, 128 .fo_kqfilter = kqueue_kqfilter, 129 .fo_stat = kqueue_stat, 130 .fo_close = kqueue_close, 131 .fo_chmod = invfo_chmod, 132 .fo_chown = invfo_chown, 133 .fo_sendfile = invfo_sendfile, 134 .fo_fill_kinfo = kqueue_fill_kinfo, 135 }; 136 137 static int knote_attach(struct knote *kn, struct kqueue *kq); 138 static void knote_drop(struct knote *kn, struct thread *td); 139 static void knote_enqueue(struct knote *kn); 140 static void knote_dequeue(struct knote *kn); 141 static void knote_init(void); 142 static struct knote *knote_alloc(int waitok); 143 static void knote_free(struct knote *kn); 144 145 static void filt_kqdetach(struct knote *kn); 146 static int filt_kqueue(struct knote *kn, long hint); 147 static int filt_procattach(struct knote *kn); 148 static void filt_procdetach(struct knote *kn); 149 static int filt_proc(struct knote *kn, long hint); 150 static int filt_fileattach(struct knote *kn); 151 static void filt_timerexpire(void *knx); 152 static int filt_timerattach(struct knote *kn); 153 static void filt_timerdetach(struct knote *kn); 154 static int filt_timer(struct knote *kn, long hint); 155 static int filt_userattach(struct knote *kn); 156 static void filt_userdetach(struct knote *kn); 157 static int filt_user(struct knote *kn, long hint); 158 static void filt_usertouch(struct knote *kn, struct kevent *kev, 159 u_long type); 160 161 static struct filterops file_filtops = { 162 .f_isfd = 1, 163 .f_attach = filt_fileattach, 164 }; 165 static struct filterops kqread_filtops = { 166 .f_isfd = 1, 167 .f_detach = filt_kqdetach, 168 .f_event = filt_kqueue, 169 }; 170 /* XXX - move to kern_proc.c? */ 171 static struct filterops proc_filtops = { 172 .f_isfd = 0, 173 .f_attach = filt_procattach, 174 .f_detach = filt_procdetach, 175 .f_event = filt_proc, 176 }; 177 static struct filterops timer_filtops = { 178 .f_isfd = 0, 179 .f_attach = filt_timerattach, 180 .f_detach = filt_timerdetach, 181 .f_event = filt_timer, 182 }; 183 static struct filterops user_filtops = { 184 .f_attach = filt_userattach, 185 .f_detach = filt_userdetach, 186 .f_event = filt_user, 187 .f_touch = filt_usertouch, 188 }; 189 190 static uma_zone_t knote_zone; 191 static atomic_uint kq_ncallouts = ATOMIC_VAR_INIT(0); 192 static unsigned int kq_calloutmax = 4 * 1024; 193 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 194 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 195 196 /* XXX - ensure not KN_INFLUX?? */ 197 #define KNOTE_ACTIVATE(kn, islock) do { \ 198 if ((islock)) \ 199 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 200 else \ 201 KQ_LOCK((kn)->kn_kq); \ 202 (kn)->kn_status |= KN_ACTIVE; \ 203 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 204 knote_enqueue((kn)); \ 205 if (!(islock)) \ 206 KQ_UNLOCK((kn)->kn_kq); \ 207 } while(0) 208 #define KQ_LOCK(kq) do { \ 209 mtx_lock(&(kq)->kq_lock); \ 210 } while (0) 211 #define KQ_FLUX_WAKEUP(kq) do { \ 212 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 213 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 214 wakeup((kq)); \ 215 } \ 216 } while (0) 217 #define KQ_UNLOCK_FLUX(kq) do { \ 218 KQ_FLUX_WAKEUP(kq); \ 219 mtx_unlock(&(kq)->kq_lock); \ 220 } while (0) 221 #define KQ_UNLOCK(kq) do { \ 222 mtx_unlock(&(kq)->kq_lock); \ 223 } while (0) 224 #define KQ_OWNED(kq) do { \ 225 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 226 } while (0) 227 #define KQ_NOTOWNED(kq) do { \ 228 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 229 } while (0) 230 #define KN_LIST_LOCK(kn) do { \ 231 if (kn->kn_knlist != NULL) \ 232 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg); \ 233 } while (0) 234 #define KN_LIST_UNLOCK(kn) do { \ 235 if (kn->kn_knlist != NULL) \ 236 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg); \ 237 } while (0) 238 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 239 if (islocked) \ 240 KNL_ASSERT_LOCKED(knl); \ 241 else \ 242 KNL_ASSERT_UNLOCKED(knl); \ 243 } while (0) 244 #ifdef INVARIANTS 245 #define KNL_ASSERT_LOCKED(knl) do { \ 246 knl->kl_assert_locked((knl)->kl_lockarg); \ 247 } while (0) 248 #define KNL_ASSERT_UNLOCKED(knl) do { \ 249 knl->kl_assert_unlocked((knl)->kl_lockarg); \ 250 } while (0) 251 #else /* !INVARIANTS */ 252 #define KNL_ASSERT_LOCKED(knl) do {} while(0) 253 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 254 #endif /* INVARIANTS */ 255 256 #ifndef KN_HASHSIZE 257 #define KN_HASHSIZE 64 /* XXX should be tunable */ 258 #endif 259 260 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 261 262 static int 263 filt_nullattach(struct knote *kn) 264 { 265 266 return (ENXIO); 267 }; 268 269 struct filterops null_filtops = { 270 .f_isfd = 0, 271 .f_attach = filt_nullattach, 272 }; 273 274 /* XXX - make SYSINIT to add these, and move into respective modules. */ 275 extern struct filterops sig_filtops; 276 extern struct filterops fs_filtops; 277 278 /* 279 * Table for for all system-defined filters. 280 */ 281 static struct mtx filterops_lock; 282 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 283 MTX_DEF); 284 static struct { 285 struct filterops *for_fop; 286 int for_nolock; 287 int for_refcnt; 288 } sysfilt_ops[EVFILT_SYSCOUNT] = { 289 { &file_filtops, 1 }, /* EVFILT_READ */ 290 { &file_filtops, 1 }, /* EVFILT_WRITE */ 291 { &null_filtops }, /* EVFILT_AIO */ 292 { &file_filtops, 1 }, /* EVFILT_VNODE */ 293 { &proc_filtops, 1 }, /* EVFILT_PROC */ 294 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 295 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 296 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 297 { &fs_filtops, 1 }, /* EVFILT_FS */ 298 { &null_filtops }, /* EVFILT_LIO */ 299 { &user_filtops, 1 }, /* EVFILT_USER */ 300 { &null_filtops }, /* EVFILT_SENDFILE */ 301 }; 302 303 /* 304 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 305 * method. 306 */ 307 static int 308 filt_fileattach(struct knote *kn) 309 { 310 311 return (fo_kqfilter(kn->kn_fp, kn)); 312 } 313 314 /*ARGSUSED*/ 315 static int 316 kqueue_kqfilter(struct file *fp, struct knote *kn) 317 { 318 struct kqueue *kq = kn->kn_fp->f_data; 319 320 if (kn->kn_filter != EVFILT_READ) 321 return (EINVAL); 322 323 kn->kn_status |= KN_KQUEUE; 324 kn->kn_fop = &kqread_filtops; 325 knlist_add(&kq->kq_sel.si_note, kn, 0); 326 327 return (0); 328 } 329 330 static void 331 filt_kqdetach(struct knote *kn) 332 { 333 struct kqueue *kq = kn->kn_fp->f_data; 334 335 knlist_remove(&kq->kq_sel.si_note, kn, 0); 336 } 337 338 /*ARGSUSED*/ 339 static int 340 filt_kqueue(struct knote *kn, long hint) 341 { 342 struct kqueue *kq = kn->kn_fp->f_data; 343 344 kn->kn_data = kq->kq_count; 345 return (kn->kn_data > 0); 346 } 347 348 /* XXX - move to kern_proc.c? */ 349 static int 350 filt_procattach(struct knote *kn) 351 { 352 struct proc *p; 353 int immediate; 354 int error; 355 356 immediate = 0; 357 p = pfind(kn->kn_id); 358 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 359 p = zpfind(kn->kn_id); 360 immediate = 1; 361 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 362 immediate = 1; 363 } 364 365 if (p == NULL) 366 return (ESRCH); 367 if ((error = p_cansee(curthread, p))) { 368 PROC_UNLOCK(p); 369 return (error); 370 } 371 372 kn->kn_ptr.p_proc = p; 373 kn->kn_flags |= EV_CLEAR; /* automatically set */ 374 375 /* 376 * Internal flag indicating registration done by kernel for the 377 * purposes of getting a NOTE_CHILD notification. 378 */ 379 if (kn->kn_flags & EV_FLAG2) { 380 kn->kn_flags &= ~EV_FLAG2; 381 kn->kn_data = kn->kn_sdata; /* ppid */ 382 kn->kn_fflags = NOTE_CHILD; 383 kn->kn_sfflags &= ~NOTE_EXIT; 384 immediate = 1; /* Force immediate activation of child note. */ 385 } 386 /* 387 * Internal flag indicating registration done by kernel (for other than 388 * NOTE_CHILD). 389 */ 390 if (kn->kn_flags & EV_FLAG1) { 391 kn->kn_flags &= ~EV_FLAG1; 392 } 393 394 if (immediate == 0) 395 knlist_add(&p->p_klist, kn, 1); 396 397 /* 398 * Immediately activate any child notes or, in the case of a zombie 399 * target process, exit notes. The latter is necessary to handle the 400 * case where the target process, e.g. a child, dies before the kevent 401 * is registered. 402 */ 403 if (immediate && filt_proc(kn, NOTE_EXIT)) 404 KNOTE_ACTIVATE(kn, 0); 405 406 PROC_UNLOCK(p); 407 408 return (0); 409 } 410 411 /* 412 * The knote may be attached to a different process, which may exit, 413 * leaving nothing for the knote to be attached to. So when the process 414 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 415 * it will be deleted when read out. However, as part of the knote deletion, 416 * this routine is called, so a check is needed to avoid actually performing 417 * a detach, because the original process does not exist any more. 418 */ 419 /* XXX - move to kern_proc.c? */ 420 static void 421 filt_procdetach(struct knote *kn) 422 { 423 struct proc *p; 424 425 p = kn->kn_ptr.p_proc; 426 knlist_remove(&p->p_klist, kn, 0); 427 kn->kn_ptr.p_proc = NULL; 428 } 429 430 /* XXX - move to kern_proc.c? */ 431 static int 432 filt_proc(struct knote *kn, long hint) 433 { 434 struct proc *p; 435 u_int event; 436 437 p = kn->kn_ptr.p_proc; 438 /* Mask off extra data. */ 439 event = (u_int)hint & NOTE_PCTRLMASK; 440 441 /* If the user is interested in this event, record it. */ 442 if (kn->kn_sfflags & event) 443 kn->kn_fflags |= event; 444 445 /* Process is gone, so flag the event as finished. */ 446 if (event == NOTE_EXIT) { 447 if (!(kn->kn_status & KN_DETACHED)) 448 knlist_remove_inevent(&p->p_klist, kn); 449 kn->kn_flags |= EV_EOF | EV_ONESHOT; 450 kn->kn_ptr.p_proc = NULL; 451 if (kn->kn_fflags & NOTE_EXIT) 452 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 453 if (kn->kn_fflags == 0) 454 kn->kn_flags |= EV_DROP; 455 return (1); 456 } 457 458 return (kn->kn_fflags != 0); 459 } 460 461 /* 462 * Called when the process forked. It mostly does the same as the 463 * knote(), activating all knotes registered to be activated when the 464 * process forked. Additionally, for each knote attached to the 465 * parent, check whether user wants to track the new process. If so 466 * attach a new knote to it, and immediately report an event with the 467 * child's pid. 468 */ 469 void 470 knote_fork(struct knlist *list, int pid) 471 { 472 struct kqueue *kq; 473 struct knote *kn; 474 struct kevent kev; 475 int error; 476 477 if (list == NULL) 478 return; 479 list->kl_lock(list->kl_lockarg); 480 481 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 482 /* 483 * XXX - Why do we skip the kn if it is _INFLUX? Does this 484 * mean we will not properly wake up some notes? 485 */ 486 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 487 continue; 488 kq = kn->kn_kq; 489 KQ_LOCK(kq); 490 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 491 KQ_UNLOCK(kq); 492 continue; 493 } 494 495 /* 496 * The same as knote(), activate the event. 497 */ 498 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 499 kn->kn_status |= KN_HASKQLOCK; 500 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 501 KNOTE_ACTIVATE(kn, 1); 502 kn->kn_status &= ~KN_HASKQLOCK; 503 KQ_UNLOCK(kq); 504 continue; 505 } 506 507 /* 508 * The NOTE_TRACK case. In addition to the activation 509 * of the event, we need to register new events to 510 * track the child. Drop the locks in preparation for 511 * the call to kqueue_register(). 512 */ 513 kn->kn_status |= KN_INFLUX; 514 KQ_UNLOCK(kq); 515 list->kl_unlock(list->kl_lockarg); 516 517 /* 518 * Activate existing knote and register tracking knotes with 519 * new process. 520 * 521 * First register a knote to get just the child notice. This 522 * must be a separate note from a potential NOTE_EXIT 523 * notification since both NOTE_CHILD and NOTE_EXIT are defined 524 * to use the data field (in conflicting ways). 525 */ 526 kev.ident = pid; 527 kev.filter = kn->kn_filter; 528 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | EV_FLAG2; 529 kev.fflags = kn->kn_sfflags; 530 kev.data = kn->kn_id; /* parent */ 531 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 532 error = kqueue_register(kq, &kev, NULL, 0); 533 if (error) 534 kn->kn_fflags |= NOTE_TRACKERR; 535 536 /* 537 * Then register another knote to track other potential events 538 * from the new process. 539 */ 540 kev.ident = pid; 541 kev.filter = kn->kn_filter; 542 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 543 kev.fflags = kn->kn_sfflags; 544 kev.data = kn->kn_id; /* parent */ 545 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 546 error = kqueue_register(kq, &kev, NULL, 0); 547 if (error) 548 kn->kn_fflags |= NOTE_TRACKERR; 549 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 550 KNOTE_ACTIVATE(kn, 0); 551 KQ_LOCK(kq); 552 kn->kn_status &= ~KN_INFLUX; 553 KQ_UNLOCK_FLUX(kq); 554 list->kl_lock(list->kl_lockarg); 555 } 556 list->kl_unlock(list->kl_lockarg); 557 } 558 559 /* 560 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 561 * interval timer support code. 562 */ 563 564 #define NOTE_TIMER_PRECMASK (NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \ 565 NOTE_NSECONDS) 566 567 static sbintime_t 568 timer2sbintime(intptr_t data, int flags) 569 { 570 571 /* 572 * Macros for converting to the fractional second portion of an 573 * sbintime_t using 64bit multiplication to improve precision. 574 */ 575 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) 576 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) 577 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) 578 switch (flags & NOTE_TIMER_PRECMASK) { 579 case NOTE_SECONDS: 580 #ifdef __LP64__ 581 if (data > (SBT_MAX / SBT_1S)) 582 return SBT_MAX; 583 #endif 584 return ((sbintime_t)data << 32); 585 case NOTE_MSECONDS: /* FALLTHROUGH */ 586 case 0: 587 if (data >= 1000) { 588 int64_t secs = data / 1000; 589 #ifdef __LP64__ 590 if (secs > (SBT_MAX / SBT_1S)) 591 return SBT_MAX; 592 #endif 593 return (secs << 32 | MS_TO_SBT(data % 1000)); 594 } 595 return MS_TO_SBT(data); 596 case NOTE_USECONDS: 597 if (data >= 1000000) { 598 int64_t secs = data / 1000000; 599 #ifdef __LP64__ 600 if (secs > (SBT_MAX / SBT_1S)) 601 return SBT_MAX; 602 #endif 603 return (secs << 32 | US_TO_SBT(data % 1000000)); 604 } 605 return US_TO_SBT(data); 606 case NOTE_NSECONDS: 607 if (data >= 1000000000) { 608 int64_t secs = data / 1000000000; 609 #ifdef __LP64__ 610 if (secs > (SBT_MAX / SBT_1S)) 611 return SBT_MAX; 612 #endif 613 return (secs << 32 | US_TO_SBT(data % 1000000000)); 614 } 615 return NS_TO_SBT(data); 616 default: 617 break; 618 } 619 return (-1); 620 } 621 622 static void 623 filt_timerexpire(void *knx) 624 { 625 struct callout *calloutp; 626 struct knote *kn; 627 628 kn = knx; 629 kn->kn_data++; 630 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 631 632 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) { 633 calloutp = (struct callout *)kn->kn_hook; 634 *kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata, 635 kn->kn_sfflags); 636 callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0, 637 filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); 638 } 639 } 640 641 /* 642 * data contains amount of time to sleep 643 */ 644 static int 645 filt_timerattach(struct knote *kn) 646 { 647 struct callout *calloutp; 648 sbintime_t to; 649 unsigned int ncallouts; 650 651 if ((intptr_t)kn->kn_sdata < 0) 652 return (EINVAL); 653 if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 654 kn->kn_sdata = 1; 655 /* Only precision unit are supported in flags so far */ 656 if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK) 657 return (EINVAL); 658 659 to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 660 if (to < 0) 661 return (EINVAL); 662 663 ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed); 664 do { 665 if (ncallouts >= kq_calloutmax) 666 return (ENOMEM); 667 } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts, 668 &ncallouts, ncallouts + 1, memory_order_relaxed, 669 memory_order_relaxed)); 670 671 kn->kn_flags |= EV_CLEAR; /* automatically set */ 672 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 673 kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK); 674 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 675 callout_init(calloutp, 1); 676 kn->kn_hook = calloutp; 677 *kn->kn_ptr.p_nexttime = to + sbinuptime(); 678 callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0, 679 filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); 680 681 return (0); 682 } 683 684 static void 685 filt_timerdetach(struct knote *kn) 686 { 687 struct callout *calloutp; 688 unsigned int old; 689 690 calloutp = (struct callout *)kn->kn_hook; 691 callout_drain(calloutp); 692 free(calloutp, M_KQUEUE); 693 free(kn->kn_ptr.p_nexttime, M_KQUEUE); 694 old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed); 695 KASSERT(old > 0, ("Number of callouts cannot become negative")); 696 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 697 } 698 699 static int 700 filt_timer(struct knote *kn, long hint) 701 { 702 703 return (kn->kn_data != 0); 704 } 705 706 static int 707 filt_userattach(struct knote *kn) 708 { 709 710 /* 711 * EVFILT_USER knotes are not attached to anything in the kernel. 712 */ 713 kn->kn_hook = NULL; 714 if (kn->kn_fflags & NOTE_TRIGGER) 715 kn->kn_hookid = 1; 716 else 717 kn->kn_hookid = 0; 718 return (0); 719 } 720 721 static void 722 filt_userdetach(__unused struct knote *kn) 723 { 724 725 /* 726 * EVFILT_USER knotes are not attached to anything in the kernel. 727 */ 728 } 729 730 static int 731 filt_user(struct knote *kn, __unused long hint) 732 { 733 734 return (kn->kn_hookid); 735 } 736 737 static void 738 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 739 { 740 u_int ffctrl; 741 742 switch (type) { 743 case EVENT_REGISTER: 744 if (kev->fflags & NOTE_TRIGGER) 745 kn->kn_hookid = 1; 746 747 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 748 kev->fflags &= NOTE_FFLAGSMASK; 749 switch (ffctrl) { 750 case NOTE_FFNOP: 751 break; 752 753 case NOTE_FFAND: 754 kn->kn_sfflags &= kev->fflags; 755 break; 756 757 case NOTE_FFOR: 758 kn->kn_sfflags |= kev->fflags; 759 break; 760 761 case NOTE_FFCOPY: 762 kn->kn_sfflags = kev->fflags; 763 break; 764 765 default: 766 /* XXX Return error? */ 767 break; 768 } 769 kn->kn_sdata = kev->data; 770 if (kev->flags & EV_CLEAR) { 771 kn->kn_hookid = 0; 772 kn->kn_data = 0; 773 kn->kn_fflags = 0; 774 } 775 break; 776 777 case EVENT_PROCESS: 778 *kev = kn->kn_kevent; 779 kev->fflags = kn->kn_sfflags; 780 kev->data = kn->kn_sdata; 781 if (kn->kn_flags & EV_CLEAR) { 782 kn->kn_hookid = 0; 783 kn->kn_data = 0; 784 kn->kn_fflags = 0; 785 } 786 break; 787 788 default: 789 panic("filt_usertouch() - invalid type (%ld)", type); 790 break; 791 } 792 } 793 794 int 795 sys_kqueue(struct thread *td, struct kqueue_args *uap) 796 { 797 798 return (kern_kqueue(td, 0, NULL)); 799 } 800 801 static void 802 kqueue_init(struct kqueue *kq) 803 { 804 805 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 806 TAILQ_INIT(&kq->kq_head); 807 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 808 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 809 } 810 811 int 812 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 813 { 814 struct filedesc *fdp; 815 struct kqueue *kq; 816 struct file *fp; 817 struct ucred *cred; 818 int fd, error; 819 820 fdp = td->td_proc->p_fd; 821 cred = td->td_ucred; 822 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 823 return (ENOMEM); 824 825 error = falloc_caps(td, &fp, &fd, flags, fcaps); 826 if (error != 0) { 827 chgkqcnt(cred->cr_ruidinfo, -1, 0); 828 return (error); 829 } 830 831 /* An extra reference on `fp' has been held for us by falloc(). */ 832 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 833 kqueue_init(kq); 834 kq->kq_fdp = fdp; 835 kq->kq_cred = crhold(cred); 836 837 FILEDESC_XLOCK(fdp); 838 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 839 FILEDESC_XUNLOCK(fdp); 840 841 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 842 fdrop(fp, td); 843 844 td->td_retval[0] = fd; 845 return (0); 846 } 847 848 #ifndef _SYS_SYSPROTO_H_ 849 struct kevent_args { 850 int fd; 851 const struct kevent *changelist; 852 int nchanges; 853 struct kevent *eventlist; 854 int nevents; 855 const struct timespec *timeout; 856 }; 857 #endif 858 int 859 sys_kevent(struct thread *td, struct kevent_args *uap) 860 { 861 struct timespec ts, *tsp; 862 struct kevent_copyops k_ops = { uap, 863 kevent_copyout, 864 kevent_copyin}; 865 int error; 866 #ifdef KTRACE 867 struct uio ktruio; 868 struct iovec ktriov; 869 struct uio *ktruioin = NULL; 870 struct uio *ktruioout = NULL; 871 #endif 872 873 if (uap->timeout != NULL) { 874 error = copyin(uap->timeout, &ts, sizeof(ts)); 875 if (error) 876 return (error); 877 tsp = &ts; 878 } else 879 tsp = NULL; 880 881 #ifdef KTRACE 882 if (KTRPOINT(td, KTR_GENIO)) { 883 ktriov.iov_base = uap->changelist; 884 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 885 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 886 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 887 .uio_td = td }; 888 ktruioin = cloneuio(&ktruio); 889 ktriov.iov_base = uap->eventlist; 890 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 891 ktruioout = cloneuio(&ktruio); 892 } 893 #endif 894 895 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 896 &k_ops, tsp); 897 898 #ifdef KTRACE 899 if (ktruioin != NULL) { 900 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 901 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 902 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 903 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 904 } 905 #endif 906 907 return (error); 908 } 909 910 /* 911 * Copy 'count' items into the destination list pointed to by uap->eventlist. 912 */ 913 static int 914 kevent_copyout(void *arg, struct kevent *kevp, int count) 915 { 916 struct kevent_args *uap; 917 int error; 918 919 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 920 uap = (struct kevent_args *)arg; 921 922 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 923 if (error == 0) 924 uap->eventlist += count; 925 return (error); 926 } 927 928 /* 929 * Copy 'count' items from the list pointed to by uap->changelist. 930 */ 931 static int 932 kevent_copyin(void *arg, struct kevent *kevp, int count) 933 { 934 struct kevent_args *uap; 935 int error; 936 937 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 938 uap = (struct kevent_args *)arg; 939 940 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 941 if (error == 0) 942 uap->changelist += count; 943 return (error); 944 } 945 946 int 947 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 948 struct kevent_copyops *k_ops, const struct timespec *timeout) 949 { 950 cap_rights_t rights; 951 struct file *fp; 952 int error; 953 954 cap_rights_init(&rights); 955 if (nchanges > 0) 956 cap_rights_set(&rights, CAP_KQUEUE_CHANGE); 957 if (nevents > 0) 958 cap_rights_set(&rights, CAP_KQUEUE_EVENT); 959 error = fget(td, fd, &rights, &fp); 960 if (error != 0) 961 return (error); 962 963 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 964 fdrop(fp, td); 965 966 return (error); 967 } 968 969 static int 970 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 971 struct kevent_copyops *k_ops, const struct timespec *timeout) 972 { 973 struct kevent keva[KQ_NEVENTS]; 974 struct kevent *kevp, *changes; 975 int i, n, nerrors, error; 976 977 nerrors = 0; 978 while (nchanges > 0) { 979 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 980 error = k_ops->k_copyin(k_ops->arg, keva, n); 981 if (error) 982 return (error); 983 changes = keva; 984 for (i = 0; i < n; i++) { 985 kevp = &changes[i]; 986 if (!kevp->filter) 987 continue; 988 kevp->flags &= ~EV_SYSFLAGS; 989 error = kqueue_register(kq, kevp, td, 1); 990 if (error || (kevp->flags & EV_RECEIPT)) { 991 if (nevents == 0) 992 return (error); 993 kevp->flags = EV_ERROR; 994 kevp->data = error; 995 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 996 nevents--; 997 nerrors++; 998 } 999 } 1000 nchanges -= n; 1001 } 1002 if (nerrors) { 1003 td->td_retval[0] = nerrors; 1004 return (0); 1005 } 1006 1007 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1008 } 1009 1010 int 1011 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1012 struct kevent_copyops *k_ops, const struct timespec *timeout) 1013 { 1014 struct kqueue *kq; 1015 int error; 1016 1017 error = kqueue_acquire(fp, &kq); 1018 if (error != 0) 1019 return (error); 1020 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1021 kqueue_release(kq, 0); 1022 return (error); 1023 } 1024 1025 /* 1026 * Performs a kevent() call on a temporarily created kqueue. This can be 1027 * used to perform one-shot polling, similar to poll() and select(). 1028 */ 1029 int 1030 kern_kevent_anonymous(struct thread *td, int nevents, 1031 struct kevent_copyops *k_ops) 1032 { 1033 struct kqueue kq = {}; 1034 int error; 1035 1036 kqueue_init(&kq); 1037 kq.kq_refcnt = 1; 1038 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1039 kqueue_drain(&kq, td); 1040 kqueue_destroy(&kq); 1041 return (error); 1042 } 1043 1044 int 1045 kqueue_add_filteropts(int filt, struct filterops *filtops) 1046 { 1047 int error; 1048 1049 error = 0; 1050 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1051 printf( 1052 "trying to add a filterop that is out of range: %d is beyond %d\n", 1053 ~filt, EVFILT_SYSCOUNT); 1054 return EINVAL; 1055 } 1056 mtx_lock(&filterops_lock); 1057 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1058 sysfilt_ops[~filt].for_fop != NULL) 1059 error = EEXIST; 1060 else { 1061 sysfilt_ops[~filt].for_fop = filtops; 1062 sysfilt_ops[~filt].for_refcnt = 0; 1063 } 1064 mtx_unlock(&filterops_lock); 1065 1066 return (error); 1067 } 1068 1069 int 1070 kqueue_del_filteropts(int filt) 1071 { 1072 int error; 1073 1074 error = 0; 1075 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1076 return EINVAL; 1077 1078 mtx_lock(&filterops_lock); 1079 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1080 sysfilt_ops[~filt].for_fop == NULL) 1081 error = EINVAL; 1082 else if (sysfilt_ops[~filt].for_refcnt != 0) 1083 error = EBUSY; 1084 else { 1085 sysfilt_ops[~filt].for_fop = &null_filtops; 1086 sysfilt_ops[~filt].for_refcnt = 0; 1087 } 1088 mtx_unlock(&filterops_lock); 1089 1090 return error; 1091 } 1092 1093 static struct filterops * 1094 kqueue_fo_find(int filt) 1095 { 1096 1097 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1098 return NULL; 1099 1100 if (sysfilt_ops[~filt].for_nolock) 1101 return sysfilt_ops[~filt].for_fop; 1102 1103 mtx_lock(&filterops_lock); 1104 sysfilt_ops[~filt].for_refcnt++; 1105 if (sysfilt_ops[~filt].for_fop == NULL) 1106 sysfilt_ops[~filt].for_fop = &null_filtops; 1107 mtx_unlock(&filterops_lock); 1108 1109 return sysfilt_ops[~filt].for_fop; 1110 } 1111 1112 static void 1113 kqueue_fo_release(int filt) 1114 { 1115 1116 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1117 return; 1118 1119 if (sysfilt_ops[~filt].for_nolock) 1120 return; 1121 1122 mtx_lock(&filterops_lock); 1123 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1124 ("filter object refcount not valid on release")); 1125 sysfilt_ops[~filt].for_refcnt--; 1126 mtx_unlock(&filterops_lock); 1127 } 1128 1129 /* 1130 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will 1131 * influence if memory allocation should wait. Make sure it is 0 if you 1132 * hold any mutexes. 1133 */ 1134 static int 1135 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) 1136 { 1137 struct filterops *fops; 1138 struct file *fp; 1139 struct knote *kn, *tkn; 1140 cap_rights_t rights; 1141 int error, filt, event; 1142 int haskqglobal, filedesc_unlock; 1143 1144 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1145 return (EINVAL); 1146 1147 fp = NULL; 1148 kn = NULL; 1149 error = 0; 1150 haskqglobal = 0; 1151 filedesc_unlock = 0; 1152 1153 filt = kev->filter; 1154 fops = kqueue_fo_find(filt); 1155 if (fops == NULL) 1156 return EINVAL; 1157 1158 if (kev->flags & EV_ADD) { 1159 /* 1160 * Prevent waiting with locks. Non-sleepable 1161 * allocation failures are handled in the loop, only 1162 * if the spare knote appears to be actually required. 1163 */ 1164 tkn = knote_alloc(waitok); 1165 } else { 1166 tkn = NULL; 1167 } 1168 1169 findkn: 1170 if (fops->f_isfd) { 1171 KASSERT(td != NULL, ("td is NULL")); 1172 error = fget(td, kev->ident, 1173 cap_rights_init(&rights, CAP_EVENT), &fp); 1174 if (error) 1175 goto done; 1176 1177 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1178 kev->ident, 0) != 0) { 1179 /* try again */ 1180 fdrop(fp, td); 1181 fp = NULL; 1182 error = kqueue_expand(kq, fops, kev->ident, waitok); 1183 if (error) 1184 goto done; 1185 goto findkn; 1186 } 1187 1188 if (fp->f_type == DTYPE_KQUEUE) { 1189 /* 1190 * If we add some intelligence about what we are doing, 1191 * we should be able to support events on ourselves. 1192 * We need to know when we are doing this to prevent 1193 * getting both the knlist lock and the kq lock since 1194 * they are the same thing. 1195 */ 1196 if (fp->f_data == kq) { 1197 error = EINVAL; 1198 goto done; 1199 } 1200 1201 /* 1202 * Pre-lock the filedesc before the global 1203 * lock mutex, see the comment in 1204 * kqueue_close(). 1205 */ 1206 FILEDESC_XLOCK(td->td_proc->p_fd); 1207 filedesc_unlock = 1; 1208 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1209 } 1210 1211 KQ_LOCK(kq); 1212 if (kev->ident < kq->kq_knlistsize) { 1213 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1214 if (kev->filter == kn->kn_filter) 1215 break; 1216 } 1217 } else { 1218 if ((kev->flags & EV_ADD) == EV_ADD) 1219 kqueue_expand(kq, fops, kev->ident, waitok); 1220 1221 KQ_LOCK(kq); 1222 1223 /* 1224 * If possible, find an existing knote to use for this kevent. 1225 */ 1226 if (kev->filter == EVFILT_PROC && 1227 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1228 /* This is an internal creation of a process tracking 1229 * note. Don't attempt to coalesce this with an 1230 * existing note. 1231 */ 1232 ; 1233 } else if (kq->kq_knhashmask != 0) { 1234 struct klist *list; 1235 1236 list = &kq->kq_knhash[ 1237 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1238 SLIST_FOREACH(kn, list, kn_link) 1239 if (kev->ident == kn->kn_id && 1240 kev->filter == kn->kn_filter) 1241 break; 1242 } 1243 } 1244 1245 /* knote is in the process of changing, wait for it to stabilize. */ 1246 if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1247 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1248 if (filedesc_unlock) { 1249 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1250 filedesc_unlock = 0; 1251 } 1252 kq->kq_state |= KQ_FLUXWAIT; 1253 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1254 if (fp != NULL) { 1255 fdrop(fp, td); 1256 fp = NULL; 1257 } 1258 goto findkn; 1259 } 1260 1261 /* 1262 * kn now contains the matching knote, or NULL if no match 1263 */ 1264 if (kn == NULL) { 1265 if (kev->flags & EV_ADD) { 1266 kn = tkn; 1267 tkn = NULL; 1268 if (kn == NULL) { 1269 KQ_UNLOCK(kq); 1270 error = ENOMEM; 1271 goto done; 1272 } 1273 kn->kn_fp = fp; 1274 kn->kn_kq = kq; 1275 kn->kn_fop = fops; 1276 /* 1277 * apply reference counts to knote structure, and 1278 * do not release it at the end of this routine. 1279 */ 1280 fops = NULL; 1281 fp = NULL; 1282 1283 kn->kn_sfflags = kev->fflags; 1284 kn->kn_sdata = kev->data; 1285 kev->fflags = 0; 1286 kev->data = 0; 1287 kn->kn_kevent = *kev; 1288 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1289 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1290 kn->kn_status = KN_INFLUX|KN_DETACHED; 1291 1292 error = knote_attach(kn, kq); 1293 KQ_UNLOCK(kq); 1294 if (error != 0) { 1295 tkn = kn; 1296 goto done; 1297 } 1298 1299 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1300 knote_drop(kn, td); 1301 goto done; 1302 } 1303 KN_LIST_LOCK(kn); 1304 goto done_ev_add; 1305 } else { 1306 /* No matching knote and the EV_ADD flag is not set. */ 1307 KQ_UNLOCK(kq); 1308 error = ENOENT; 1309 goto done; 1310 } 1311 } 1312 1313 if (kev->flags & EV_DELETE) { 1314 kn->kn_status |= KN_INFLUX; 1315 KQ_UNLOCK(kq); 1316 if (!(kn->kn_status & KN_DETACHED)) 1317 kn->kn_fop->f_detach(kn); 1318 knote_drop(kn, td); 1319 goto done; 1320 } 1321 1322 if (kev->flags & EV_FORCEONESHOT) { 1323 kn->kn_flags |= EV_ONESHOT; 1324 KNOTE_ACTIVATE(kn, 1); 1325 } 1326 1327 /* 1328 * The user may change some filter values after the initial EV_ADD, 1329 * but doing so will not reset any filter which has already been 1330 * triggered. 1331 */ 1332 kn->kn_status |= KN_INFLUX | KN_SCAN; 1333 KQ_UNLOCK(kq); 1334 KN_LIST_LOCK(kn); 1335 kn->kn_kevent.udata = kev->udata; 1336 if (!fops->f_isfd && fops->f_touch != NULL) { 1337 fops->f_touch(kn, kev, EVENT_REGISTER); 1338 } else { 1339 kn->kn_sfflags = kev->fflags; 1340 kn->kn_sdata = kev->data; 1341 } 1342 1343 /* 1344 * We can get here with kn->kn_knlist == NULL. This can happen when 1345 * the initial attach event decides that the event is "completed" 1346 * already. i.e. filt_procattach is called on a zombie process. It 1347 * will call filt_proc which will remove it from the list, and NULL 1348 * kn_knlist. 1349 */ 1350 done_ev_add: 1351 if ((kev->flags & EV_ENABLE) != 0) 1352 kn->kn_status &= ~KN_DISABLED; 1353 else if ((kev->flags & EV_DISABLE) != 0) 1354 kn->kn_status |= KN_DISABLED; 1355 1356 if ((kn->kn_status & KN_DISABLED) == 0) 1357 event = kn->kn_fop->f_event(kn, 0); 1358 else 1359 event = 0; 1360 1361 KQ_LOCK(kq); 1362 if (event) 1363 kn->kn_status |= KN_ACTIVE; 1364 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1365 KN_ACTIVE) 1366 knote_enqueue(kn); 1367 kn->kn_status &= ~(KN_INFLUX | KN_SCAN); 1368 KN_LIST_UNLOCK(kn); 1369 KQ_UNLOCK_FLUX(kq); 1370 1371 done: 1372 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1373 if (filedesc_unlock) 1374 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1375 if (fp != NULL) 1376 fdrop(fp, td); 1377 knote_free(tkn); 1378 if (fops != NULL) 1379 kqueue_fo_release(filt); 1380 return (error); 1381 } 1382 1383 static int 1384 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1385 { 1386 int error; 1387 struct kqueue *kq; 1388 1389 error = 0; 1390 1391 kq = fp->f_data; 1392 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1393 return (EBADF); 1394 *kqp = kq; 1395 KQ_LOCK(kq); 1396 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1397 KQ_UNLOCK(kq); 1398 return (EBADF); 1399 } 1400 kq->kq_refcnt++; 1401 KQ_UNLOCK(kq); 1402 1403 return error; 1404 } 1405 1406 static void 1407 kqueue_release(struct kqueue *kq, int locked) 1408 { 1409 if (locked) 1410 KQ_OWNED(kq); 1411 else 1412 KQ_LOCK(kq); 1413 kq->kq_refcnt--; 1414 if (kq->kq_refcnt == 1) 1415 wakeup(&kq->kq_refcnt); 1416 if (!locked) 1417 KQ_UNLOCK(kq); 1418 } 1419 1420 static void 1421 kqueue_schedtask(struct kqueue *kq) 1422 { 1423 1424 KQ_OWNED(kq); 1425 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1426 ("scheduling kqueue task while draining")); 1427 1428 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1429 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1430 kq->kq_state |= KQ_TASKSCHED; 1431 } 1432 } 1433 1434 /* 1435 * Expand the kq to make sure we have storage for fops/ident pair. 1436 * 1437 * Return 0 on success (or no work necessary), return errno on failure. 1438 * 1439 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 1440 * If kqueue_register is called from a non-fd context, there usually/should 1441 * be no locks held. 1442 */ 1443 static int 1444 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1445 int waitok) 1446 { 1447 struct klist *list, *tmp_knhash, *to_free; 1448 u_long tmp_knhashmask; 1449 int size; 1450 int fd; 1451 int mflag = waitok ? M_WAITOK : M_NOWAIT; 1452 1453 KQ_NOTOWNED(kq); 1454 1455 to_free = NULL; 1456 if (fops->f_isfd) { 1457 fd = ident; 1458 if (kq->kq_knlistsize <= fd) { 1459 size = kq->kq_knlistsize; 1460 while (size <= fd) 1461 size += KQEXTENT; 1462 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1463 if (list == NULL) 1464 return ENOMEM; 1465 KQ_LOCK(kq); 1466 if (kq->kq_knlistsize > fd) { 1467 to_free = list; 1468 list = NULL; 1469 } else { 1470 if (kq->kq_knlist != NULL) { 1471 bcopy(kq->kq_knlist, list, 1472 kq->kq_knlistsize * sizeof(*list)); 1473 to_free = kq->kq_knlist; 1474 kq->kq_knlist = NULL; 1475 } 1476 bzero((caddr_t)list + 1477 kq->kq_knlistsize * sizeof(*list), 1478 (size - kq->kq_knlistsize) * sizeof(*list)); 1479 kq->kq_knlistsize = size; 1480 kq->kq_knlist = list; 1481 } 1482 KQ_UNLOCK(kq); 1483 } 1484 } else { 1485 if (kq->kq_knhashmask == 0) { 1486 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1487 &tmp_knhashmask); 1488 if (tmp_knhash == NULL) 1489 return ENOMEM; 1490 KQ_LOCK(kq); 1491 if (kq->kq_knhashmask == 0) { 1492 kq->kq_knhash = tmp_knhash; 1493 kq->kq_knhashmask = tmp_knhashmask; 1494 } else { 1495 to_free = tmp_knhash; 1496 } 1497 KQ_UNLOCK(kq); 1498 } 1499 } 1500 free(to_free, M_KQUEUE); 1501 1502 KQ_NOTOWNED(kq); 1503 return 0; 1504 } 1505 1506 static void 1507 kqueue_task(void *arg, int pending) 1508 { 1509 struct kqueue *kq; 1510 int haskqglobal; 1511 1512 haskqglobal = 0; 1513 kq = arg; 1514 1515 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1516 KQ_LOCK(kq); 1517 1518 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1519 1520 kq->kq_state &= ~KQ_TASKSCHED; 1521 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1522 wakeup(&kq->kq_state); 1523 } 1524 KQ_UNLOCK(kq); 1525 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1526 } 1527 1528 /* 1529 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1530 * We treat KN_MARKER knotes as if they are INFLUX. 1531 */ 1532 static int 1533 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1534 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1535 { 1536 struct kevent *kevp; 1537 struct knote *kn, *marker; 1538 sbintime_t asbt, rsbt; 1539 int count, error, haskqglobal, influx, nkev, touch; 1540 1541 count = maxevents; 1542 nkev = 0; 1543 error = 0; 1544 haskqglobal = 0; 1545 1546 if (maxevents == 0) 1547 goto done_nl; 1548 1549 rsbt = 0; 1550 if (tsp != NULL) { 1551 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1552 tsp->tv_nsec >= 1000000000) { 1553 error = EINVAL; 1554 goto done_nl; 1555 } 1556 if (timespecisset(tsp)) { 1557 if (tsp->tv_sec <= INT32_MAX) { 1558 rsbt = tstosbt(*tsp); 1559 if (TIMESEL(&asbt, rsbt)) 1560 asbt += tc_tick_sbt; 1561 if (asbt <= SBT_MAX - rsbt) 1562 asbt += rsbt; 1563 else 1564 asbt = 0; 1565 rsbt >>= tc_precexp; 1566 } else 1567 asbt = 0; 1568 } else 1569 asbt = -1; 1570 } else 1571 asbt = 0; 1572 marker = knote_alloc(1); 1573 marker->kn_status = KN_MARKER; 1574 KQ_LOCK(kq); 1575 1576 retry: 1577 kevp = keva; 1578 if (kq->kq_count == 0) { 1579 if (asbt == -1) { 1580 error = EWOULDBLOCK; 1581 } else { 1582 kq->kq_state |= KQ_SLEEP; 1583 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1584 "kqread", asbt, rsbt, C_ABSOLUTE); 1585 } 1586 if (error == 0) 1587 goto retry; 1588 /* don't restart after signals... */ 1589 if (error == ERESTART) 1590 error = EINTR; 1591 else if (error == EWOULDBLOCK) 1592 error = 0; 1593 goto done; 1594 } 1595 1596 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1597 influx = 0; 1598 while (count) { 1599 KQ_OWNED(kq); 1600 kn = TAILQ_FIRST(&kq->kq_head); 1601 1602 if ((kn->kn_status == KN_MARKER && kn != marker) || 1603 (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1604 if (influx) { 1605 influx = 0; 1606 KQ_FLUX_WAKEUP(kq); 1607 } 1608 kq->kq_state |= KQ_FLUXWAIT; 1609 error = msleep(kq, &kq->kq_lock, PSOCK, 1610 "kqflxwt", 0); 1611 continue; 1612 } 1613 1614 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1615 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1616 kn->kn_status &= ~KN_QUEUED; 1617 kq->kq_count--; 1618 continue; 1619 } 1620 if (kn == marker) { 1621 KQ_FLUX_WAKEUP(kq); 1622 if (count == maxevents) 1623 goto retry; 1624 goto done; 1625 } 1626 KASSERT((kn->kn_status & KN_INFLUX) == 0, 1627 ("KN_INFLUX set when not suppose to be")); 1628 1629 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1630 kn->kn_status &= ~KN_QUEUED; 1631 kn->kn_status |= KN_INFLUX; 1632 kq->kq_count--; 1633 KQ_UNLOCK(kq); 1634 /* 1635 * We don't need to lock the list since we've marked 1636 * it _INFLUX. 1637 */ 1638 if (!(kn->kn_status & KN_DETACHED)) 1639 kn->kn_fop->f_detach(kn); 1640 knote_drop(kn, td); 1641 KQ_LOCK(kq); 1642 continue; 1643 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1644 kn->kn_status &= ~KN_QUEUED; 1645 kn->kn_status |= KN_INFLUX; 1646 kq->kq_count--; 1647 KQ_UNLOCK(kq); 1648 /* 1649 * We don't need to lock the list since we've marked 1650 * it _INFLUX. 1651 */ 1652 *kevp = kn->kn_kevent; 1653 if (!(kn->kn_status & KN_DETACHED)) 1654 kn->kn_fop->f_detach(kn); 1655 knote_drop(kn, td); 1656 KQ_LOCK(kq); 1657 kn = NULL; 1658 } else { 1659 kn->kn_status |= KN_INFLUX | KN_SCAN; 1660 KQ_UNLOCK(kq); 1661 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 1662 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1663 KN_LIST_LOCK(kn); 1664 if (kn->kn_fop->f_event(kn, 0) == 0) { 1665 KQ_LOCK(kq); 1666 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1667 kn->kn_status &= 1668 ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX | 1669 KN_SCAN); 1670 kq->kq_count--; 1671 KN_LIST_UNLOCK(kn); 1672 influx = 1; 1673 continue; 1674 } 1675 touch = (!kn->kn_fop->f_isfd && 1676 kn->kn_fop->f_touch != NULL); 1677 if (touch) 1678 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 1679 else 1680 *kevp = kn->kn_kevent; 1681 KQ_LOCK(kq); 1682 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1683 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 1684 /* 1685 * Manually clear knotes who weren't 1686 * 'touch'ed. 1687 */ 1688 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 1689 kn->kn_data = 0; 1690 kn->kn_fflags = 0; 1691 } 1692 if (kn->kn_flags & EV_DISPATCH) 1693 kn->kn_status |= KN_DISABLED; 1694 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1695 kq->kq_count--; 1696 } else 1697 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1698 1699 kn->kn_status &= ~(KN_INFLUX | KN_SCAN); 1700 KN_LIST_UNLOCK(kn); 1701 influx = 1; 1702 } 1703 1704 /* we are returning a copy to the user */ 1705 kevp++; 1706 nkev++; 1707 count--; 1708 1709 if (nkev == KQ_NEVENTS) { 1710 influx = 0; 1711 KQ_UNLOCK_FLUX(kq); 1712 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1713 nkev = 0; 1714 kevp = keva; 1715 KQ_LOCK(kq); 1716 if (error) 1717 break; 1718 } 1719 } 1720 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 1721 done: 1722 KQ_OWNED(kq); 1723 KQ_UNLOCK_FLUX(kq); 1724 knote_free(marker); 1725 done_nl: 1726 KQ_NOTOWNED(kq); 1727 if (nkev != 0) 1728 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1729 td->td_retval[0] = maxevents - count; 1730 return (error); 1731 } 1732 1733 /*ARGSUSED*/ 1734 static int 1735 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 1736 struct ucred *active_cred, struct thread *td) 1737 { 1738 /* 1739 * Enabling sigio causes two major problems: 1740 * 1) infinite recursion: 1741 * Synopsys: kevent is being used to track signals and have FIOASYNC 1742 * set. On receipt of a signal this will cause a kqueue to recurse 1743 * into itself over and over. Sending the sigio causes the kqueue 1744 * to become ready, which in turn posts sigio again, forever. 1745 * Solution: this can be solved by setting a flag in the kqueue that 1746 * we have a SIGIO in progress. 1747 * 2) locking problems: 1748 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 1749 * us above the proc and pgrp locks. 1750 * Solution: Post a signal using an async mechanism, being sure to 1751 * record a generation count in the delivery so that we do not deliver 1752 * a signal to the wrong process. 1753 * 1754 * Note, these two mechanisms are somewhat mutually exclusive! 1755 */ 1756 #if 0 1757 struct kqueue *kq; 1758 1759 kq = fp->f_data; 1760 switch (cmd) { 1761 case FIOASYNC: 1762 if (*(int *)data) { 1763 kq->kq_state |= KQ_ASYNC; 1764 } else { 1765 kq->kq_state &= ~KQ_ASYNC; 1766 } 1767 return (0); 1768 1769 case FIOSETOWN: 1770 return (fsetown(*(int *)data, &kq->kq_sigio)); 1771 1772 case FIOGETOWN: 1773 *(int *)data = fgetown(&kq->kq_sigio); 1774 return (0); 1775 } 1776 #endif 1777 1778 return (ENOTTY); 1779 } 1780 1781 /*ARGSUSED*/ 1782 static int 1783 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 1784 struct thread *td) 1785 { 1786 struct kqueue *kq; 1787 int revents = 0; 1788 int error; 1789 1790 if ((error = kqueue_acquire(fp, &kq))) 1791 return POLLERR; 1792 1793 KQ_LOCK(kq); 1794 if (events & (POLLIN | POLLRDNORM)) { 1795 if (kq->kq_count) { 1796 revents |= events & (POLLIN | POLLRDNORM); 1797 } else { 1798 selrecord(td, &kq->kq_sel); 1799 if (SEL_WAITING(&kq->kq_sel)) 1800 kq->kq_state |= KQ_SEL; 1801 } 1802 } 1803 kqueue_release(kq, 1); 1804 KQ_UNLOCK(kq); 1805 return (revents); 1806 } 1807 1808 /*ARGSUSED*/ 1809 static int 1810 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 1811 struct thread *td) 1812 { 1813 1814 bzero((void *)st, sizeof *st); 1815 /* 1816 * We no longer return kq_count because the unlocked value is useless. 1817 * If you spent all this time getting the count, why not spend your 1818 * syscall better by calling kevent? 1819 * 1820 * XXX - This is needed for libc_r. 1821 */ 1822 st->st_mode = S_IFIFO; 1823 return (0); 1824 } 1825 1826 static void 1827 kqueue_drain(struct kqueue *kq, struct thread *td) 1828 { 1829 struct knote *kn; 1830 int i; 1831 1832 KQ_LOCK(kq); 1833 1834 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1835 ("kqueue already closing")); 1836 kq->kq_state |= KQ_CLOSING; 1837 if (kq->kq_refcnt > 1) 1838 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1839 1840 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1841 1842 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1843 ("kqueue's knlist not empty")); 1844 1845 for (i = 0; i < kq->kq_knlistsize; i++) { 1846 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1847 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1848 kq->kq_state |= KQ_FLUXWAIT; 1849 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1850 continue; 1851 } 1852 kn->kn_status |= KN_INFLUX; 1853 KQ_UNLOCK(kq); 1854 if (!(kn->kn_status & KN_DETACHED)) 1855 kn->kn_fop->f_detach(kn); 1856 knote_drop(kn, td); 1857 KQ_LOCK(kq); 1858 } 1859 } 1860 if (kq->kq_knhashmask != 0) { 1861 for (i = 0; i <= kq->kq_knhashmask; i++) { 1862 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1863 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1864 kq->kq_state |= KQ_FLUXWAIT; 1865 msleep(kq, &kq->kq_lock, PSOCK, 1866 "kqclo2", 0); 1867 continue; 1868 } 1869 kn->kn_status |= KN_INFLUX; 1870 KQ_UNLOCK(kq); 1871 if (!(kn->kn_status & KN_DETACHED)) 1872 kn->kn_fop->f_detach(kn); 1873 knote_drop(kn, td); 1874 KQ_LOCK(kq); 1875 } 1876 } 1877 } 1878 1879 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1880 kq->kq_state |= KQ_TASKDRAIN; 1881 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1882 } 1883 1884 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1885 selwakeuppri(&kq->kq_sel, PSOCK); 1886 if (!SEL_WAITING(&kq->kq_sel)) 1887 kq->kq_state &= ~KQ_SEL; 1888 } 1889 1890 KQ_UNLOCK(kq); 1891 } 1892 1893 static void 1894 kqueue_destroy(struct kqueue *kq) 1895 { 1896 1897 KASSERT(kq->kq_fdp == NULL, 1898 ("kqueue still attached to a file descriptor")); 1899 seldrain(&kq->kq_sel); 1900 knlist_destroy(&kq->kq_sel.si_note); 1901 mtx_destroy(&kq->kq_lock); 1902 1903 if (kq->kq_knhash != NULL) 1904 free(kq->kq_knhash, M_KQUEUE); 1905 if (kq->kq_knlist != NULL) 1906 free(kq->kq_knlist, M_KQUEUE); 1907 1908 funsetown(&kq->kq_sigio); 1909 } 1910 1911 /*ARGSUSED*/ 1912 static int 1913 kqueue_close(struct file *fp, struct thread *td) 1914 { 1915 struct kqueue *kq = fp->f_data; 1916 struct filedesc *fdp; 1917 int error; 1918 int filedesc_unlock; 1919 1920 if ((error = kqueue_acquire(fp, &kq))) 1921 return error; 1922 kqueue_drain(kq, td); 1923 1924 /* 1925 * We could be called due to the knote_drop() doing fdrop(), 1926 * called from kqueue_register(). In this case the global 1927 * lock is owned, and filedesc sx is locked before, to not 1928 * take the sleepable lock after non-sleepable. 1929 */ 1930 fdp = kq->kq_fdp; 1931 kq->kq_fdp = NULL; 1932 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 1933 FILEDESC_XLOCK(fdp); 1934 filedesc_unlock = 1; 1935 } else 1936 filedesc_unlock = 0; 1937 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 1938 if (filedesc_unlock) 1939 FILEDESC_XUNLOCK(fdp); 1940 1941 kqueue_destroy(kq); 1942 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 1943 crfree(kq->kq_cred); 1944 free(kq, M_KQUEUE); 1945 fp->f_data = NULL; 1946 1947 return (0); 1948 } 1949 1950 static int 1951 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 1952 { 1953 1954 kif->kf_type = KF_TYPE_KQUEUE; 1955 return (0); 1956 } 1957 1958 static void 1959 kqueue_wakeup(struct kqueue *kq) 1960 { 1961 KQ_OWNED(kq); 1962 1963 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1964 kq->kq_state &= ~KQ_SLEEP; 1965 wakeup(kq); 1966 } 1967 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1968 selwakeuppri(&kq->kq_sel, PSOCK); 1969 if (!SEL_WAITING(&kq->kq_sel)) 1970 kq->kq_state &= ~KQ_SEL; 1971 } 1972 if (!knlist_empty(&kq->kq_sel.si_note)) 1973 kqueue_schedtask(kq); 1974 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1975 pgsigio(&kq->kq_sigio, SIGIO, 0); 1976 } 1977 } 1978 1979 /* 1980 * Walk down a list of knotes, activating them if their event has triggered. 1981 * 1982 * There is a possibility to optimize in the case of one kq watching another. 1983 * Instead of scheduling a task to wake it up, you could pass enough state 1984 * down the chain to make up the parent kqueue. Make this code functional 1985 * first. 1986 */ 1987 void 1988 knote(struct knlist *list, long hint, int lockflags) 1989 { 1990 struct kqueue *kq; 1991 struct knote *kn, *tkn; 1992 int error; 1993 1994 if (list == NULL) 1995 return; 1996 1997 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 1998 1999 if ((lockflags & KNF_LISTLOCKED) == 0) 2000 list->kl_lock(list->kl_lockarg); 2001 2002 /* 2003 * If we unlock the list lock (and set KN_INFLUX), we can 2004 * eliminate the kqueue scheduling, but this will introduce 2005 * four lock/unlock's for each knote to test. Also, marker 2006 * would be needed to keep iteration position, since filters 2007 * or other threads could remove events. 2008 */ 2009 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2010 kq = kn->kn_kq; 2011 KQ_LOCK(kq); 2012 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 2013 /* 2014 * Do not process the influx notes, except for 2015 * the influx coming from the kq unlock in the 2016 * kqueue_scan(). In the later case, we do 2017 * not interfere with the scan, since the code 2018 * fragment in kqueue_scan() locks the knlist, 2019 * and cannot proceed until we finished. 2020 */ 2021 KQ_UNLOCK(kq); 2022 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2023 kn->kn_status |= KN_INFLUX; 2024 KQ_UNLOCK(kq); 2025 error = kn->kn_fop->f_event(kn, hint); 2026 KQ_LOCK(kq); 2027 kn->kn_status &= ~KN_INFLUX; 2028 if (error) 2029 KNOTE_ACTIVATE(kn, 1); 2030 KQ_UNLOCK_FLUX(kq); 2031 } else { 2032 kn->kn_status |= KN_HASKQLOCK; 2033 if (kn->kn_fop->f_event(kn, hint)) 2034 KNOTE_ACTIVATE(kn, 1); 2035 kn->kn_status &= ~KN_HASKQLOCK; 2036 KQ_UNLOCK(kq); 2037 } 2038 } 2039 if ((lockflags & KNF_LISTLOCKED) == 0) 2040 list->kl_unlock(list->kl_lockarg); 2041 } 2042 2043 /* 2044 * add a knote to a knlist 2045 */ 2046 void 2047 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2048 { 2049 KNL_ASSERT_LOCK(knl, islocked); 2050 KQ_NOTOWNED(kn->kn_kq); 2051 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == 2052 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED")); 2053 if (!islocked) 2054 knl->kl_lock(knl->kl_lockarg); 2055 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2056 if (!islocked) 2057 knl->kl_unlock(knl->kl_lockarg); 2058 KQ_LOCK(kn->kn_kq); 2059 kn->kn_knlist = knl; 2060 kn->kn_status &= ~KN_DETACHED; 2061 KQ_UNLOCK(kn->kn_kq); 2062 } 2063 2064 static void 2065 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) 2066 { 2067 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked")); 2068 KNL_ASSERT_LOCK(knl, knlislocked); 2069 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2070 if (!kqislocked) 2071 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX, 2072 ("knlist_remove called w/o knote being KN_INFLUX or already removed")); 2073 if (!knlislocked) 2074 knl->kl_lock(knl->kl_lockarg); 2075 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2076 kn->kn_knlist = NULL; 2077 if (!knlislocked) 2078 knl->kl_unlock(knl->kl_lockarg); 2079 if (!kqislocked) 2080 KQ_LOCK(kn->kn_kq); 2081 kn->kn_status |= KN_DETACHED; 2082 if (!kqislocked) 2083 KQ_UNLOCK(kn->kn_kq); 2084 } 2085 2086 /* 2087 * remove knote from the specified knlist 2088 */ 2089 void 2090 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2091 { 2092 2093 knlist_remove_kq(knl, kn, islocked, 0); 2094 } 2095 2096 /* 2097 * remove knote from the specified knlist while in f_event handler. 2098 */ 2099 void 2100 knlist_remove_inevent(struct knlist *knl, struct knote *kn) 2101 { 2102 2103 knlist_remove_kq(knl, kn, 1, 2104 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 2105 } 2106 2107 int 2108 knlist_empty(struct knlist *knl) 2109 { 2110 2111 KNL_ASSERT_LOCKED(knl); 2112 return SLIST_EMPTY(&knl->kl_list); 2113 } 2114 2115 static struct mtx knlist_lock; 2116 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2117 MTX_DEF); 2118 static void knlist_mtx_lock(void *arg); 2119 static void knlist_mtx_unlock(void *arg); 2120 2121 static void 2122 knlist_mtx_lock(void *arg) 2123 { 2124 2125 mtx_lock((struct mtx *)arg); 2126 } 2127 2128 static void 2129 knlist_mtx_unlock(void *arg) 2130 { 2131 2132 mtx_unlock((struct mtx *)arg); 2133 } 2134 2135 static void 2136 knlist_mtx_assert_locked(void *arg) 2137 { 2138 2139 mtx_assert((struct mtx *)arg, MA_OWNED); 2140 } 2141 2142 static void 2143 knlist_mtx_assert_unlocked(void *arg) 2144 { 2145 2146 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2147 } 2148 2149 static void 2150 knlist_rw_rlock(void *arg) 2151 { 2152 2153 rw_rlock((struct rwlock *)arg); 2154 } 2155 2156 static void 2157 knlist_rw_runlock(void *arg) 2158 { 2159 2160 rw_runlock((struct rwlock *)arg); 2161 } 2162 2163 static void 2164 knlist_rw_assert_locked(void *arg) 2165 { 2166 2167 rw_assert((struct rwlock *)arg, RA_LOCKED); 2168 } 2169 2170 static void 2171 knlist_rw_assert_unlocked(void *arg) 2172 { 2173 2174 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2175 } 2176 2177 void 2178 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2179 void (*kl_unlock)(void *), 2180 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 2181 { 2182 2183 if (lock == NULL) 2184 knl->kl_lockarg = &knlist_lock; 2185 else 2186 knl->kl_lockarg = lock; 2187 2188 if (kl_lock == NULL) 2189 knl->kl_lock = knlist_mtx_lock; 2190 else 2191 knl->kl_lock = kl_lock; 2192 if (kl_unlock == NULL) 2193 knl->kl_unlock = knlist_mtx_unlock; 2194 else 2195 knl->kl_unlock = kl_unlock; 2196 if (kl_assert_locked == NULL) 2197 knl->kl_assert_locked = knlist_mtx_assert_locked; 2198 else 2199 knl->kl_assert_locked = kl_assert_locked; 2200 if (kl_assert_unlocked == NULL) 2201 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2202 else 2203 knl->kl_assert_unlocked = kl_assert_unlocked; 2204 2205 SLIST_INIT(&knl->kl_list); 2206 } 2207 2208 void 2209 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2210 { 2211 2212 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2213 } 2214 2215 void 2216 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2217 { 2218 2219 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2220 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2221 } 2222 2223 void 2224 knlist_destroy(struct knlist *knl) 2225 { 2226 2227 #ifdef INVARIANTS 2228 /* 2229 * if we run across this error, we need to find the offending 2230 * driver and have it call knlist_clear or knlist_delete. 2231 */ 2232 if (!SLIST_EMPTY(&knl->kl_list)) 2233 printf("WARNING: destroying knlist w/ knotes on it!\n"); 2234 #endif 2235 2236 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 2237 SLIST_INIT(&knl->kl_list); 2238 } 2239 2240 /* 2241 * Even if we are locked, we may need to drop the lock to allow any influx 2242 * knotes time to "settle". 2243 */ 2244 void 2245 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2246 { 2247 struct knote *kn, *kn2; 2248 struct kqueue *kq; 2249 2250 if (islocked) 2251 KNL_ASSERT_LOCKED(knl); 2252 else { 2253 KNL_ASSERT_UNLOCKED(knl); 2254 again: /* need to reacquire lock since we have dropped it */ 2255 knl->kl_lock(knl->kl_lockarg); 2256 } 2257 2258 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2259 kq = kn->kn_kq; 2260 KQ_LOCK(kq); 2261 if ((kn->kn_status & KN_INFLUX)) { 2262 KQ_UNLOCK(kq); 2263 continue; 2264 } 2265 knlist_remove_kq(knl, kn, 1, 1); 2266 if (killkn) { 2267 kn->kn_status |= KN_INFLUX | KN_DETACHED; 2268 KQ_UNLOCK(kq); 2269 knote_drop(kn, td); 2270 } else { 2271 /* Make sure cleared knotes disappear soon */ 2272 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 2273 KQ_UNLOCK(kq); 2274 } 2275 kq = NULL; 2276 } 2277 2278 if (!SLIST_EMPTY(&knl->kl_list)) { 2279 /* there are still KN_INFLUX remaining */ 2280 kn = SLIST_FIRST(&knl->kl_list); 2281 kq = kn->kn_kq; 2282 KQ_LOCK(kq); 2283 KASSERT(kn->kn_status & KN_INFLUX, 2284 ("knote removed w/o list lock")); 2285 knl->kl_unlock(knl->kl_lockarg); 2286 kq->kq_state |= KQ_FLUXWAIT; 2287 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2288 kq = NULL; 2289 goto again; 2290 } 2291 2292 if (islocked) 2293 KNL_ASSERT_LOCKED(knl); 2294 else { 2295 knl->kl_unlock(knl->kl_lockarg); 2296 KNL_ASSERT_UNLOCKED(knl); 2297 } 2298 } 2299 2300 /* 2301 * Remove all knotes referencing a specified fd must be called with FILEDESC 2302 * lock. This prevents a race where a new fd comes along and occupies the 2303 * entry and we attach a knote to the fd. 2304 */ 2305 void 2306 knote_fdclose(struct thread *td, int fd) 2307 { 2308 struct filedesc *fdp = td->td_proc->p_fd; 2309 struct kqueue *kq; 2310 struct knote *kn; 2311 int influx; 2312 2313 FILEDESC_XLOCK_ASSERT(fdp); 2314 2315 /* 2316 * We shouldn't have to worry about new kevents appearing on fd 2317 * since filedesc is locked. 2318 */ 2319 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2320 KQ_LOCK(kq); 2321 2322 again: 2323 influx = 0; 2324 while (kq->kq_knlistsize > fd && 2325 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2326 if (kn->kn_status & KN_INFLUX) { 2327 /* someone else might be waiting on our knote */ 2328 if (influx) 2329 wakeup(kq); 2330 kq->kq_state |= KQ_FLUXWAIT; 2331 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2332 goto again; 2333 } 2334 kn->kn_status |= KN_INFLUX; 2335 KQ_UNLOCK(kq); 2336 if (!(kn->kn_status & KN_DETACHED)) 2337 kn->kn_fop->f_detach(kn); 2338 knote_drop(kn, td); 2339 influx = 1; 2340 KQ_LOCK(kq); 2341 } 2342 KQ_UNLOCK_FLUX(kq); 2343 } 2344 } 2345 2346 static int 2347 knote_attach(struct knote *kn, struct kqueue *kq) 2348 { 2349 struct klist *list; 2350 2351 KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX")); 2352 KQ_OWNED(kq); 2353 2354 if (kn->kn_fop->f_isfd) { 2355 if (kn->kn_id >= kq->kq_knlistsize) 2356 return ENOMEM; 2357 list = &kq->kq_knlist[kn->kn_id]; 2358 } else { 2359 if (kq->kq_knhash == NULL) 2360 return ENOMEM; 2361 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2362 } 2363 2364 SLIST_INSERT_HEAD(list, kn, kn_link); 2365 2366 return 0; 2367 } 2368 2369 /* 2370 * knote must already have been detached using the f_detach method. 2371 * no lock need to be held, it is assumed that the KN_INFLUX flag is set 2372 * to prevent other removal. 2373 */ 2374 static void 2375 knote_drop(struct knote *kn, struct thread *td) 2376 { 2377 struct kqueue *kq; 2378 struct klist *list; 2379 2380 kq = kn->kn_kq; 2381 2382 KQ_NOTOWNED(kq); 2383 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX, 2384 ("knote_drop called without KN_INFLUX set in kn_status")); 2385 2386 KQ_LOCK(kq); 2387 if (kn->kn_fop->f_isfd) 2388 list = &kq->kq_knlist[kn->kn_id]; 2389 else 2390 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2391 2392 if (!SLIST_EMPTY(list)) 2393 SLIST_REMOVE(list, kn, knote, kn_link); 2394 if (kn->kn_status & KN_QUEUED) 2395 knote_dequeue(kn); 2396 KQ_UNLOCK_FLUX(kq); 2397 2398 if (kn->kn_fop->f_isfd) { 2399 fdrop(kn->kn_fp, td); 2400 kn->kn_fp = NULL; 2401 } 2402 kqueue_fo_release(kn->kn_kevent.filter); 2403 kn->kn_fop = NULL; 2404 knote_free(kn); 2405 } 2406 2407 static void 2408 knote_enqueue(struct knote *kn) 2409 { 2410 struct kqueue *kq = kn->kn_kq; 2411 2412 KQ_OWNED(kn->kn_kq); 2413 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2414 2415 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2416 kn->kn_status |= KN_QUEUED; 2417 kq->kq_count++; 2418 kqueue_wakeup(kq); 2419 } 2420 2421 static void 2422 knote_dequeue(struct knote *kn) 2423 { 2424 struct kqueue *kq = kn->kn_kq; 2425 2426 KQ_OWNED(kn->kn_kq); 2427 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2428 2429 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2430 kn->kn_status &= ~KN_QUEUED; 2431 kq->kq_count--; 2432 } 2433 2434 static void 2435 knote_init(void) 2436 { 2437 2438 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2439 NULL, NULL, UMA_ALIGN_PTR, 0); 2440 } 2441 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2442 2443 static struct knote * 2444 knote_alloc(int waitok) 2445 { 2446 2447 return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) | 2448 M_ZERO)); 2449 } 2450 2451 static void 2452 knote_free(struct knote *kn) 2453 { 2454 2455 uma_zfree(knote_zone, kn); 2456 } 2457 2458 /* 2459 * Register the kev w/ the kq specified by fd. 2460 */ 2461 int 2462 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2463 { 2464 struct kqueue *kq; 2465 struct file *fp; 2466 cap_rights_t rights; 2467 int error; 2468 2469 error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); 2470 if (error != 0) 2471 return (error); 2472 if ((error = kqueue_acquire(fp, &kq)) != 0) 2473 goto noacquire; 2474 2475 error = kqueue_register(kq, kev, td, waitok); 2476 2477 kqueue_release(kq, 0); 2478 2479 noacquire: 2480 fdrop(fp, td); 2481 2482 return error; 2483 } 2484