/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ktrace.h"
#include "opt_kqueue.h"

#ifdef COMPAT_FREEBSD11
#define	_WANT_FREEBSD11_KEVENT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);
struct g_kevent_args;
static int	kern_kevent_generic(struct thread *td,
		    struct g_kevent_args *uap,
		    struct kevent_copyops *k_ops, const char *struct_name);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct knote *knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static unsigned int	kq_ncallouts = 0;
static unsigned int 	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64	/* XXX should be tunable */
#endif

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
    MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
	{ &fs_filtops, 1 },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
};
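
/*
 * Explanatory note on the indexing: the EVFILT_* constants in sys/event.h
 * are small negative integers (EVFILT_READ is -1, EVFILT_WRITE is -2, ...),
 * so a filter number filt maps onto this table as sysfilt_ops[~filt].
 * ~EVFILT_READ == 0 is the first slot, and ~filt stays within
 * 0..EVFILT_SYSCOUNT - 1 for every defined filter.
 */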

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	if (kn->kn_sfflags & NOTE_EXIT)
		p = pfind_any(kn->kn_id);
	else
		p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT)
		exiting = true;

	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.
	 * The latter is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  This mostly does the same as knote(),
 * activating all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether the
 * user wants to track the new process.  If so, attach a new knote to it,
 * and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice.  This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}
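
/*
 * Userland sketch of a registration that takes the NOTE_TRACK path above
 * (illustrative only; "kq" is an existing kqueue descriptor):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, getpid(), EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * After the traced process calls fork(), knote_fork() reports NOTE_FORK on
 * the parent's knote and delivers a NOTE_CHILD event whose ident is the
 * child's pid and whose data is the parent's pid, matching the EV_FLAG2
 * registration performed above.
 */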

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define	NOTE_TIMER_PRECMASK						\
    (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)

static sbintime_t
timer2sbintime(intptr_t data, int flags)
{
	int64_t secs;

	/*
	 * Macros for converting to the fractional second portion of an
	 * sbintime_t using 64bit multiplication to improve precision.
	 */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}
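
/*
 * Worked example of the conversion above (explanatory only): data == 1500
 * with NOTE_MSECONDS splits into secs == 1 and a 500 ms remainder, giving
 * (1 << 32) | MS_TO_SBT(500).  MS_TO_SBT(500) is
 * (500 * ((1 << 63) / 500)) >> 32, i.e. 1 << 31 up to the truncation of
 * the integer division -- half of the 2^32 fraction units that make up one
 * second -- so the result encodes 1.5 s in 32.32 fixed point.  The
 * divisors 500000000, 500000 and 500 are each unit's scale divided by two,
 * letting the macros compute x * 2^32 / unit as
 * (x * (2^63 / (unit / 2))) >> 32 while the intermediate product of a
 * sub-second remainder still fits in 64 bits.
 */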

struct kq_timer_cb_data {
	struct callout c;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period, 0 for abs */
};

static void
filt_timerexpire(void *knx)
{
	struct knote *kn;
	struct kq_timer_cb_data *kc;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != 0)
		return;
	kc = kn->kn_ptr.p_v;
	if (kc->to == 0)
		return;
	kc->next += kc->to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	struct bintime bt;
	sbintime_t to, sbt;
	unsigned int ncallouts;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far */
	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		getboottimebin(&bt);
		sbt = bttosbt(bt);
		to -= sbt;
	}
	if (to < 0)
		return (EINVAL);

	do {
		ncallouts = kq_ncallouts;
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));

	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
		kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	callout_init(&kc->c, 1);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		kc->next = to;
		kc->to = 0;
	} else {
		kc->next = to + sbinuptime();
		kc->to = to;
	}
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old;

	kc = kn->kn_ptr.p_v;
	callout_drain(&kc->c);
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
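
/*
 * Userland sketch of the EVFILT_USER protocol implemented above
 * (illustrative only; "kq" is an existing kqueue descriptor):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	-- register, idle
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	-- fire the event
 *
 * The NOTE_TRIGGER change reaches filt_usertouch() with EVENT_REGISTER and
 * sets kn_hookid, so a later kevent() wait returns the event via
 * filt_user(); EV_CLEAR rearms it on delivery (the EVENT_PROCESS case).
 */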

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, NULL));
}

static void
kqueue_init(struct kqueue *kq)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}

int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct ucred *cred;
	int fd, error;

	fdp = td->td_proc->p_fd;
	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = falloc_caps(td, &fp, &fd, flags, fcaps);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

struct g_kevent_args {
	int	fd;
	void	*changelist;
	int	nchanges;
	void	*eventlist;
	int	nevents;
	const struct timespec *timeout;
};

int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent_copyout,
		.k_copyin = kevent_copyin,
		.kevent_size = sizeof(struct kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
}

static int
kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
    struct kevent_copyops *k_ops, const char *struct_name)
{
	struct timespec ts, *tsp;
#ifdef KTRACE
	struct kevent *eventlist = uap->eventlist;
#endif
	int error;

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
		    uap->nchanges, k_ops->kevent_size);
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    k_ops, tsp);

#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
		    td->td_retval[0], k_ops->kevent_size);
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

#ifdef COMPAT_FREEBSD11
static int
kevent11_copyout(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct kevent_freebsd11 kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	for (i = 0; i < count; i++) {
		kev11.ident = kevp->ident;
		kev11.filter = kevp->filter;
		kev11.flags = kevp->flags;
		kev11.fflags = kevp->fflags;
		kev11.data = kevp->data;
		kev11.udata = kevp->udata;
		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
		if (error != 0)
			break;
		uap->eventlist++;
		kevp++;
	}
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent11_copyin(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct kevent_freebsd11 kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	for (i = 0; i < count; i++) {
		error = copyin(uap->changelist, &kev11, sizeof(kev11));
		if (error != 0)
			break;
		kevp->ident = kev11.ident;
		kevp->filter = kev11.filter;
		kevp->flags = kev11.flags;
		kevp->fflags = kev11.fflags;
		kevp->data = (uintptr_t)kev11.data;
		kevp->udata = kev11.udata;
		bzero(&kevp->ext, sizeof(kevp->ext));
		uap->changelist++;
		kevp++;
	}
	return (error);
}

int
freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent11_copyout,
		.k_copyin = kevent11_copyin,
		.kevent_size = sizeof(struct kevent_freebsd11),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
}
#endif

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);

	return (error);
}

static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue. This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}
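
/*
 * Sketch of an in-kernel caller (the helper names are hypothetical): since
 * the kqueue is private and short-lived, the kevent_copyops callbacks can
 * move kevents to and from plain kernel buffers instead of userspace:
 *
 *	struct kevent change, result;
 *	struct kevent_copyops k_ops = {
 *		.arg = &ctx,			-- wraps &change / &result
 *		.k_copyin = example_copyin,	-- bcopy() from &change
 *		.k_copyout = example_copyout,	-- bcopy() into &result
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *
 *	error = kern_kevent_anonymous(td, 1, &k_ops);
 *
 * This registers one change and then waits in kqueue_scan() for at most
 * one event, the one-shot poll()/select() style behaviour described in
 * the comment above.
 */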

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}
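
/*
 * Sketch of how a subsystem claims one of the sysfilt_ops slots at module
 * load and releases it at unload (cf. the EVFILT_AIO/EVFILT_LIO
 * registrations in vfs_aio.c; "example_filtops" and its methods are
 * hypothetical):
 *
 *	static struct filterops example_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = example_filt_attach,
 *		.f_detach = example_filt_detach,
 *		.f_event = example_filt_event,
 *	};
 *
 *	error = kqueue_add_filteropts(EVFILT_LIO, &example_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_LIO);	-- EBUSY while in use
 */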

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	if (sysfilt_ops[~filt].for_nolock)
		return sysfilt_ops[~filt].for_fop;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
 * determines whether memory allocation should wait; make sure it is 0 if
 * you hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
    int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	struct knlist *knl;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
		return (EINVAL);

	fp = NULL;
	kn = NULL;
	knl = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	if (kev->flags & EV_ADD) {
		/*
		 * Prevent waiting with locks.  Non-sleepable
		 * allocation failures are handled in the loop, only
		 * if the spare knote appears to be actually required.
		 */
		tkn = knote_alloc(waitok);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		if (kev->ident > INT_MAX)
			error = EBADF;
		else
			error = fget(td, kev->ident,
			    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);

		/*
		 * If possible, find an existing knote to use for this kevent.
		 */
		if (kev->filter == EVFILT_PROC &&
		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
			/* This is an internal creation of a process tracking
			 * note. Don't attempt to coalesce this with an
			 * existing note.
			 */
			;
		} else if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && kn_in_flux(kn)) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
			kn->kn_status = KN_DETACHED;
			kn_enter_flux(kn);

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop_detached(kn, td);
				goto done;
			}
			knl = kn_list_lock(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		knote_drop(kn, td);
		goto done;
	}

	if (kev->flags & EV_FORCEONESHOT) {
		kn->kn_flags |= EV_ONESHOT;
		KNOTE_ACTIVATE(kn, 1);
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_SCAN;
	kn_enter_flux(kn);
	KQ_UNLOCK(kq);
	knl = kn_list_lock(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, i.e., filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	if ((kev->flags & EV_ENABLE) != 0)
		kn->kn_status &= ~KN_DISABLED;
	else if ((kev->flags & EV_DISABLE) != 0)
		kn->kn_status |= KN_DISABLED;

	if ((kn->kn_status & KN_DISABLED) == 0)
		event = kn->kn_fop->f_event(kn, 0);
	else
		event = 0;

	KQ_LOCK(kq);
	if (event)
		kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
	    KN_ACTIVE)
		knote_enqueue(kn);
	kn->kn_status &= ~KN_SCAN;
	kn_leave_flux(kn);
	kn_list_unlock(knl);
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
    int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and have FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}

static void
kqueue_drain(struct kqueue *kq, struct thread *td)
{
	struct knote *kn;
	int i;

	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			if (kn_in_flux(kn)) {
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
				continue;
			}
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				if (kn_in_flux(kn)) {
					kq->kq_state |= KQ_FLUXWAIT;
					msleep(kq, &kq->kq_lock, PSOCK,
					    "kqclo2", 0);
					continue;
				}
				kn_enter_flux(kn);
				KQ_UNLOCK(kq);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}

	KQ_UNLOCK(kq);
}

static void
kqueue_destroy(struct kqueue *kq)
{

	KASSERT(kq->kq_fdp == NULL,
	    ("kqueue still attached to a file descriptor"));
	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	int error;
	int filedesc_unlock;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;
	kqueue_drain(kq, td);

	/*
	 * We could be called due to the knote_drop() doing fdrop(),
	 * called from kqueue_register().  In this case the global
	 * lock is owned, and filedesc sx is locked before, to not
	 * take the sleepable lock after non-sleepable.
	 */
	fdp = kq->kq_fdp;
	kq->kq_fdp = NULL;
	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
		FILEDESC_XLOCK(fdp);
		filedesc_unlock = 1;
	} else
		filedesc_unlock = 0;
	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(fdp);

	kqueue_destroy(kq);
	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
	crfree(kq->kq_cred);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static int
kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{

	kif->kf_type = KF_TYPE_KQUEUE;
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to make up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int lockflags)
{
	struct kqueue *kq;
	struct knote *kn, *tkn;
	int error;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);

	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and enter influx), we can
	 * eliminate the kqueue scheduling, but this will introduce
	 * four lock/unlock's for each knote to test.  Also, marker
	 * would be needed to keep iteration position, since filters
	 * or other threads could remove events.
	 */
	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			/*
			 * Do not process the influx notes, except for
			 * the influx coming from the kq unlock in the
			 * kqueue_scan().  In the latter case, we do
			 * not interfere with the scan, since the code
			 * fragment in kqueue_scan() locks the knlist,
			 * and cannot proceed until we finish.
			 */
			KQ_UNLOCK(kq);
		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			error = kn->kn_fop->f_event(kn, hint);
			KQ_LOCK(kq);
			kn_leave_flux(kn);
			if (error)
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK_FLUX(kq);
		} else {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, hint))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
		}
	}
	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_unlock(list->kl_lockarg);
}
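
/*
 * Sketch of the producer side of this interface (a hypothetical driver
 * softc "sc"): an object embeds a knlist, usually inside a struct selinfo,
 * attaches knotes to it from its f_kqfilter method, and calls knote()
 * whenever its state changes:
 *
 *	knlist_init_mtx(&sc->sc_selinfo.si_note, &sc->sc_mtx);
 *	...
 *	knlist_add(&sc->sc_selinfo.si_note, kn, 0);	-- in f_kqfilter
 *	...
 *	KNOTE_LOCKED(&sc->sc_selinfo.si_note, 0);	-- sc_mtx held
 *
 * KNOTE_LOCKED() and KNOTE_UNLOCKED() from sys/event.h expand to knote()
 * with and without KNF_LISTLOCKED, matching whether the caller already
 * holds the knlist lock.
 */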
/*
 * Add a knote to a knlist.
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{

	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
	KASSERT((kn->kn_status & KN_DETACHED) != 0,
	    ("knote %p was not detached", kn));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
    int kqislocked)
{

	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
	KASSERT((kn->kn_status & KN_DETACHED) == 0,
	    ("knote %p was already detached", kn));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		kn_list_unlock(knl);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * Remove a knote from the specified knlist.
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

int
knlist_empty(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	return (SLIST_EMPTY(&knl->kl_list));
}

static struct mtx knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
    MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);

static void
knlist_mtx_lock(void *arg)
{

	mtx_lock((struct mtx *)arg);
}

static void
knlist_mtx_unlock(void *arg)
{

	mtx_unlock((struct mtx *)arg);
}

static void
knlist_mtx_assert_locked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_OWNED);
}

static void
knlist_mtx_assert_unlocked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
}

static void
knlist_rw_rlock(void *arg)
{

	rw_rlock((struct rwlock *)arg);
}

static void
knlist_rw_runlock(void *arg)
{

	rw_runlock((struct rwlock *)arg);
}

static void
knlist_rw_assert_locked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_LOCKED);
}

static void
knlist_rw_assert_unlocked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
}
void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_locked == NULL)
		knl->kl_assert_locked = knlist_mtx_assert_locked;
	else
		knl->kl_assert_locked = kl_assert_locked;
	if (kl_assert_unlocked == NULL)
		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
	else
		knl->kl_assert_unlocked = kl_assert_unlocked;

	knl->kl_autodestroy = 0;
	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
}

struct knlist *
knlist_alloc(struct mtx *lock)
{
	struct knlist *knl;

	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
	knlist_init_mtx(knl, lock);
	return (knl);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
}

void
knlist_destroy(struct knlist *knl)
{

	KASSERT(KNLIST_EMPTY(knl),
	    ("destroying knlist %p with knotes on it", knl));
}

void
knlist_detach(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	knl->kl_autodestroy = 1;
	if (knlist_empty(knl)) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}
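/*
 * Typical consumers embed a struct selinfo in their softc and point
 * its knlist at the lock that already protects the object.  A
 * hypothetical attach routine (names are illustrative only) might do:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct selinfo	sc_rsel;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
 *
 * Passing a NULL lock instead falls back to the shared knlist_lock
 * above, and knlist_init_rw_reader() covers objects protected by an
 * rwlock taken in read mode.
 */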
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop_detached(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still in-flux knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}
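/*
 * Callers rarely invoke knlist_cleardel() directly; <sys/event.h>
 * wraps it as knlist_clear() (keep the knotes but mark them
 * EV_EOF | EV_ONESHOT) and knlist_delete() (kill them outright).
 * A teardown path in a hypothetical driver detach routine might
 * look like this (sketch, illustrative names):
 *
 *	knlist_delete(&sc->sc_rsel.si_note, curthread, 0);
 *	knlist_destroy(&sc->sc_rsel.si_note);
 *	seldrain(&sc->sc_rsel);
 *
 * i.e. reap every watcher first, then assert the list is empty, then
 * drain any selects still referencing the selinfo.
 */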
/*
 * Remove all knotes referencing a specified fd.  This must be called
 * with the FILEDESC lock held; otherwise a new fd could come along,
 * occupy the entry, and have a knote attached to the wrong fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn_in_flux(kn)) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			influx = 1;
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return (ENOMEM);
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return (ENOMEM);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	return (0);
}

static void
knote_drop(struct knote *kn, struct thread *td)
{

	if ((kn->kn_status & KN_DETACHED) == 0)
		kn->kn_fop->f_detach(kn);
	knote_drop_detached(kn, td);
}

static void
knote_drop_detached(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_DETACHED) != 0,
	    ("knote %p still attached", kn));
	KQ_NOTOWNED(kq);

	KQ_LOCK(kq);
	KASSERT(kn->kn_influx == 1,
	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));

	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{

	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
	    M_ZERO));
}

static void
knote_free(struct knote *kn)
{

	uma_zfree(knote_zone, kn);
}
/*
 * Register the kevent with the kqueue specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);
	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);
	return (error);
}
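/*
 * This is the hook used by in-kernel producers of kevents (the AIO
 * code, for instance, posts completions to a user-supplied kqueue
 * this way).  A sketch of a caller, with illustrative values only:
 *
 *	struct kevent kev;
 *	int error;
 *
 *	EV_SET(&kev, (uintptr_t)job, EVFILT_AIO, EV_ADD | EV_ONESHOT,
 *	    0, 0, job);
 *	error = kqfd_register(kqfd, &kev, td, 1);
 *
 * The caller holds no kqueue references across the call: fget() and
 * kqueue_acquire() take them, and both are dropped before returning.
 */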