/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#ifdef COMPAT_FREEBSD11
#define	_WANT_FREEBSD11_KEVENT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
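 *
 * Callers that may need to hold more than one kq lock at a time take
 * kq_global first and track whether they already own it with a local flag,
 * using the KQ_GLOBAL_LOCK()/KQ_GLOBAL_UNLOCK() helpers defined below.
 * Illustrative sketch of the pattern used by kqueue_register() and
 * kqueue_scan() (names as in this file):
 *
 *	int haskqglobal = 0;
 *
 *	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 *	... work that may acquire several kq locks ...
 *	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);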
86 */ 87 static struct mtx kq_global; 88 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 89 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 90 if (!haslck) \ 91 mtx_lock(lck); \ 92 haslck = 1; \ 93 } while (0) 94 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 95 if (haslck) \ 96 mtx_unlock(lck); \ 97 haslck = 0; \ 98 } while (0) 99 100 TASKQUEUE_DEFINE_THREAD(kqueue_ctx); 101 102 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 103 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 104 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 105 struct thread *td, int mflag); 106 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 107 static void kqueue_release(struct kqueue *kq, int locked); 108 static void kqueue_destroy(struct kqueue *kq); 109 static void kqueue_drain(struct kqueue *kq, struct thread *td); 110 static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 111 uintptr_t ident, int mflag); 112 static void kqueue_task(void *arg, int pending); 113 static int kqueue_scan(struct kqueue *kq, int maxevents, 114 struct kevent_copyops *k_ops, 115 const struct timespec *timeout, 116 struct kevent *keva, struct thread *td); 117 static void kqueue_wakeup(struct kqueue *kq); 118 static struct filterops *kqueue_fo_find(int filt); 119 static void kqueue_fo_release(int filt); 120 struct g_kevent_args; 121 static int kern_kevent_generic(struct thread *td, 122 struct g_kevent_args *uap, 123 struct kevent_copyops *k_ops, const char *struct_name); 124 125 static fo_ioctl_t kqueue_ioctl; 126 static fo_poll_t kqueue_poll; 127 static fo_kqfilter_t kqueue_kqfilter; 128 static fo_stat_t kqueue_stat; 129 static fo_close_t kqueue_close; 130 static fo_fill_kinfo_t kqueue_fill_kinfo; 131 132 static struct fileops kqueueops = { 133 .fo_read = invfo_rdwr, 134 .fo_write = invfo_rdwr, 135 .fo_truncate = invfo_truncate, 136 .fo_ioctl = kqueue_ioctl, 137 .fo_poll = kqueue_poll, 138 .fo_kqfilter = kqueue_kqfilter, 139 .fo_stat = kqueue_stat, 140 .fo_close = kqueue_close, 141 .fo_chmod = invfo_chmod, 142 .fo_chown = invfo_chown, 143 .fo_sendfile = invfo_sendfile, 144 .fo_fill_kinfo = kqueue_fill_kinfo, 145 }; 146 147 static int knote_attach(struct knote *kn, struct kqueue *kq); 148 static void knote_drop(struct knote *kn, struct thread *td); 149 static void knote_drop_detached(struct knote *kn, struct thread *td); 150 static void knote_enqueue(struct knote *kn); 151 static void knote_dequeue(struct knote *kn); 152 static void knote_init(void); 153 static struct knote *knote_alloc(int mflag); 154 static void knote_free(struct knote *kn); 155 156 static void filt_kqdetach(struct knote *kn); 157 static int filt_kqueue(struct knote *kn, long hint); 158 static int filt_procattach(struct knote *kn); 159 static void filt_procdetach(struct knote *kn); 160 static int filt_proc(struct knote *kn, long hint); 161 static int filt_fileattach(struct knote *kn); 162 static void filt_timerexpire(void *knx); 163 static void filt_timerexpire_l(struct knote *kn, bool proc_locked); 164 static int filt_timerattach(struct knote *kn); 165 static void filt_timerdetach(struct knote *kn); 166 static void filt_timerstart(struct knote *kn, sbintime_t to); 167 static void filt_timertouch(struct knote *kn, struct kevent *kev, 168 u_long type); 169 static int filt_timervalidate(struct knote *kn, sbintime_t *to); 170 static int filt_timer(struct knote *kn, long hint); 171 static int filt_userattach(struct knote *kn); 172 static void filt_userdetach(struct knote *kn); 
173 static int filt_user(struct knote *kn, long hint); 174 static void filt_usertouch(struct knote *kn, struct kevent *kev, 175 u_long type); 176 177 static struct filterops file_filtops = { 178 .f_isfd = 1, 179 .f_attach = filt_fileattach, 180 }; 181 static struct filterops kqread_filtops = { 182 .f_isfd = 1, 183 .f_detach = filt_kqdetach, 184 .f_event = filt_kqueue, 185 }; 186 /* XXX - move to kern_proc.c? */ 187 static struct filterops proc_filtops = { 188 .f_isfd = 0, 189 .f_attach = filt_procattach, 190 .f_detach = filt_procdetach, 191 .f_event = filt_proc, 192 }; 193 static struct filterops timer_filtops = { 194 .f_isfd = 0, 195 .f_attach = filt_timerattach, 196 .f_detach = filt_timerdetach, 197 .f_event = filt_timer, 198 .f_touch = filt_timertouch, 199 }; 200 static struct filterops user_filtops = { 201 .f_attach = filt_userattach, 202 .f_detach = filt_userdetach, 203 .f_event = filt_user, 204 .f_touch = filt_usertouch, 205 }; 206 207 static uma_zone_t knote_zone; 208 static unsigned int __exclusive_cache_line kq_ncallouts; 209 static unsigned int kq_calloutmax = 4 * 1024; 210 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 211 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 212 213 /* XXX - ensure not influx ? */ 214 #define KNOTE_ACTIVATE(kn, islock) do { \ 215 if ((islock)) \ 216 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 217 else \ 218 KQ_LOCK((kn)->kn_kq); \ 219 (kn)->kn_status |= KN_ACTIVE; \ 220 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 221 knote_enqueue((kn)); \ 222 if (!(islock)) \ 223 KQ_UNLOCK((kn)->kn_kq); \ 224 } while (0) 225 #define KQ_LOCK(kq) do { \ 226 mtx_lock(&(kq)->kq_lock); \ 227 } while (0) 228 #define KQ_FLUX_WAKEUP(kq) do { \ 229 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 230 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 231 wakeup((kq)); \ 232 } \ 233 } while (0) 234 #define KQ_UNLOCK_FLUX(kq) do { \ 235 KQ_FLUX_WAKEUP(kq); \ 236 mtx_unlock(&(kq)->kq_lock); \ 237 } while (0) 238 #define KQ_UNLOCK(kq) do { \ 239 mtx_unlock(&(kq)->kq_lock); \ 240 } while (0) 241 #define KQ_OWNED(kq) do { \ 242 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 243 } while (0) 244 #define KQ_NOTOWNED(kq) do { \ 245 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 246 } while (0) 247 248 static struct knlist * 249 kn_list_lock(struct knote *kn) 250 { 251 struct knlist *knl; 252 253 knl = kn->kn_knlist; 254 if (knl != NULL) 255 knl->kl_lock(knl->kl_lockarg); 256 return (knl); 257 } 258 259 static void 260 kn_list_unlock(struct knlist *knl) 261 { 262 bool do_free; 263 264 if (knl == NULL) 265 return; 266 do_free = knl->kl_autodestroy && knlist_empty(knl); 267 knl->kl_unlock(knl->kl_lockarg); 268 if (do_free) { 269 knlist_destroy(knl); 270 free(knl, M_KQUEUE); 271 } 272 } 273 274 static bool 275 kn_in_flux(struct knote *kn) 276 { 277 278 return (kn->kn_influx > 0); 279 } 280 281 static void 282 kn_enter_flux(struct knote *kn) 283 { 284 285 KQ_OWNED(kn->kn_kq); 286 MPASS(kn->kn_influx < INT_MAX); 287 kn->kn_influx++; 288 } 289 290 static bool 291 kn_leave_flux(struct knote *kn) 292 { 293 294 KQ_OWNED(kn->kn_kq); 295 MPASS(kn->kn_influx > 0); 296 kn->kn_influx--; 297 return (kn->kn_influx == 0); 298 } 299 300 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 301 if (islocked) \ 302 KNL_ASSERT_LOCKED(knl); \ 303 else \ 304 KNL_ASSERT_UNLOCKED(knl); \ 305 } while (0) 306 #ifdef INVARIANTS 307 #define KNL_ASSERT_LOCKED(knl) do { \ 308 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \ 309 } while (0) 310 #define KNL_ASSERT_UNLOCKED(knl) do { \ 311 
knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \ 312 } while (0) 313 #else /* !INVARIANTS */ 314 #define KNL_ASSERT_LOCKED(knl) do {} while (0) 315 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 316 #endif /* INVARIANTS */ 317 318 #ifndef KN_HASHSIZE 319 #define KN_HASHSIZE 64 /* XXX should be tunable */ 320 #endif 321 322 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 323 324 static int 325 filt_nullattach(struct knote *kn) 326 { 327 328 return (ENXIO); 329 }; 330 331 struct filterops null_filtops = { 332 .f_isfd = 0, 333 .f_attach = filt_nullattach, 334 }; 335 336 /* XXX - make SYSINIT to add these, and move into respective modules. */ 337 extern struct filterops sig_filtops; 338 extern struct filterops fs_filtops; 339 340 /* 341 * Table for all system-defined filters. 342 */ 343 static struct mtx filterops_lock; 344 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 345 MTX_DEF); 346 static struct { 347 struct filterops *for_fop; 348 int for_nolock; 349 int for_refcnt; 350 } sysfilt_ops[EVFILT_SYSCOUNT] = { 351 { &file_filtops, 1 }, /* EVFILT_READ */ 352 { &file_filtops, 1 }, /* EVFILT_WRITE */ 353 { &null_filtops }, /* EVFILT_AIO */ 354 { &file_filtops, 1 }, /* EVFILT_VNODE */ 355 { &proc_filtops, 1 }, /* EVFILT_PROC */ 356 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 357 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 358 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 359 { &fs_filtops, 1 }, /* EVFILT_FS */ 360 { &null_filtops }, /* EVFILT_LIO */ 361 { &user_filtops, 1 }, /* EVFILT_USER */ 362 { &null_filtops }, /* EVFILT_SENDFILE */ 363 { &file_filtops, 1 }, /* EVFILT_EMPTY */ 364 }; 365 366 /* 367 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 368 * method. 369 */ 370 static int 371 filt_fileattach(struct knote *kn) 372 { 373 374 return (fo_kqfilter(kn->kn_fp, kn)); 375 } 376 377 /*ARGSUSED*/ 378 static int 379 kqueue_kqfilter(struct file *fp, struct knote *kn) 380 { 381 struct kqueue *kq = kn->kn_fp->f_data; 382 383 if (kn->kn_filter != EVFILT_READ) 384 return (EINVAL); 385 386 kn->kn_status |= KN_KQUEUE; 387 kn->kn_fop = &kqread_filtops; 388 knlist_add(&kq->kq_sel.si_note, kn, 0); 389 390 return (0); 391 } 392 393 static void 394 filt_kqdetach(struct knote *kn) 395 { 396 struct kqueue *kq = kn->kn_fp->f_data; 397 398 knlist_remove(&kq->kq_sel.si_note, kn, 0); 399 } 400 401 /*ARGSUSED*/ 402 static int 403 filt_kqueue(struct knote *kn, long hint) 404 { 405 struct kqueue *kq = kn->kn_fp->f_data; 406 407 kn->kn_data = kq->kq_count; 408 return (kn->kn_data > 0); 409 } 410 411 /* XXX - move to kern_proc.c? */ 412 static int 413 filt_procattach(struct knote *kn) 414 { 415 struct proc *p; 416 int error; 417 bool exiting, immediate; 418 419 exiting = immediate = false; 420 if (kn->kn_sfflags & NOTE_EXIT) 421 p = pfind_any(kn->kn_id); 422 else 423 p = pfind(kn->kn_id); 424 if (p == NULL) 425 return (ESRCH); 426 if (p->p_flag & P_WEXIT) 427 exiting = true; 428 429 if ((error = p_cansee(curthread, p))) { 430 PROC_UNLOCK(p); 431 return (error); 432 } 433 434 kn->kn_ptr.p_proc = p; 435 kn->kn_flags |= EV_CLEAR; /* automatically set */ 436 437 /* 438 * Internal flag indicating registration done by kernel for the 439 * purposes of getting a NOTE_CHILD notification. 440 */ 441 if (kn->kn_flags & EV_FLAG2) { 442 kn->kn_flags &= ~EV_FLAG2; 443 kn->kn_data = kn->kn_sdata; /* ppid */ 444 kn->kn_fflags = NOTE_CHILD; 445 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); 446 immediate = true; /* Force immediate activation of child note. 
*/ 447 } 448 /* 449 * Internal flag indicating registration done by kernel (for other than 450 * NOTE_CHILD). 451 */ 452 if (kn->kn_flags & EV_FLAG1) { 453 kn->kn_flags &= ~EV_FLAG1; 454 } 455 456 knlist_add(p->p_klist, kn, 1); 457 458 /* 459 * Immediately activate any child notes or, in the case of a zombie 460 * target process, exit notes. The latter is necessary to handle the 461 * case where the target process, e.g. a child, dies before the kevent 462 * is registered. 463 */ 464 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) 465 KNOTE_ACTIVATE(kn, 0); 466 467 PROC_UNLOCK(p); 468 469 return (0); 470 } 471 472 /* 473 * The knote may be attached to a different process, which may exit, 474 * leaving nothing for the knote to be attached to. So when the process 475 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 476 * it will be deleted when read out. However, as part of the knote deletion, 477 * this routine is called, so a check is needed to avoid actually performing 478 * a detach, because the original process does not exist any more. 479 */ 480 /* XXX - move to kern_proc.c? */ 481 static void 482 filt_procdetach(struct knote *kn) 483 { 484 485 knlist_remove(kn->kn_knlist, kn, 0); 486 kn->kn_ptr.p_proc = NULL; 487 } 488 489 /* XXX - move to kern_proc.c? */ 490 static int 491 filt_proc(struct knote *kn, long hint) 492 { 493 struct proc *p; 494 u_int event; 495 496 p = kn->kn_ptr.p_proc; 497 if (p == NULL) /* already activated, from attach filter */ 498 return (0); 499 500 /* Mask off extra data. */ 501 event = (u_int)hint & NOTE_PCTRLMASK; 502 503 /* If the user is interested in this event, record it. */ 504 if (kn->kn_sfflags & event) 505 kn->kn_fflags |= event; 506 507 /* Process is gone, so flag the event as finished. */ 508 if (event == NOTE_EXIT) { 509 kn->kn_flags |= EV_EOF | EV_ONESHOT; 510 kn->kn_ptr.p_proc = NULL; 511 if (kn->kn_fflags & NOTE_EXIT) 512 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 513 if (kn->kn_fflags == 0) 514 kn->kn_flags |= EV_DROP; 515 return (1); 516 } 517 518 return (kn->kn_fflags != 0); 519 } 520 521 /* 522 * Called when the process forked. It mostly does the same as the 523 * knote(), activating all knotes registered to be activated when the 524 * process forked. Additionally, for each knote attached to the 525 * parent, check whether user wants to track the new process. If so 526 * attach a new knote to it, and immediately report an event with the 527 * child's pid. 528 */ 529 void 530 knote_fork(struct knlist *list, int pid) 531 { 532 struct kqueue *kq; 533 struct knote *kn; 534 struct kevent kev; 535 int error; 536 537 MPASS(list != NULL); 538 KNL_ASSERT_LOCKED(list); 539 if (SLIST_EMPTY(&list->kl_list)) 540 return; 541 542 memset(&kev, 0, sizeof(kev)); 543 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 544 kq = kn->kn_kq; 545 KQ_LOCK(kq); 546 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 547 KQ_UNLOCK(kq); 548 continue; 549 } 550 551 /* 552 * The same as knote(), activate the event. 553 */ 554 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 555 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 556 KNOTE_ACTIVATE(kn, 1); 557 KQ_UNLOCK(kq); 558 continue; 559 } 560 561 /* 562 * The NOTE_TRACK case. In addition to the activation 563 * of the event, we need to register new events to 564 * track the child. Drop the locks in preparation for 565 * the call to kqueue_register(). 
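		 * The knote is put in-flux before the kq and knlist locks are
		 * dropped, so concurrent scans and teardown wait for it (or
		 * skip it) instead of touching it while we are unlocked; the
		 * flux state is cleared again once both locks have been
		 * re-acquired below.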
566 */ 567 kn_enter_flux(kn); 568 KQ_UNLOCK(kq); 569 list->kl_unlock(list->kl_lockarg); 570 571 /* 572 * Activate existing knote and register tracking knotes with 573 * new process. 574 * 575 * First register a knote to get just the child notice. This 576 * must be a separate note from a potential NOTE_EXIT 577 * notification since both NOTE_CHILD and NOTE_EXIT are defined 578 * to use the data field (in conflicting ways). 579 */ 580 kev.ident = pid; 581 kev.filter = kn->kn_filter; 582 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 583 EV_FLAG2; 584 kev.fflags = kn->kn_sfflags; 585 kev.data = kn->kn_id; /* parent */ 586 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 587 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 588 if (error) 589 kn->kn_fflags |= NOTE_TRACKERR; 590 591 /* 592 * Then register another knote to track other potential events 593 * from the new process. 594 */ 595 kev.ident = pid; 596 kev.filter = kn->kn_filter; 597 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 598 kev.fflags = kn->kn_sfflags; 599 kev.data = kn->kn_id; /* parent */ 600 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 601 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 602 if (error) 603 kn->kn_fflags |= NOTE_TRACKERR; 604 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 605 KNOTE_ACTIVATE(kn, 0); 606 list->kl_lock(list->kl_lockarg); 607 KQ_LOCK(kq); 608 kn_leave_flux(kn); 609 KQ_UNLOCK_FLUX(kq); 610 } 611 } 612 613 /* 614 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 615 * interval timer support code. 616 */ 617 618 #define NOTE_TIMER_PRECMASK \ 619 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 620 621 static sbintime_t 622 timer2sbintime(int64_t data, int flags) 623 { 624 int64_t secs; 625 626 /* 627 * Macros for converting to the fractional second portion of an 628 * sbintime_t using 64bit multiplication to improve precision. 
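	 *
	 * An sbintime_t is a 32.32 fixed-point count of seconds.  Each macro
	 * multiplies by a precomputed constant roughly equal to
	 * 2^64 / (units per second) and shifts right by 32, so for example
	 * NS_TO_SBT(ns) is about ns * 2^32 / 10^9.  As a rough worked example
	 * (illustrative only), MS_TO_SBT(500) comes out near 2^31, i.e. one
	 * half in the 32-bit fraction.  Whole seconds are split off by the
	 * callers below before these macros are applied.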
	 */
#define	NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define	US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define	MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}

struct kq_timer_cb_data {
	struct callout c;
	struct proc *p;
	struct knote *kn;
	int cpuid;
	int flags;
	TAILQ_ENTRY(kq_timer_cb_data) link;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period, 0 for abs */
};

#define	KQ_TIMER_CB_ENQUEUED	0x01

static void
kqtimer_sched_callout(struct kq_timer_cb_data *kc)
{
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
	    kc->cpuid, C_ABSOLUTE);
}

void
kqtimer_proc_continue(struct proc *p)
{
	struct kq_timer_cb_data *kc, *kc1;
	struct bintime bt;
	sbintime_t now;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	getboottimebin(&bt);
	now = bttosbt(bt);

	TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
		TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
		kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
		if (kc->next <= now)
			filt_timerexpire_l(kc->kn, true);
		else
			kqtimer_sched_callout(kc);
	}
}

static void
filt_timerexpire_l(struct knote *kn, bool proc_locked)
{
	struct kq_timer_cb_data *kc;
	struct proc *p;
	uint64_t delta;
	sbintime_t now;

	kc = kn->kn_ptr.p_v;

	if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn, 0);
		return;
	}

	now = sbinuptime();
	if (now >= kc->next) {
		delta = (now - kc->next) / kc->to;
		if (delta == 0)
			delta = 1;
		kn->kn_data += delta;
		kc->next += delta * kc->to;
		if (now >= kc->next)	/* overflow */
			kc->next = now + kc->to;
		KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
	}

	/*
	 * The initial check for a stopped kc->p is racy.  It is fine to
	 * miss the setting of the stop flags; at worst we would schedule
	 * one more callout.  On the other hand, it is not fine to skip
	 * scheduling when we missed the clearing of the flags, so we
	 * recheck them under the lock and observe a consistent state.
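	 * (kqtimer_proc_continue() later re-arms or fires any timers that
	 * were parked on p_kqtim_stop while the process was stopped.)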
753 */ 754 p = kc->p; 755 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 756 if (!proc_locked) 757 PROC_LOCK(p); 758 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 759 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) { 760 kc->flags |= KQ_TIMER_CB_ENQUEUED; 761 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link); 762 } 763 if (!proc_locked) 764 PROC_UNLOCK(p); 765 return; 766 } 767 if (!proc_locked) 768 PROC_UNLOCK(p); 769 } 770 kqtimer_sched_callout(kc); 771 } 772 773 static void 774 filt_timerexpire(void *knx) 775 { 776 filt_timerexpire_l(knx, false); 777 } 778 779 /* 780 * data contains amount of time to sleep 781 */ 782 static int 783 filt_timervalidate(struct knote *kn, sbintime_t *to) 784 { 785 struct bintime bt; 786 sbintime_t sbt; 787 788 if (kn->kn_sdata < 0) 789 return (EINVAL); 790 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 791 kn->kn_sdata = 1; 792 /* 793 * The only fflags values supported are the timer unit 794 * (precision) and the absolute time indicator. 795 */ 796 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 797 return (EINVAL); 798 799 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 800 if (*to < 0) 801 return (EINVAL); 802 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 803 getboottimebin(&bt); 804 sbt = bttosbt(bt); 805 *to = MAX(0, *to - sbt); 806 } 807 return (0); 808 } 809 810 static int 811 filt_timerattach(struct knote *kn) 812 { 813 struct kq_timer_cb_data *kc; 814 sbintime_t to; 815 int error; 816 817 to = -1; 818 error = filt_timervalidate(kn, &to); 819 if (error != 0) 820 return (error); 821 KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 || 822 (kn->kn_sfflags & NOTE_ABSTIME) != 0, 823 ("%s: periodic timer has a calculated zero timeout", __func__)); 824 KASSERT(to >= 0, 825 ("%s: timer has a calculated negative timeout", __func__)); 826 827 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) { 828 atomic_subtract_int(&kq_ncallouts, 1); 829 return (ENOMEM); 830 } 831 832 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 833 kn->kn_flags |= EV_CLEAR; /* automatically set */ 834 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 835 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 836 kc->kn = kn; 837 kc->p = curproc; 838 kc->cpuid = PCPU_GET(cpuid); 839 kc->flags = 0; 840 callout_init(&kc->c, 1); 841 filt_timerstart(kn, to); 842 843 return (0); 844 } 845 846 static void 847 filt_timerstart(struct knote *kn, sbintime_t to) 848 { 849 struct kq_timer_cb_data *kc; 850 851 kc = kn->kn_ptr.p_v; 852 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 853 kc->next = to; 854 kc->to = 0; 855 } else { 856 kc->next = to + sbinuptime(); 857 kc->to = to; 858 } 859 kqtimer_sched_callout(kc); 860 } 861 862 static void 863 filt_timerdetach(struct knote *kn) 864 { 865 struct kq_timer_cb_data *kc; 866 unsigned int old __unused; 867 bool pending; 868 869 kc = kn->kn_ptr.p_v; 870 do { 871 callout_drain(&kc->c); 872 873 /* 874 * kqtimer_proc_continue() might have rescheduled this callout. 875 * Double-check, using the process mutex as an interlock. 
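		 * Removing the entry from p_kqtim_stop while holding the proc
		 * lock prevents kqtimer_proc_continue() from re-arming it
		 * again; if a reschedule already slipped in, callout_pending()
		 * is still true below and the loop drains the callout once
		 * more.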
876 */ 877 PROC_LOCK(kc->p); 878 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) { 879 kc->flags &= ~KQ_TIMER_CB_ENQUEUED; 880 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link); 881 } 882 pending = callout_pending(&kc->c); 883 PROC_UNLOCK(kc->p); 884 } while (pending); 885 free(kc, M_KQUEUE); 886 old = atomic_fetchadd_int(&kq_ncallouts, -1); 887 KASSERT(old > 0, ("Number of callouts cannot become negative")); 888 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 889 } 890 891 static void 892 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 893 { 894 struct kq_timer_cb_data *kc; 895 struct kqueue *kq; 896 sbintime_t to; 897 int error; 898 899 switch (type) { 900 case EVENT_REGISTER: 901 /* Handle re-added timers that update data/fflags */ 902 if (kev->flags & EV_ADD) { 903 kc = kn->kn_ptr.p_v; 904 905 /* Drain any existing callout. */ 906 callout_drain(&kc->c); 907 908 /* Throw away any existing undelivered record 909 * of the timer expiration. This is done under 910 * the presumption that if a process is 911 * re-adding this timer with new parameters, 912 * it is no longer interested in what may have 913 * happened under the old parameters. If it is 914 * interested, it can wait for the expiration, 915 * delete the old timer definition, and then 916 * add the new one. 917 * 918 * This has to be done while the kq is locked: 919 * - if enqueued, dequeue 920 * - make it no longer active 921 * - clear the count of expiration events 922 */ 923 kq = kn->kn_kq; 924 KQ_LOCK(kq); 925 if (kn->kn_status & KN_QUEUED) 926 knote_dequeue(kn); 927 928 kn->kn_status &= ~KN_ACTIVE; 929 kn->kn_data = 0; 930 KQ_UNLOCK(kq); 931 932 /* Reschedule timer based on new data/fflags */ 933 kn->kn_sfflags = kev->fflags; 934 kn->kn_sdata = kev->data; 935 error = filt_timervalidate(kn, &to); 936 if (error != 0) { 937 kn->kn_flags |= EV_ERROR; 938 kn->kn_data = error; 939 } else 940 filt_timerstart(kn, to); 941 } 942 break; 943 944 case EVENT_PROCESS: 945 *kev = kn->kn_kevent; 946 if (kn->kn_flags & EV_CLEAR) { 947 kn->kn_data = 0; 948 kn->kn_fflags = 0; 949 } 950 break; 951 952 default: 953 panic("filt_timertouch() - invalid type (%ld)", type); 954 break; 955 } 956 } 957 958 static int 959 filt_timer(struct knote *kn, long hint) 960 { 961 962 return (kn->kn_data != 0); 963 } 964 965 static int 966 filt_userattach(struct knote *kn) 967 { 968 969 /* 970 * EVFILT_USER knotes are not attached to anything in the kernel. 971 */ 972 kn->kn_hook = NULL; 973 if (kn->kn_fflags & NOTE_TRIGGER) 974 kn->kn_hookid = 1; 975 else 976 kn->kn_hookid = 0; 977 return (0); 978 } 979 980 static void 981 filt_userdetach(__unused struct knote *kn) 982 { 983 984 /* 985 * EVFILT_USER knotes are not attached to anything in the kernel. 986 */ 987 } 988 989 static int 990 filt_user(struct knote *kn, __unused long hint) 991 { 992 993 return (kn->kn_hookid); 994 } 995 996 static void 997 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 998 { 999 u_int ffctrl; 1000 1001 switch (type) { 1002 case EVENT_REGISTER: 1003 if (kev->fflags & NOTE_TRIGGER) 1004 kn->kn_hookid = 1; 1005 1006 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 1007 kev->fflags &= NOTE_FFLAGSMASK; 1008 switch (ffctrl) { 1009 case NOTE_FFNOP: 1010 break; 1011 1012 case NOTE_FFAND: 1013 kn->kn_sfflags &= kev->fflags; 1014 break; 1015 1016 case NOTE_FFOR: 1017 kn->kn_sfflags |= kev->fflags; 1018 break; 1019 1020 case NOTE_FFCOPY: 1021 kn->kn_sfflags = kev->fflags; 1022 break; 1023 1024 default: 1025 /* XXX Return error? 
*/ 1026 break; 1027 } 1028 kn->kn_sdata = kev->data; 1029 if (kev->flags & EV_CLEAR) { 1030 kn->kn_hookid = 0; 1031 kn->kn_data = 0; 1032 kn->kn_fflags = 0; 1033 } 1034 break; 1035 1036 case EVENT_PROCESS: 1037 *kev = kn->kn_kevent; 1038 kev->fflags = kn->kn_sfflags; 1039 kev->data = kn->kn_sdata; 1040 if (kn->kn_flags & EV_CLEAR) { 1041 kn->kn_hookid = 0; 1042 kn->kn_data = 0; 1043 kn->kn_fflags = 0; 1044 } 1045 break; 1046 1047 default: 1048 panic("filt_usertouch() - invalid type (%ld)", type); 1049 break; 1050 } 1051 } 1052 1053 int 1054 sys_kqueue(struct thread *td, struct kqueue_args *uap) 1055 { 1056 1057 return (kern_kqueue(td, 0, NULL)); 1058 } 1059 1060 static void 1061 kqueue_init(struct kqueue *kq) 1062 { 1063 1064 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 1065 TAILQ_INIT(&kq->kq_head); 1066 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 1067 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 1068 } 1069 1070 int 1071 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 1072 { 1073 struct filedesc *fdp; 1074 struct kqueue *kq; 1075 struct file *fp; 1076 struct ucred *cred; 1077 int fd, error; 1078 1079 fdp = td->td_proc->p_fd; 1080 cred = td->td_ucred; 1081 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 1082 return (ENOMEM); 1083 1084 error = falloc_caps(td, &fp, &fd, flags, fcaps); 1085 if (error != 0) { 1086 chgkqcnt(cred->cr_ruidinfo, -1, 0); 1087 return (error); 1088 } 1089 1090 /* An extra reference on `fp' has been held for us by falloc(). */ 1091 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 1092 kqueue_init(kq); 1093 kq->kq_fdp = fdp; 1094 kq->kq_cred = crhold(cred); 1095 1096 FILEDESC_XLOCK(fdp); 1097 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 1098 FILEDESC_XUNLOCK(fdp); 1099 1100 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 1101 fdrop(fp, td); 1102 1103 td->td_retval[0] = fd; 1104 return (0); 1105 } 1106 1107 struct g_kevent_args { 1108 int fd; 1109 const void *changelist; 1110 int nchanges; 1111 void *eventlist; 1112 int nevents; 1113 const struct timespec *timeout; 1114 }; 1115 1116 int 1117 sys_kevent(struct thread *td, struct kevent_args *uap) 1118 { 1119 struct kevent_copyops k_ops = { 1120 .arg = uap, 1121 .k_copyout = kevent_copyout, 1122 .k_copyin = kevent_copyin, 1123 .kevent_size = sizeof(struct kevent), 1124 }; 1125 struct g_kevent_args gk_args = { 1126 .fd = uap->fd, 1127 .changelist = uap->changelist, 1128 .nchanges = uap->nchanges, 1129 .eventlist = uap->eventlist, 1130 .nevents = uap->nevents, 1131 .timeout = uap->timeout, 1132 }; 1133 1134 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); 1135 } 1136 1137 static int 1138 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, 1139 struct kevent_copyops *k_ops, const char *struct_name) 1140 { 1141 struct timespec ts, *tsp; 1142 #ifdef KTRACE 1143 struct kevent *eventlist = uap->eventlist; 1144 #endif 1145 int error; 1146 1147 if (uap->timeout != NULL) { 1148 error = copyin(uap->timeout, &ts, sizeof(ts)); 1149 if (error) 1150 return (error); 1151 tsp = &ts; 1152 } else 1153 tsp = NULL; 1154 1155 #ifdef KTRACE 1156 if (KTRPOINT(td, KTR_STRUCT_ARRAY)) 1157 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, 1158 uap->nchanges, k_ops->kevent_size); 1159 #endif 1160 1161 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 1162 k_ops, tsp); 1163 1164 #ifdef KTRACE 1165 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) 1166 ktrstructarray(struct_name, UIO_USERSPACE, eventlist, 1167 
td->td_retval[0], k_ops->kevent_size); 1168 #endif 1169 1170 return (error); 1171 } 1172 1173 /* 1174 * Copy 'count' items into the destination list pointed to by uap->eventlist. 1175 */ 1176 static int 1177 kevent_copyout(void *arg, struct kevent *kevp, int count) 1178 { 1179 struct kevent_args *uap; 1180 int error; 1181 1182 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1183 uap = (struct kevent_args *)arg; 1184 1185 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 1186 if (error == 0) 1187 uap->eventlist += count; 1188 return (error); 1189 } 1190 1191 /* 1192 * Copy 'count' items from the list pointed to by uap->changelist. 1193 */ 1194 static int 1195 kevent_copyin(void *arg, struct kevent *kevp, int count) 1196 { 1197 struct kevent_args *uap; 1198 int error; 1199 1200 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1201 uap = (struct kevent_args *)arg; 1202 1203 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1204 if (error == 0) 1205 uap->changelist += count; 1206 return (error); 1207 } 1208 1209 #ifdef COMPAT_FREEBSD11 1210 static int 1211 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1212 { 1213 struct freebsd11_kevent_args *uap; 1214 struct freebsd11_kevent kev11; 1215 int error, i; 1216 1217 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1218 uap = (struct freebsd11_kevent_args *)arg; 1219 1220 for (i = 0; i < count; i++) { 1221 kev11.ident = kevp->ident; 1222 kev11.filter = kevp->filter; 1223 kev11.flags = kevp->flags; 1224 kev11.fflags = kevp->fflags; 1225 kev11.data = kevp->data; 1226 kev11.udata = kevp->udata; 1227 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1228 if (error != 0) 1229 break; 1230 uap->eventlist++; 1231 kevp++; 1232 } 1233 return (error); 1234 } 1235 1236 /* 1237 * Copy 'count' items from the list pointed to by uap->changelist. 
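 * Each struct freebsd11_kevent is converted field by field into the current
 * struct kevent; the ext[] words, which the old ABI did not have, are zeroed.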
1238 */ 1239 static int 1240 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1241 { 1242 struct freebsd11_kevent_args *uap; 1243 struct freebsd11_kevent kev11; 1244 int error, i; 1245 1246 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1247 uap = (struct freebsd11_kevent_args *)arg; 1248 1249 for (i = 0; i < count; i++) { 1250 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1251 if (error != 0) 1252 break; 1253 kevp->ident = kev11.ident; 1254 kevp->filter = kev11.filter; 1255 kevp->flags = kev11.flags; 1256 kevp->fflags = kev11.fflags; 1257 kevp->data = (uintptr_t)kev11.data; 1258 kevp->udata = kev11.udata; 1259 bzero(&kevp->ext, sizeof(kevp->ext)); 1260 uap->changelist++; 1261 kevp++; 1262 } 1263 return (error); 1264 } 1265 1266 int 1267 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1268 { 1269 struct kevent_copyops k_ops = { 1270 .arg = uap, 1271 .k_copyout = kevent11_copyout, 1272 .k_copyin = kevent11_copyin, 1273 .kevent_size = sizeof(struct freebsd11_kevent), 1274 }; 1275 struct g_kevent_args gk_args = { 1276 .fd = uap->fd, 1277 .changelist = uap->changelist, 1278 .nchanges = uap->nchanges, 1279 .eventlist = uap->eventlist, 1280 .nevents = uap->nevents, 1281 .timeout = uap->timeout, 1282 }; 1283 1284 return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent")); 1285 } 1286 #endif 1287 1288 int 1289 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1290 struct kevent_copyops *k_ops, const struct timespec *timeout) 1291 { 1292 cap_rights_t rights; 1293 struct file *fp; 1294 int error; 1295 1296 cap_rights_init_zero(&rights); 1297 if (nchanges > 0) 1298 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE); 1299 if (nevents > 0) 1300 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT); 1301 error = fget(td, fd, &rights, &fp); 1302 if (error != 0) 1303 return (error); 1304 1305 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1306 fdrop(fp, td); 1307 1308 return (error); 1309 } 1310 1311 static int 1312 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1313 struct kevent_copyops *k_ops, const struct timespec *timeout) 1314 { 1315 struct kevent keva[KQ_NEVENTS]; 1316 struct kevent *kevp, *changes; 1317 int i, n, nerrors, error; 1318 1319 if (nchanges < 0) 1320 return (EINVAL); 1321 1322 nerrors = 0; 1323 while (nchanges > 0) { 1324 n = nchanges > KQ_NEVENTS ? 
KQ_NEVENTS : nchanges; 1325 error = k_ops->k_copyin(k_ops->arg, keva, n); 1326 if (error) 1327 return (error); 1328 changes = keva; 1329 for (i = 0; i < n; i++) { 1330 kevp = &changes[i]; 1331 if (!kevp->filter) 1332 continue; 1333 kevp->flags &= ~EV_SYSFLAGS; 1334 error = kqueue_register(kq, kevp, td, M_WAITOK); 1335 if (error || (kevp->flags & EV_RECEIPT)) { 1336 if (nevents == 0) 1337 return (error); 1338 kevp->flags = EV_ERROR; 1339 kevp->data = error; 1340 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 1341 nevents--; 1342 nerrors++; 1343 } 1344 } 1345 nchanges -= n; 1346 } 1347 if (nerrors) { 1348 td->td_retval[0] = nerrors; 1349 return (0); 1350 } 1351 1352 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1353 } 1354 1355 int 1356 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1357 struct kevent_copyops *k_ops, const struct timespec *timeout) 1358 { 1359 struct kqueue *kq; 1360 int error; 1361 1362 error = kqueue_acquire(fp, &kq); 1363 if (error != 0) 1364 return (error); 1365 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1366 kqueue_release(kq, 0); 1367 return (error); 1368 } 1369 1370 /* 1371 * Performs a kevent() call on a temporarily created kqueue. This can be 1372 * used to perform one-shot polling, similar to poll() and select(). 1373 */ 1374 int 1375 kern_kevent_anonymous(struct thread *td, int nevents, 1376 struct kevent_copyops *k_ops) 1377 { 1378 struct kqueue kq = {}; 1379 int error; 1380 1381 kqueue_init(&kq); 1382 kq.kq_refcnt = 1; 1383 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1384 kqueue_drain(&kq, td); 1385 kqueue_destroy(&kq); 1386 return (error); 1387 } 1388 1389 int 1390 kqueue_add_filteropts(int filt, struct filterops *filtops) 1391 { 1392 int error; 1393 1394 error = 0; 1395 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1396 printf( 1397 "trying to add a filterop that is out of range: %d is beyond %d\n", 1398 ~filt, EVFILT_SYSCOUNT); 1399 return EINVAL; 1400 } 1401 mtx_lock(&filterops_lock); 1402 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1403 sysfilt_ops[~filt].for_fop != NULL) 1404 error = EEXIST; 1405 else { 1406 sysfilt_ops[~filt].for_fop = filtops; 1407 sysfilt_ops[~filt].for_refcnt = 0; 1408 } 1409 mtx_unlock(&filterops_lock); 1410 1411 return (error); 1412 } 1413 1414 int 1415 kqueue_del_filteropts(int filt) 1416 { 1417 int error; 1418 1419 error = 0; 1420 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1421 return EINVAL; 1422 1423 mtx_lock(&filterops_lock); 1424 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1425 sysfilt_ops[~filt].for_fop == NULL) 1426 error = EINVAL; 1427 else if (sysfilt_ops[~filt].for_refcnt != 0) 1428 error = EBUSY; 1429 else { 1430 sysfilt_ops[~filt].for_fop = &null_filtops; 1431 sysfilt_ops[~filt].for_refcnt = 0; 1432 } 1433 mtx_unlock(&filterops_lock); 1434 1435 return error; 1436 } 1437 1438 static struct filterops * 1439 kqueue_fo_find(int filt) 1440 { 1441 1442 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1443 return NULL; 1444 1445 if (sysfilt_ops[~filt].for_nolock) 1446 return sysfilt_ops[~filt].for_fop; 1447 1448 mtx_lock(&filterops_lock); 1449 sysfilt_ops[~filt].for_refcnt++; 1450 if (sysfilt_ops[~filt].for_fop == NULL) 1451 sysfilt_ops[~filt].for_fop = &null_filtops; 1452 mtx_unlock(&filterops_lock); 1453 1454 return sysfilt_ops[~filt].for_fop; 1455 } 1456 1457 static void 1458 kqueue_fo_release(int filt) 1459 { 1460 1461 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1462 return; 1463 1464 if (sysfilt_ops[~filt].for_nolock) 1465 
return; 1466 1467 mtx_lock(&filterops_lock); 1468 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1469 ("filter object refcount not valid on release")); 1470 sysfilt_ops[~filt].for_refcnt--; 1471 mtx_unlock(&filterops_lock); 1472 } 1473 1474 /* 1475 * A ref to kq (obtained via kqueue_acquire) must be held. 1476 */ 1477 static int 1478 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, 1479 int mflag) 1480 { 1481 struct filterops *fops; 1482 struct file *fp; 1483 struct knote *kn, *tkn; 1484 struct knlist *knl; 1485 int error, filt, event; 1486 int haskqglobal, filedesc_unlock; 1487 1488 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1489 return (EINVAL); 1490 1491 fp = NULL; 1492 kn = NULL; 1493 knl = NULL; 1494 error = 0; 1495 haskqglobal = 0; 1496 filedesc_unlock = 0; 1497 1498 filt = kev->filter; 1499 fops = kqueue_fo_find(filt); 1500 if (fops == NULL) 1501 return EINVAL; 1502 1503 if (kev->flags & EV_ADD) { 1504 /* Reject an invalid flag pair early */ 1505 if (kev->flags & EV_KEEPUDATA) { 1506 tkn = NULL; 1507 error = EINVAL; 1508 goto done; 1509 } 1510 1511 /* 1512 * Prevent waiting with locks. Non-sleepable 1513 * allocation failures are handled in the loop, only 1514 * if the spare knote appears to be actually required. 1515 */ 1516 tkn = knote_alloc(mflag); 1517 } else { 1518 tkn = NULL; 1519 } 1520 1521 findkn: 1522 if (fops->f_isfd) { 1523 KASSERT(td != NULL, ("td is NULL")); 1524 if (kev->ident > INT_MAX) 1525 error = EBADF; 1526 else 1527 error = fget(td, kev->ident, &cap_event_rights, &fp); 1528 if (error) 1529 goto done; 1530 1531 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1532 kev->ident, M_NOWAIT) != 0) { 1533 /* try again */ 1534 fdrop(fp, td); 1535 fp = NULL; 1536 error = kqueue_expand(kq, fops, kev->ident, mflag); 1537 if (error) 1538 goto done; 1539 goto findkn; 1540 } 1541 1542 if (fp->f_type == DTYPE_KQUEUE) { 1543 /* 1544 * If we add some intelligence about what we are doing, 1545 * we should be able to support events on ourselves. 1546 * We need to know when we are doing this to prevent 1547 * getting both the knlist lock and the kq lock since 1548 * they are the same thing. 1549 */ 1550 if (fp->f_data == kq) { 1551 error = EINVAL; 1552 goto done; 1553 } 1554 1555 /* 1556 * Pre-lock the filedesc before the global 1557 * lock mutex, see the comment in 1558 * kqueue_close(). 1559 */ 1560 FILEDESC_XLOCK(td->td_proc->p_fd); 1561 filedesc_unlock = 1; 1562 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1563 } 1564 1565 KQ_LOCK(kq); 1566 if (kev->ident < kq->kq_knlistsize) { 1567 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1568 if (kev->filter == kn->kn_filter) 1569 break; 1570 } 1571 } else { 1572 if ((kev->flags & EV_ADD) == EV_ADD) { 1573 error = kqueue_expand(kq, fops, kev->ident, mflag); 1574 if (error != 0) 1575 goto done; 1576 } 1577 1578 KQ_LOCK(kq); 1579 1580 /* 1581 * If possible, find an existing knote to use for this kevent. 1582 */ 1583 if (kev->filter == EVFILT_PROC && 1584 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1585 /* This is an internal creation of a process tracking 1586 * note. Don't attempt to coalesce this with an 1587 * existing note. 
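			 * As the knote_fork() comment explains, NOTE_CHILD
			 * and NOTE_EXIT both use the data field, so the child
			 * is given separate knotes rather than sharing one.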
1588 */ 1589 ; 1590 } else if (kq->kq_knhashmask != 0) { 1591 struct klist *list; 1592 1593 list = &kq->kq_knhash[ 1594 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1595 SLIST_FOREACH(kn, list, kn_link) 1596 if (kev->ident == kn->kn_id && 1597 kev->filter == kn->kn_filter) 1598 break; 1599 } 1600 } 1601 1602 /* knote is in the process of changing, wait for it to stabilize. */ 1603 if (kn != NULL && kn_in_flux(kn)) { 1604 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1605 if (filedesc_unlock) { 1606 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1607 filedesc_unlock = 0; 1608 } 1609 kq->kq_state |= KQ_FLUXWAIT; 1610 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1611 if (fp != NULL) { 1612 fdrop(fp, td); 1613 fp = NULL; 1614 } 1615 goto findkn; 1616 } 1617 1618 /* 1619 * kn now contains the matching knote, or NULL if no match 1620 */ 1621 if (kn == NULL) { 1622 if (kev->flags & EV_ADD) { 1623 kn = tkn; 1624 tkn = NULL; 1625 if (kn == NULL) { 1626 KQ_UNLOCK(kq); 1627 error = ENOMEM; 1628 goto done; 1629 } 1630 kn->kn_fp = fp; 1631 kn->kn_kq = kq; 1632 kn->kn_fop = fops; 1633 /* 1634 * apply reference counts to knote structure, and 1635 * do not release it at the end of this routine. 1636 */ 1637 fops = NULL; 1638 fp = NULL; 1639 1640 kn->kn_sfflags = kev->fflags; 1641 kn->kn_sdata = kev->data; 1642 kev->fflags = 0; 1643 kev->data = 0; 1644 kn->kn_kevent = *kev; 1645 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1646 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1647 kn->kn_status = KN_DETACHED; 1648 if ((kev->flags & EV_DISABLE) != 0) 1649 kn->kn_status |= KN_DISABLED; 1650 kn_enter_flux(kn); 1651 1652 error = knote_attach(kn, kq); 1653 KQ_UNLOCK(kq); 1654 if (error != 0) { 1655 tkn = kn; 1656 goto done; 1657 } 1658 1659 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1660 knote_drop_detached(kn, td); 1661 goto done; 1662 } 1663 knl = kn_list_lock(kn); 1664 goto done_ev_add; 1665 } else { 1666 /* No matching knote and the EV_ADD flag is not set. */ 1667 KQ_UNLOCK(kq); 1668 error = ENOENT; 1669 goto done; 1670 } 1671 } 1672 1673 if (kev->flags & EV_DELETE) { 1674 kn_enter_flux(kn); 1675 KQ_UNLOCK(kq); 1676 knote_drop(kn, td); 1677 goto done; 1678 } 1679 1680 if (kev->flags & EV_FORCEONESHOT) { 1681 kn->kn_flags |= EV_ONESHOT; 1682 KNOTE_ACTIVATE(kn, 1); 1683 } 1684 1685 if ((kev->flags & EV_ENABLE) != 0) 1686 kn->kn_status &= ~KN_DISABLED; 1687 else if ((kev->flags & EV_DISABLE) != 0) 1688 kn->kn_status |= KN_DISABLED; 1689 1690 /* 1691 * The user may change some filter values after the initial EV_ADD, 1692 * but doing so will not reset any filter which has already been 1693 * triggered. 1694 */ 1695 kn->kn_status |= KN_SCAN; 1696 kn_enter_flux(kn); 1697 KQ_UNLOCK(kq); 1698 knl = kn_list_lock(kn); 1699 if ((kev->flags & EV_KEEPUDATA) == 0) 1700 kn->kn_kevent.udata = kev->udata; 1701 if (!fops->f_isfd && fops->f_touch != NULL) { 1702 fops->f_touch(kn, kev, EVENT_REGISTER); 1703 } else { 1704 kn->kn_sfflags = kev->fflags; 1705 kn->kn_sdata = kev->data; 1706 } 1707 1708 done_ev_add: 1709 /* 1710 * We can get here with kn->kn_knlist == NULL. This can happen when 1711 * the initial attach event decides that the event is "completed" 1712 * already, e.g., filt_procattach() is called on a zombie process. It 1713 * will call filt_proc() which will remove it from the list, and NULL 1714 * kn_knlist. 1715 * 1716 * KN_DISABLED will be stable while the knote is in flux, so the 1717 * unlocked read will not race with an update. 
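	 * That is why f_event can be called here without re-taking the kq
	 * lock first; its result is folded into kn_status under the lock
	 * just below.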
1718 */ 1719 if ((kn->kn_status & KN_DISABLED) == 0) 1720 event = kn->kn_fop->f_event(kn, 0); 1721 else 1722 event = 0; 1723 1724 KQ_LOCK(kq); 1725 if (event) 1726 kn->kn_status |= KN_ACTIVE; 1727 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1728 KN_ACTIVE) 1729 knote_enqueue(kn); 1730 kn->kn_status &= ~KN_SCAN; 1731 kn_leave_flux(kn); 1732 kn_list_unlock(knl); 1733 KQ_UNLOCK_FLUX(kq); 1734 1735 done: 1736 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1737 if (filedesc_unlock) 1738 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1739 if (fp != NULL) 1740 fdrop(fp, td); 1741 knote_free(tkn); 1742 if (fops != NULL) 1743 kqueue_fo_release(filt); 1744 return (error); 1745 } 1746 1747 static int 1748 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1749 { 1750 int error; 1751 struct kqueue *kq; 1752 1753 error = 0; 1754 1755 kq = fp->f_data; 1756 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1757 return (EBADF); 1758 *kqp = kq; 1759 KQ_LOCK(kq); 1760 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1761 KQ_UNLOCK(kq); 1762 return (EBADF); 1763 } 1764 kq->kq_refcnt++; 1765 KQ_UNLOCK(kq); 1766 1767 return error; 1768 } 1769 1770 static void 1771 kqueue_release(struct kqueue *kq, int locked) 1772 { 1773 if (locked) 1774 KQ_OWNED(kq); 1775 else 1776 KQ_LOCK(kq); 1777 kq->kq_refcnt--; 1778 if (kq->kq_refcnt == 1) 1779 wakeup(&kq->kq_refcnt); 1780 if (!locked) 1781 KQ_UNLOCK(kq); 1782 } 1783 1784 static void 1785 ast_kqueue(struct thread *td, int tda __unused) 1786 { 1787 taskqueue_quiesce(taskqueue_kqueue_ctx); 1788 } 1789 1790 static void 1791 kqueue_schedtask(struct kqueue *kq) 1792 { 1793 KQ_OWNED(kq); 1794 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1795 ("scheduling kqueue task while draining")); 1796 1797 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1798 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1799 kq->kq_state |= KQ_TASKSCHED; 1800 ast_sched(curthread, TDA_KQUEUE); 1801 } 1802 } 1803 1804 /* 1805 * Expand the kq to make sure we have storage for fops/ident pair. 1806 * 1807 * Return 0 on success (or no work necessary), return errno on failure. 1808 */ 1809 static int 1810 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1811 int mflag) 1812 { 1813 struct klist *list, *tmp_knhash, *to_free; 1814 u_long tmp_knhashmask; 1815 int error, fd, size; 1816 1817 KQ_NOTOWNED(kq); 1818 1819 error = 0; 1820 to_free = NULL; 1821 if (fops->f_isfd) { 1822 fd = ident; 1823 if (kq->kq_knlistsize <= fd) { 1824 size = kq->kq_knlistsize; 1825 while (size <= fd) 1826 size += KQEXTENT; 1827 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1828 if (list == NULL) 1829 return ENOMEM; 1830 KQ_LOCK(kq); 1831 if ((kq->kq_state & KQ_CLOSING) != 0) { 1832 to_free = list; 1833 error = EBADF; 1834 } else if (kq->kq_knlistsize > fd) { 1835 to_free = list; 1836 } else { 1837 if (kq->kq_knlist != NULL) { 1838 bcopy(kq->kq_knlist, list, 1839 kq->kq_knlistsize * sizeof(*list)); 1840 to_free = kq->kq_knlist; 1841 kq->kq_knlist = NULL; 1842 } 1843 bzero((caddr_t)list + 1844 kq->kq_knlistsize * sizeof(*list), 1845 (size - kq->kq_knlistsize) * sizeof(*list)); 1846 kq->kq_knlistsize = size; 1847 kq->kq_knlist = list; 1848 } 1849 KQ_UNLOCK(kq); 1850 } 1851 } else { 1852 if (kq->kq_knhashmask == 0) { 1853 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, 1854 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? 
1855 HASH_WAITOK : HASH_NOWAIT); 1856 if (tmp_knhash == NULL) 1857 return (ENOMEM); 1858 KQ_LOCK(kq); 1859 if ((kq->kq_state & KQ_CLOSING) != 0) { 1860 to_free = tmp_knhash; 1861 error = EBADF; 1862 } else if (kq->kq_knhashmask == 0) { 1863 kq->kq_knhash = tmp_knhash; 1864 kq->kq_knhashmask = tmp_knhashmask; 1865 } else { 1866 to_free = tmp_knhash; 1867 } 1868 KQ_UNLOCK(kq); 1869 } 1870 } 1871 free(to_free, M_KQUEUE); 1872 1873 KQ_NOTOWNED(kq); 1874 return (error); 1875 } 1876 1877 static void 1878 kqueue_task(void *arg, int pending) 1879 { 1880 struct kqueue *kq; 1881 int haskqglobal; 1882 1883 haskqglobal = 0; 1884 kq = arg; 1885 1886 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1887 KQ_LOCK(kq); 1888 1889 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1890 1891 kq->kq_state &= ~KQ_TASKSCHED; 1892 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1893 wakeup(&kq->kq_state); 1894 } 1895 KQ_UNLOCK(kq); 1896 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1897 } 1898 1899 /* 1900 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1901 * We treat KN_MARKER knotes as if they are in flux. 1902 */ 1903 static int 1904 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1905 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1906 { 1907 struct kevent *kevp; 1908 struct knote *kn, *marker; 1909 struct knlist *knl; 1910 sbintime_t asbt, rsbt; 1911 int count, error, haskqglobal, influx, nkev, touch; 1912 1913 count = maxevents; 1914 nkev = 0; 1915 error = 0; 1916 haskqglobal = 0; 1917 1918 if (maxevents == 0) 1919 goto done_nl; 1920 if (maxevents < 0) { 1921 error = EINVAL; 1922 goto done_nl; 1923 } 1924 1925 rsbt = 0; 1926 if (tsp != NULL) { 1927 if (!timespecvalid_interval(tsp)) { 1928 error = EINVAL; 1929 goto done_nl; 1930 } 1931 if (timespecisset(tsp)) { 1932 if (tsp->tv_sec <= INT32_MAX) { 1933 rsbt = tstosbt(*tsp); 1934 if (TIMESEL(&asbt, rsbt)) 1935 asbt += tc_tick_sbt; 1936 if (asbt <= SBT_MAX - rsbt) 1937 asbt += rsbt; 1938 else 1939 asbt = 0; 1940 rsbt >>= tc_precexp; 1941 } else 1942 asbt = 0; 1943 } else 1944 asbt = -1; 1945 } else 1946 asbt = 0; 1947 marker = knote_alloc(M_WAITOK); 1948 marker->kn_status = KN_MARKER; 1949 KQ_LOCK(kq); 1950 1951 retry: 1952 kevp = keva; 1953 if (kq->kq_count == 0) { 1954 if (asbt == -1) { 1955 error = EWOULDBLOCK; 1956 } else { 1957 kq->kq_state |= KQ_SLEEP; 1958 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1959 "kqread", asbt, rsbt, C_ABSOLUTE); 1960 } 1961 if (error == 0) 1962 goto retry; 1963 /* don't restart after signals... 
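		 * msleep_sbt() reports an interrupting signal as ERESTART;
		 * map it to EINTR so the caller sees the interruption instead
		 * of the syscall silently restarting, and turn EWOULDBLOCK
		 * (timeout expired, or non-blocking poll) into a normal
		 * return with zero events.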
*/ 1964 if (error == ERESTART) 1965 error = EINTR; 1966 else if (error == EWOULDBLOCK) 1967 error = 0; 1968 goto done; 1969 } 1970 1971 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1972 influx = 0; 1973 while (count) { 1974 KQ_OWNED(kq); 1975 kn = TAILQ_FIRST(&kq->kq_head); 1976 1977 if ((kn->kn_status == KN_MARKER && kn != marker) || 1978 kn_in_flux(kn)) { 1979 if (influx) { 1980 influx = 0; 1981 KQ_FLUX_WAKEUP(kq); 1982 } 1983 kq->kq_state |= KQ_FLUXWAIT; 1984 error = msleep(kq, &kq->kq_lock, PSOCK, 1985 "kqflxwt", 0); 1986 continue; 1987 } 1988 1989 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1990 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1991 kn->kn_status &= ~KN_QUEUED; 1992 kq->kq_count--; 1993 continue; 1994 } 1995 if (kn == marker) { 1996 KQ_FLUX_WAKEUP(kq); 1997 if (count == maxevents) 1998 goto retry; 1999 goto done; 2000 } 2001 KASSERT(!kn_in_flux(kn), 2002 ("knote %p is unexpectedly in flux", kn)); 2003 2004 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 2005 kn->kn_status &= ~KN_QUEUED; 2006 kn_enter_flux(kn); 2007 kq->kq_count--; 2008 KQ_UNLOCK(kq); 2009 /* 2010 * We don't need to lock the list since we've 2011 * marked it as in flux. 2012 */ 2013 knote_drop(kn, td); 2014 KQ_LOCK(kq); 2015 continue; 2016 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 2017 kn->kn_status &= ~KN_QUEUED; 2018 kn_enter_flux(kn); 2019 kq->kq_count--; 2020 KQ_UNLOCK(kq); 2021 /* 2022 * We don't need to lock the list since we've 2023 * marked the knote as being in flux. 2024 */ 2025 *kevp = kn->kn_kevent; 2026 knote_drop(kn, td); 2027 KQ_LOCK(kq); 2028 kn = NULL; 2029 } else { 2030 kn->kn_status |= KN_SCAN; 2031 kn_enter_flux(kn); 2032 KQ_UNLOCK(kq); 2033 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 2034 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 2035 knl = kn_list_lock(kn); 2036 if (kn->kn_fop->f_event(kn, 0) == 0) { 2037 KQ_LOCK(kq); 2038 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2039 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 2040 KN_SCAN); 2041 kn_leave_flux(kn); 2042 kq->kq_count--; 2043 kn_list_unlock(knl); 2044 influx = 1; 2045 continue; 2046 } 2047 touch = (!kn->kn_fop->f_isfd && 2048 kn->kn_fop->f_touch != NULL); 2049 if (touch) 2050 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 2051 else 2052 *kevp = kn->kn_kevent; 2053 KQ_LOCK(kq); 2054 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2055 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 2056 /* 2057 * Manually clear knotes who weren't 2058 * 'touch'ed. 
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
2191 * 2192 * XXX - This is needed for libc_r. 2193 */ 2194 st->st_mode = S_IFIFO; 2195 return (0); 2196 } 2197 2198 static void 2199 kqueue_drain(struct kqueue *kq, struct thread *td) 2200 { 2201 struct knote *kn; 2202 int i; 2203 2204 KQ_LOCK(kq); 2205 2206 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2207 ("kqueue already closing")); 2208 kq->kq_state |= KQ_CLOSING; 2209 if (kq->kq_refcnt > 1) 2210 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2211 2212 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2213 2214 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2215 ("kqueue's knlist not empty")); 2216 2217 for (i = 0; i < kq->kq_knlistsize; i++) { 2218 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2219 if (kn_in_flux(kn)) { 2220 kq->kq_state |= KQ_FLUXWAIT; 2221 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2222 continue; 2223 } 2224 kn_enter_flux(kn); 2225 KQ_UNLOCK(kq); 2226 knote_drop(kn, td); 2227 KQ_LOCK(kq); 2228 } 2229 } 2230 if (kq->kq_knhashmask != 0) { 2231 for (i = 0; i <= kq->kq_knhashmask; i++) { 2232 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2233 if (kn_in_flux(kn)) { 2234 kq->kq_state |= KQ_FLUXWAIT; 2235 msleep(kq, &kq->kq_lock, PSOCK, 2236 "kqclo2", 0); 2237 continue; 2238 } 2239 kn_enter_flux(kn); 2240 KQ_UNLOCK(kq); 2241 knote_drop(kn, td); 2242 KQ_LOCK(kq); 2243 } 2244 } 2245 } 2246 2247 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2248 kq->kq_state |= KQ_TASKDRAIN; 2249 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2250 } 2251 2252 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2253 selwakeuppri(&kq->kq_sel, PSOCK); 2254 if (!SEL_WAITING(&kq->kq_sel)) 2255 kq->kq_state &= ~KQ_SEL; 2256 } 2257 2258 KQ_UNLOCK(kq); 2259 } 2260 2261 static void 2262 kqueue_destroy(struct kqueue *kq) 2263 { 2264 2265 KASSERT(kq->kq_fdp == NULL, 2266 ("kqueue still attached to a file descriptor")); 2267 seldrain(&kq->kq_sel); 2268 knlist_destroy(&kq->kq_sel.si_note); 2269 mtx_destroy(&kq->kq_lock); 2270 2271 if (kq->kq_knhash != NULL) 2272 free(kq->kq_knhash, M_KQUEUE); 2273 if (kq->kq_knlist != NULL) 2274 free(kq->kq_knlist, M_KQUEUE); 2275 2276 funsetown(&kq->kq_sigio); 2277 } 2278 2279 /*ARGSUSED*/ 2280 static int 2281 kqueue_close(struct file *fp, struct thread *td) 2282 { 2283 struct kqueue *kq = fp->f_data; 2284 struct filedesc *fdp; 2285 int error; 2286 int filedesc_unlock; 2287 2288 if ((error = kqueue_acquire(fp, &kq))) 2289 return error; 2290 kqueue_drain(kq, td); 2291 2292 /* 2293 * We could be called due to the knote_drop() doing fdrop(), 2294 * called from kqueue_register(). In this case the global 2295 * lock is owned, and filedesc sx is locked before, to not 2296 * take the sleepable lock after non-sleepable. 
2297 */ 2298 fdp = kq->kq_fdp; 2299 kq->kq_fdp = NULL; 2300 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2301 FILEDESC_XLOCK(fdp); 2302 filedesc_unlock = 1; 2303 } else 2304 filedesc_unlock = 0; 2305 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2306 if (filedesc_unlock) 2307 FILEDESC_XUNLOCK(fdp); 2308 2309 kqueue_destroy(kq); 2310 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2311 crfree(kq->kq_cred); 2312 free(kq, M_KQUEUE); 2313 fp->f_data = NULL; 2314 2315 return (0); 2316 } 2317 2318 static int 2319 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2320 { 2321 struct kqueue *kq = fp->f_data; 2322 2323 kif->kf_type = KF_TYPE_KQUEUE; 2324 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq; 2325 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count; 2326 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state; 2327 return (0); 2328 } 2329 2330 static void 2331 kqueue_wakeup(struct kqueue *kq) 2332 { 2333 KQ_OWNED(kq); 2334 2335 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2336 kq->kq_state &= ~KQ_SLEEP; 2337 wakeup(kq); 2338 } 2339 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2340 selwakeuppri(&kq->kq_sel, PSOCK); 2341 if (!SEL_WAITING(&kq->kq_sel)) 2342 kq->kq_state &= ~KQ_SEL; 2343 } 2344 if (!knlist_empty(&kq->kq_sel.si_note)) 2345 kqueue_schedtask(kq); 2346 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2347 pgsigio(&kq->kq_sigio, SIGIO, 0); 2348 } 2349 } 2350 2351 /* 2352 * Walk down a list of knotes, activating them if their event has triggered. 2353 * 2354 * There is a possibility to optimize in the case of one kq watching another. 2355 * Instead of scheduling a task to wake it up, you could pass enough state 2356 * down the chain to make up the parent kqueue. Make this code functional 2357 * first. 2358 */ 2359 void 2360 knote(struct knlist *list, long hint, int lockflags) 2361 { 2362 struct kqueue *kq; 2363 struct knote *kn, *tkn; 2364 int error; 2365 2366 if (list == NULL) 2367 return; 2368 2369 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2370 2371 if ((lockflags & KNF_LISTLOCKED) == 0) 2372 list->kl_lock(list->kl_lockarg); 2373 2374 /* 2375 * If we unlock the list lock (and enter influx), we can 2376 * eliminate the kqueue scheduling, but this will introduce 2377 * four lock/unlock's for each knote to test. Also, marker 2378 * would be needed to keep iteration position, since filters 2379 * or other threads could remove events. 2380 */ 2381 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2382 kq = kn->kn_kq; 2383 KQ_LOCK(kq); 2384 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2385 /* 2386 * Do not process the influx notes, except for 2387 * the influx coming from the kq unlock in the 2388 * kqueue_scan(). In the later case, we do 2389 * not interfere with the scan, since the code 2390 * fragment in kqueue_scan() locks the knlist, 2391 * and cannot proceed until we finished. 
2392 */ 2393 KQ_UNLOCK(kq); 2394 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2395 kn_enter_flux(kn); 2396 KQ_UNLOCK(kq); 2397 error = kn->kn_fop->f_event(kn, hint); 2398 KQ_LOCK(kq); 2399 kn_leave_flux(kn); 2400 if (error) 2401 KNOTE_ACTIVATE(kn, 1); 2402 KQ_UNLOCK_FLUX(kq); 2403 } else { 2404 if (kn->kn_fop->f_event(kn, hint)) 2405 KNOTE_ACTIVATE(kn, 1); 2406 KQ_UNLOCK(kq); 2407 } 2408 } 2409 if ((lockflags & KNF_LISTLOCKED) == 0) 2410 list->kl_unlock(list->kl_lockarg); 2411 } 2412 2413 /* 2414 * add a knote to a knlist 2415 */ 2416 void 2417 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2418 { 2419 2420 KNL_ASSERT_LOCK(knl, islocked); 2421 KQ_NOTOWNED(kn->kn_kq); 2422 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2423 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2424 ("knote %p was not detached", kn)); 2425 if (!islocked) 2426 knl->kl_lock(knl->kl_lockarg); 2427 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2428 if (!islocked) 2429 knl->kl_unlock(knl->kl_lockarg); 2430 KQ_LOCK(kn->kn_kq); 2431 kn->kn_knlist = knl; 2432 kn->kn_status &= ~KN_DETACHED; 2433 KQ_UNLOCK(kn->kn_kq); 2434 } 2435 2436 static void 2437 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2438 int kqislocked) 2439 { 2440 2441 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2442 KNL_ASSERT_LOCK(knl, knlislocked); 2443 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2444 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2445 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2446 ("knote %p was already detached", kn)); 2447 if (!knlislocked) 2448 knl->kl_lock(knl->kl_lockarg); 2449 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2450 kn->kn_knlist = NULL; 2451 if (!knlislocked) 2452 kn_list_unlock(knl); 2453 if (!kqislocked) 2454 KQ_LOCK(kn->kn_kq); 2455 kn->kn_status |= KN_DETACHED; 2456 if (!kqislocked) 2457 KQ_UNLOCK(kn->kn_kq); 2458 } 2459 2460 /* 2461 * remove knote from the specified knlist 2462 */ 2463 void 2464 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2465 { 2466 2467 knlist_remove_kq(knl, kn, islocked, 0); 2468 } 2469 2470 int 2471 knlist_empty(struct knlist *knl) 2472 { 2473 2474 KNL_ASSERT_LOCKED(knl); 2475 return (SLIST_EMPTY(&knl->kl_list)); 2476 } 2477 2478 static struct mtx knlist_lock; 2479 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2480 MTX_DEF); 2481 static void knlist_mtx_lock(void *arg); 2482 static void knlist_mtx_unlock(void *arg); 2483 2484 static void 2485 knlist_mtx_lock(void *arg) 2486 { 2487 2488 mtx_lock((struct mtx *)arg); 2489 } 2490 2491 static void 2492 knlist_mtx_unlock(void *arg) 2493 { 2494 2495 mtx_unlock((struct mtx *)arg); 2496 } 2497 2498 static void 2499 knlist_mtx_assert_lock(void *arg, int what) 2500 { 2501 2502 if (what == LA_LOCKED) 2503 mtx_assert((struct mtx *)arg, MA_OWNED); 2504 else 2505 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2506 } 2507 2508 void 2509 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2510 void (*kl_unlock)(void *), 2511 void (*kl_assert_lock)(void *, int)) 2512 { 2513 2514 if (lock == NULL) 2515 knl->kl_lockarg = &knlist_lock; 2516 else 2517 knl->kl_lockarg = lock; 2518 2519 if (kl_lock == NULL) 2520 knl->kl_lock = knlist_mtx_lock; 2521 else 2522 knl->kl_lock = kl_lock; 2523 if (kl_unlock == NULL) 2524 knl->kl_unlock = knlist_mtx_unlock; 2525 else 2526 knl->kl_unlock = kl_unlock; 2527 if (kl_assert_lock == NULL) 2528 knl->kl_assert_lock = knlist_mtx_assert_lock; 2529 
else
2530 knl->kl_assert_lock = kl_assert_lock;
2531
2532 knl->kl_autodestroy = 0;
2533 SLIST_INIT(&knl->kl_list);
2534 }
2535
2536 void
2537 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2538 {
2539
2540 knlist_init(knl, lock, NULL, NULL, NULL);
2541 }
2542
2543 struct knlist *
2544 knlist_alloc(struct mtx *lock)
2545 {
2546 struct knlist *knl;
2547
2548 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2549 knlist_init_mtx(knl, lock);
2550 return (knl);
2551 }
2552
2553 void
2554 knlist_destroy(struct knlist *knl)
2555 {
2556
2557 KASSERT(KNLIST_EMPTY(knl),
2558 ("destroying knlist %p with knotes on it", knl));
2559 }
2560
2561 void
2562 knlist_detach(struct knlist *knl)
2563 {
2564
2565 KNL_ASSERT_LOCKED(knl);
2566 knl->kl_autodestroy = 1;
2567 if (knlist_empty(knl)) {
2568 knlist_destroy(knl);
2569 free(knl, M_KQUEUE);
2570 }
2571 }
2572
2573 /*
2574 * Even if we are locked, we may need to drop the lock to allow any influx
2575 * knotes time to "settle".
2576 */
2577 void
2578 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2579 {
2580 struct knote *kn, *kn2;
2581 struct kqueue *kq;
2582
2583 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2584 if (islocked)
2585 KNL_ASSERT_LOCKED(knl);
2586 else {
2587 KNL_ASSERT_UNLOCKED(knl);
2588 again: /* need to reacquire lock since we have dropped it */
2589 knl->kl_lock(knl->kl_lockarg);
2590 }
2591
2592 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2593 kq = kn->kn_kq;
2594 KQ_LOCK(kq);
2595 if (kn_in_flux(kn)) {
2596 KQ_UNLOCK(kq);
2597 continue;
2598 }
2599 knlist_remove_kq(knl, kn, 1, 1);
2600 if (killkn) {
2601 kn_enter_flux(kn);
2602 KQ_UNLOCK(kq);
2603 knote_drop_detached(kn, td);
2604 } else {
2605 /* Make sure cleared knotes disappear soon */
2606 kn->kn_flags |= EV_EOF | EV_ONESHOT;
2607 KQ_UNLOCK(kq);
2608 }
2609 kq = NULL;
2610 }
2611
2612 if (!SLIST_EMPTY(&knl->kl_list)) {
2613 /* there are still in-flux knotes remaining */
2614 kn = SLIST_FIRST(&knl->kl_list);
2615 kq = kn->kn_kq;
2616 KQ_LOCK(kq);
2617 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2618 knl->kl_unlock(knl->kl_lockarg);
2619 kq->kq_state |= KQ_FLUXWAIT;
2620 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2621 kq = NULL;
2622 goto again;
2623 }
2624
2625 if (islocked)
2626 KNL_ASSERT_LOCKED(knl);
2627 else {
2628 knl->kl_unlock(knl->kl_lockarg);
2629 KNL_ASSERT_UNLOCKED(knl);
2630 }
2631 }
2632
2633 /*
2634 * Remove all knotes referencing a specified fd. This must be called with the
2635 * FILEDESC lock held; it prevents a race where a new fd comes along and
2636 * occupies the entry and we attach a knote to the fd.
2637 */
2638 void
2639 knote_fdclose(struct thread *td, int fd)
2640 {
2641 struct filedesc *fdp = td->td_proc->p_fd;
2642 struct kqueue *kq;
2643 struct knote *kn;
2644 int influx;
2645
2646 FILEDESC_XLOCK_ASSERT(fdp);
2647
2648 /*
2649 * We shouldn't have to worry about new kevents appearing on fd
2650 * since filedesc is locked.
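 * Each kqueue on the process's fd_kqlist is visited below and any knote
 * attached to the closing descriptor is dropped.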
2651 */ 2652 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2653 KQ_LOCK(kq); 2654 2655 again: 2656 influx = 0; 2657 while (kq->kq_knlistsize > fd && 2658 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2659 if (kn_in_flux(kn)) { 2660 /* someone else might be waiting on our knote */ 2661 if (influx) 2662 wakeup(kq); 2663 kq->kq_state |= KQ_FLUXWAIT; 2664 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2665 goto again; 2666 } 2667 kn_enter_flux(kn); 2668 KQ_UNLOCK(kq); 2669 influx = 1; 2670 knote_drop(kn, td); 2671 KQ_LOCK(kq); 2672 } 2673 KQ_UNLOCK_FLUX(kq); 2674 } 2675 } 2676 2677 static int 2678 knote_attach(struct knote *kn, struct kqueue *kq) 2679 { 2680 struct klist *list; 2681 2682 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2683 KQ_OWNED(kq); 2684 2685 if ((kq->kq_state & KQ_CLOSING) != 0) 2686 return (EBADF); 2687 if (kn->kn_fop->f_isfd) { 2688 if (kn->kn_id >= kq->kq_knlistsize) 2689 return (ENOMEM); 2690 list = &kq->kq_knlist[kn->kn_id]; 2691 } else { 2692 if (kq->kq_knhash == NULL) 2693 return (ENOMEM); 2694 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2695 } 2696 SLIST_INSERT_HEAD(list, kn, kn_link); 2697 return (0); 2698 } 2699 2700 static void 2701 knote_drop(struct knote *kn, struct thread *td) 2702 { 2703 2704 if ((kn->kn_status & KN_DETACHED) == 0) 2705 kn->kn_fop->f_detach(kn); 2706 knote_drop_detached(kn, td); 2707 } 2708 2709 static void 2710 knote_drop_detached(struct knote *kn, struct thread *td) 2711 { 2712 struct kqueue *kq; 2713 struct klist *list; 2714 2715 kq = kn->kn_kq; 2716 2717 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2718 ("knote %p still attached", kn)); 2719 KQ_NOTOWNED(kq); 2720 2721 KQ_LOCK(kq); 2722 KASSERT(kn->kn_influx == 1, 2723 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2724 2725 if (kn->kn_fop->f_isfd) 2726 list = &kq->kq_knlist[kn->kn_id]; 2727 else 2728 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2729 2730 if (!SLIST_EMPTY(list)) 2731 SLIST_REMOVE(list, kn, knote, kn_link); 2732 if (kn->kn_status & KN_QUEUED) 2733 knote_dequeue(kn); 2734 KQ_UNLOCK_FLUX(kq); 2735 2736 if (kn->kn_fop->f_isfd) { 2737 fdrop(kn->kn_fp, td); 2738 kn->kn_fp = NULL; 2739 } 2740 kqueue_fo_release(kn->kn_kevent.filter); 2741 kn->kn_fop = NULL; 2742 knote_free(kn); 2743 } 2744 2745 static void 2746 knote_enqueue(struct knote *kn) 2747 { 2748 struct kqueue *kq = kn->kn_kq; 2749 2750 KQ_OWNED(kn->kn_kq); 2751 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2752 2753 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2754 kn->kn_status |= KN_QUEUED; 2755 kq->kq_count++; 2756 kqueue_wakeup(kq); 2757 } 2758 2759 static void 2760 knote_dequeue(struct knote *kn) 2761 { 2762 struct kqueue *kq = kn->kn_kq; 2763 2764 KQ_OWNED(kn->kn_kq); 2765 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2766 2767 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2768 kn->kn_status &= ~KN_QUEUED; 2769 kq->kq_count--; 2770 } 2771 2772 static void 2773 knote_init(void) 2774 { 2775 2776 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2777 NULL, NULL, UMA_ALIGN_PTR, 0); 2778 ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue); 2779 } 2780 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2781 2782 static struct knote * 2783 knote_alloc(int mflag) 2784 { 2785 2786 return (uma_zalloc(knote_zone, mflag | M_ZERO)); 2787 } 2788 2789 static void 2790 knote_free(struct knote *kn) 2791 { 2792 2793 uma_zfree(knote_zone, kn); 2794 } 2795 2796 /* 2797 * Register the kev w/ the kq 
specified by fd. 2798 */ 2799 int 2800 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag) 2801 { 2802 struct kqueue *kq; 2803 struct file *fp; 2804 cap_rights_t rights; 2805 int error; 2806 2807 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE), 2808 &fp); 2809 if (error != 0) 2810 return (error); 2811 if ((error = kqueue_acquire(fp, &kq)) != 0) 2812 goto noacquire; 2813 2814 error = kqueue_register(kq, kev, td, mflag); 2815 kqueue_release(kq, 0); 2816 2817 noacquire: 2818 fdrop(fp, td); 2819 return (error); 2820 } 2821
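/*
 * Illustrative sketch of how a subsystem normally consumes the knlist/knote
 * interfaces defined above.  The "foo" names are hypothetical and the block
 * is kept under #if 0 so it is never compiled; real consumers (pipes,
 * sockets, devices) follow the same pattern with their own locks and
 * filterops.  KNOTE_LOCKED() is used at the notification point because the
 * knlist's lock (foo_mtx) is already held there; callers that do not hold
 * it would use KNOTE_UNLOCKED() instead.
 */
#if 0
/* A hypothetical object that can become readable. */
struct foo_softc {
	struct mtx	foo_mtx;	/* protects foo_note and foo_ready */
	struct knlist	foo_note;	/* knotes watching this object */
	int		foo_ready;	/* amount of data available to read */
};

static void
filt_foodetach(struct knote *kn)
{
	struct foo_softc *sc = kn->kn_hook;

	knlist_remove(&sc->foo_note, kn, 0);
}

static int
filt_fooread(struct knote *kn, long hint)
{
	struct foo_softc *sc = kn->kn_hook;

	kn->kn_data = sc->foo_ready;
	return (kn->kn_data > 0);
}

static struct filterops foo_filtops = {
	.f_isfd = 1,
	.f_detach = filt_foodetach,
	.f_event = filt_fooread,
};

/* fo_kqfilter method, reached from kqueue_register() via filt_fileattach(). */
static int
foo_kqfilter(struct foo_softc *sc, struct knote *kn)
{

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);
	kn->kn_fop = &foo_filtops;
	kn->kn_hook = sc;
	knlist_add(&sc->foo_note, kn, 0);
	return (0);
}

static void
foo_init(struct foo_softc *sc)
{

	mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
	knlist_init_mtx(&sc->foo_note, &sc->foo_mtx);
}

/* Called when new data arrives; activates any registered knotes. */
static void
foo_post_event(struct foo_softc *sc, int count)
{

	mtx_lock(&sc->foo_mtx);
	sc->foo_ready += count;
	KNOTE_LOCKED(&sc->foo_note, 0);
	mtx_unlock(&sc->foo_mtx);
}

static void
foo_fini(struct foo_softc *sc)
{

	knlist_clear(&sc->foo_note, 0);
	knlist_destroy(&sc->foo_note);
	mtx_destroy(&sc->foo_mtx);
}
#endif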