/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#ifdef COMPAT_FREEBSD11
#define	_WANT_FREEBSD11_KEVENT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define	KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define	KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int mflag);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, const struct filterops *fops,
		    uintptr_t ident, int mflag);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static const struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);
struct g_kevent_args;
static int	kern_kevent_generic(struct thread *td,
		    struct g_kevent_args *uap,
		    struct kevent_copyops *k_ops, const char *struct_name);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct knote *knote_alloc(int mflag);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static void	filt_timerexpire_l(struct knote *kn, bool proc_locked);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static void	filt_timerstart(struct knote *kn, sbintime_t to);
static void	filt_timertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_timervalidate(struct knote *kn, sbintime_t *to);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static unsigned int __exclusive_cache_line	kq_ncallouts;
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define	KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define	KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define	KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define	KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define	KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define	KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define	KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED);		\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED);		\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64		/* XXX should be tunable */
#endif

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", MTX_DEF);
static struct {
	const struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
	{ &fs_filtops, 1 },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
};

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	if (kn->kn_sfflags & NOTE_EXIT)
		p = pfind_any(kn->kn_id);
	else
		p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT)
		exiting = true;

	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.  The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}
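
/*
 * Illustrative userspace sketch (not kernel code) of the EVFILT_PROC
 * machinery implemented above: register for NOTE_EXIT on a child and
 * wait for its exit to be reported.  As filt_proc() shows, the
 * wait(2)-style exit status is returned in the data field.  The helper
 * name watch_child_exit() is hypothetical.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <err.h>
 *
 *	static int
 *	watch_child_exit(pid_t pid)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		if ((kq = kqueue()) == -1)
 *			err(1, "kqueue");
 *		EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT,
 *		    NOTE_EXIT, 0, NULL);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			err(1, "kevent register");
 *		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
 *			err(1, "kevent wait");
 *		return ((int)kev.data);	(wait(2)-style exit status)
 *	}
 */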

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  This works mostly like knote(): it
 * activates all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether
 * the user wants to track the new process.  If so, attach a new knote
 * to the child, and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	MPASS(list != NULL);
	KNL_ASSERT_LOCKED(list);
	if (SLIST_EMPTY(&list->kl_list))
		return;

	memset(&kev, 0, sizeof(kev));
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * As in knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
565 */ 566 kn_enter_flux(kn); 567 KQ_UNLOCK(kq); 568 list->kl_unlock(list->kl_lockarg); 569 570 /* 571 * Activate existing knote and register tracking knotes with 572 * new process. 573 * 574 * First register a knote to get just the child notice. This 575 * must be a separate note from a potential NOTE_EXIT 576 * notification since both NOTE_CHILD and NOTE_EXIT are defined 577 * to use the data field (in conflicting ways). 578 */ 579 kev.ident = pid; 580 kev.filter = kn->kn_filter; 581 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 582 EV_FLAG2; 583 kev.fflags = kn->kn_sfflags; 584 kev.data = kn->kn_id; /* parent */ 585 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 586 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 587 if (error) 588 kn->kn_fflags |= NOTE_TRACKERR; 589 590 /* 591 * Then register another knote to track other potential events 592 * from the new process. 593 */ 594 kev.ident = pid; 595 kev.filter = kn->kn_filter; 596 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 597 kev.fflags = kn->kn_sfflags; 598 kev.data = kn->kn_id; /* parent */ 599 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 600 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 601 if (error) 602 kn->kn_fflags |= NOTE_TRACKERR; 603 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 604 KNOTE_ACTIVATE(kn, 0); 605 list->kl_lock(list->kl_lockarg); 606 KQ_LOCK(kq); 607 kn_leave_flux(kn); 608 KQ_UNLOCK_FLUX(kq); 609 } 610 } 611 612 /* 613 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 614 * interval timer support code. 615 */ 616 617 #define NOTE_TIMER_PRECMASK \ 618 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 619 620 static sbintime_t 621 timer2sbintime(int64_t data, int flags) 622 { 623 int64_t secs; 624 625 /* 626 * Macros for converting to the fractional second portion of an 627 * sbintime_t using 64bit multiplication to improve precision. 
628 */ 629 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) 630 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) 631 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) 632 switch (flags & NOTE_TIMER_PRECMASK) { 633 case NOTE_SECONDS: 634 #ifdef __LP64__ 635 if (data > (SBT_MAX / SBT_1S)) 636 return (SBT_MAX); 637 #endif 638 return ((sbintime_t)data << 32); 639 case NOTE_MSECONDS: /* FALLTHROUGH */ 640 case 0: 641 if (data >= 1000) { 642 secs = data / 1000; 643 #ifdef __LP64__ 644 if (secs > (SBT_MAX / SBT_1S)) 645 return (SBT_MAX); 646 #endif 647 return (secs << 32 | MS_TO_SBT(data % 1000)); 648 } 649 return (MS_TO_SBT(data)); 650 case NOTE_USECONDS: 651 if (data >= 1000000) { 652 secs = data / 1000000; 653 #ifdef __LP64__ 654 if (secs > (SBT_MAX / SBT_1S)) 655 return (SBT_MAX); 656 #endif 657 return (secs << 32 | US_TO_SBT(data % 1000000)); 658 } 659 return (US_TO_SBT(data)); 660 case NOTE_NSECONDS: 661 if (data >= 1000000000) { 662 secs = data / 1000000000; 663 #ifdef __LP64__ 664 if (secs > (SBT_MAX / SBT_1S)) 665 return (SBT_MAX); 666 #endif 667 return (secs << 32 | NS_TO_SBT(data % 1000000000)); 668 } 669 return (NS_TO_SBT(data)); 670 default: 671 break; 672 } 673 return (-1); 674 } 675 676 struct kq_timer_cb_data { 677 struct callout c; 678 struct proc *p; 679 struct knote *kn; 680 int cpuid; 681 int flags; 682 TAILQ_ENTRY(kq_timer_cb_data) link; 683 sbintime_t next; /* next timer event fires at */ 684 sbintime_t to; /* precalculated timer period, 0 for abs */ 685 }; 686 687 #define KQ_TIMER_CB_ENQUEUED 0x01 688 689 static void 690 kqtimer_sched_callout(struct kq_timer_cb_data *kc) 691 { 692 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn, 693 kc->cpuid, C_ABSOLUTE); 694 } 695 696 void 697 kqtimer_proc_continue(struct proc *p) 698 { 699 struct kq_timer_cb_data *kc, *kc1; 700 struct bintime bt; 701 sbintime_t now; 702 703 PROC_LOCK_ASSERT(p, MA_OWNED); 704 705 getboottimebin(&bt); 706 now = bttosbt(bt); 707 708 TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) { 709 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link); 710 kc->flags &= ~KQ_TIMER_CB_ENQUEUED; 711 if (kc->next <= now) 712 filt_timerexpire_l(kc->kn, true); 713 else 714 kqtimer_sched_callout(kc); 715 } 716 } 717 718 static void 719 filt_timerexpire_l(struct knote *kn, bool proc_locked) 720 { 721 struct kq_timer_cb_data *kc; 722 struct proc *p; 723 uint64_t delta; 724 sbintime_t now; 725 726 kc = kn->kn_ptr.p_v; 727 728 if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) { 729 kn->kn_data++; 730 KNOTE_ACTIVATE(kn, 0); 731 return; 732 } 733 734 now = sbinuptime(); 735 if (now >= kc->next) { 736 delta = (now - kc->next) / kc->to; 737 if (delta == 0) 738 delta = 1; 739 kn->kn_data += delta; 740 kc->next += delta * kc->to; 741 if (now >= kc->next) /* overflow */ 742 kc->next = now + kc->to; 743 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 744 } 745 746 /* 747 * Initial check for stopped kc->p is racy. It is fine to 748 * miss the set of the stop flags, at worst we would schedule 749 * one more callout. On the other hand, it is not fine to not 750 * schedule when we we missed clearing of the flags, we 751 * recheck them under the lock and observe consistent state. 
752 */ 753 p = kc->p; 754 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 755 if (!proc_locked) 756 PROC_LOCK(p); 757 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 758 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) { 759 kc->flags |= KQ_TIMER_CB_ENQUEUED; 760 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link); 761 } 762 if (!proc_locked) 763 PROC_UNLOCK(p); 764 return; 765 } 766 if (!proc_locked) 767 PROC_UNLOCK(p); 768 } 769 kqtimer_sched_callout(kc); 770 } 771 772 static void 773 filt_timerexpire(void *knx) 774 { 775 filt_timerexpire_l(knx, false); 776 } 777 778 /* 779 * data contains amount of time to sleep 780 */ 781 static int 782 filt_timervalidate(struct knote *kn, sbintime_t *to) 783 { 784 struct bintime bt; 785 sbintime_t sbt; 786 787 if (kn->kn_sdata < 0) 788 return (EINVAL); 789 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 790 kn->kn_sdata = 1; 791 /* 792 * The only fflags values supported are the timer unit 793 * (precision) and the absolute time indicator. 794 */ 795 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 796 return (EINVAL); 797 798 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 799 if (*to < 0) 800 return (EINVAL); 801 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 802 getboottimebin(&bt); 803 sbt = bttosbt(bt); 804 *to = MAX(0, *to - sbt); 805 } 806 return (0); 807 } 808 809 static int 810 filt_timerattach(struct knote *kn) 811 { 812 struct kq_timer_cb_data *kc; 813 sbintime_t to; 814 int error; 815 816 to = -1; 817 error = filt_timervalidate(kn, &to); 818 if (error != 0) 819 return (error); 820 KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 || 821 (kn->kn_sfflags & NOTE_ABSTIME) != 0, 822 ("%s: periodic timer has a calculated zero timeout", __func__)); 823 KASSERT(to >= 0, 824 ("%s: timer has a calculated negative timeout", __func__)); 825 826 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) { 827 atomic_subtract_int(&kq_ncallouts, 1); 828 return (ENOMEM); 829 } 830 831 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 832 kn->kn_flags |= EV_CLEAR; /* automatically set */ 833 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 834 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 835 kc->kn = kn; 836 kc->p = curproc; 837 kc->cpuid = PCPU_GET(cpuid); 838 kc->flags = 0; 839 callout_init(&kc->c, 1); 840 filt_timerstart(kn, to); 841 842 return (0); 843 } 844 845 static void 846 filt_timerstart(struct knote *kn, sbintime_t to) 847 { 848 struct kq_timer_cb_data *kc; 849 850 kc = kn->kn_ptr.p_v; 851 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 852 kc->next = to; 853 kc->to = 0; 854 } else { 855 kc->next = to + sbinuptime(); 856 kc->to = to; 857 } 858 kqtimer_sched_callout(kc); 859 } 860 861 static void 862 filt_timerdetach(struct knote *kn) 863 { 864 struct kq_timer_cb_data *kc; 865 unsigned int old __unused; 866 bool pending; 867 868 kc = kn->kn_ptr.p_v; 869 do { 870 callout_drain(&kc->c); 871 872 /* 873 * kqtimer_proc_continue() might have rescheduled this callout. 874 * Double-check, using the process mutex as an interlock. 
875 */ 876 PROC_LOCK(kc->p); 877 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) { 878 kc->flags &= ~KQ_TIMER_CB_ENQUEUED; 879 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link); 880 } 881 pending = callout_pending(&kc->c); 882 PROC_UNLOCK(kc->p); 883 } while (pending); 884 free(kc, M_KQUEUE); 885 old = atomic_fetchadd_int(&kq_ncallouts, -1); 886 KASSERT(old > 0, ("Number of callouts cannot become negative")); 887 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 888 } 889 890 static void 891 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 892 { 893 struct kq_timer_cb_data *kc; 894 struct kqueue *kq; 895 sbintime_t to; 896 int error; 897 898 switch (type) { 899 case EVENT_REGISTER: 900 /* Handle re-added timers that update data/fflags */ 901 if (kev->flags & EV_ADD) { 902 kc = kn->kn_ptr.p_v; 903 904 /* Drain any existing callout. */ 905 callout_drain(&kc->c); 906 907 /* Throw away any existing undelivered record 908 * of the timer expiration. This is done under 909 * the presumption that if a process is 910 * re-adding this timer with new parameters, 911 * it is no longer interested in what may have 912 * happened under the old parameters. If it is 913 * interested, it can wait for the expiration, 914 * delete the old timer definition, and then 915 * add the new one. 916 * 917 * This has to be done while the kq is locked: 918 * - if enqueued, dequeue 919 * - make it no longer active 920 * - clear the count of expiration events 921 */ 922 kq = kn->kn_kq; 923 KQ_LOCK(kq); 924 if (kn->kn_status & KN_QUEUED) 925 knote_dequeue(kn); 926 927 kn->kn_status &= ~KN_ACTIVE; 928 kn->kn_data = 0; 929 KQ_UNLOCK(kq); 930 931 /* Reschedule timer based on new data/fflags */ 932 kn->kn_sfflags = kev->fflags; 933 kn->kn_sdata = kev->data; 934 error = filt_timervalidate(kn, &to); 935 if (error != 0) { 936 kn->kn_flags |= EV_ERROR; 937 kn->kn_data = error; 938 } else 939 filt_timerstart(kn, to); 940 } 941 break; 942 943 case EVENT_PROCESS: 944 *kev = kn->kn_kevent; 945 if (kn->kn_flags & EV_CLEAR) { 946 kn->kn_data = 0; 947 kn->kn_fflags = 0; 948 } 949 break; 950 951 default: 952 panic("filt_timertouch() - invalid type (%ld)", type); 953 break; 954 } 955 } 956 957 static int 958 filt_timer(struct knote *kn, long hint) 959 { 960 961 return (kn->kn_data != 0); 962 } 963 964 static int 965 filt_userattach(struct knote *kn) 966 { 967 968 /* 969 * EVFILT_USER knotes are not attached to anything in the kernel. 970 */ 971 kn->kn_hook = NULL; 972 if (kn->kn_fflags & NOTE_TRIGGER) 973 kn->kn_hookid = 1; 974 else 975 kn->kn_hookid = 0; 976 return (0); 977 } 978 979 static void 980 filt_userdetach(__unused struct knote *kn) 981 { 982 983 /* 984 * EVFILT_USER knotes are not attached to anything in the kernel. 985 */ 986 } 987 988 static int 989 filt_user(struct knote *kn, __unused long hint) 990 { 991 992 return (kn->kn_hookid); 993 } 994 995 static void 996 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 997 { 998 u_int ffctrl; 999 1000 switch (type) { 1001 case EVENT_REGISTER: 1002 if (kev->fflags & NOTE_TRIGGER) 1003 kn->kn_hookid = 1; 1004 1005 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 1006 kev->fflags &= NOTE_FFLAGSMASK; 1007 switch (ffctrl) { 1008 case NOTE_FFNOP: 1009 break; 1010 1011 case NOTE_FFAND: 1012 kn->kn_sfflags &= kev->fflags; 1013 break; 1014 1015 case NOTE_FFOR: 1016 kn->kn_sfflags |= kev->fflags; 1017 break; 1018 1019 case NOTE_FFCOPY: 1020 kn->kn_sfflags = kev->fflags; 1021 break; 1022 1023 default: 1024 /* XXX Return error? 
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
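
/*
 * Illustrative userspace sketch (not kernel code) of the EVFILT_USER
 * filter above: one thread arms a user event, another triggers it with
 * NOTE_TRIGGER.  EV_CLEAR makes the event auto-reset once read.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	(arm the event)
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	(from another thread, wake the waiter)
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	(waiter blocks here until triggered)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */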

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, NULL));
}

int
sys_kqueuex(struct thread *td, struct kqueuex_args *uap)
{
	int flags;

	if ((uap->flags & ~(KQUEUE_CLOEXEC)) != 0)
		return (EINVAL);
	flags = 0;
	if ((uap->flags & KQUEUE_CLOEXEC) != 0)
		flags |= O_CLOEXEC;
	return (kern_kqueue(td, flags, NULL));
}

static void
kqueue_init(struct kqueue *kq)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}

int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct ucred *cred;
	int fd, error;

	fdp = td->td_proc->p_fd;
	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = falloc_caps(td, &fp, &fd, flags, fcaps);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

struct g_kevent_args {
	int	fd;
	const void *changelist;
	int	nchanges;
	void	*eventlist;
	int	nevents;
	const struct timespec *timeout;
};

int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent_copyout,
		.k_copyin = kevent_copyin,
		.kevent_size = sizeof(struct kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
}

static int
kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
    struct kevent_copyops *k_ops, const char *struct_name)
{
	struct timespec ts, *tsp;
#ifdef KTRACE
	struct kevent *eventlist = uap->eventlist;
#endif
	int error;

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
		    uap->nchanges, k_ops->kevent_size);
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    k_ops, tsp);

#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
		    td->td_retval[0], k_ops->kevent_size);
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
1205 */ 1206 static int 1207 kevent_copyin(void *arg, struct kevent *kevp, int count) 1208 { 1209 struct kevent_args *uap; 1210 int error; 1211 1212 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1213 uap = (struct kevent_args *)arg; 1214 1215 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1216 if (error == 0) 1217 uap->changelist += count; 1218 return (error); 1219 } 1220 1221 #ifdef COMPAT_FREEBSD11 1222 static int 1223 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1224 { 1225 struct freebsd11_kevent_args *uap; 1226 struct freebsd11_kevent kev11; 1227 int error, i; 1228 1229 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1230 uap = (struct freebsd11_kevent_args *)arg; 1231 1232 for (i = 0; i < count; i++) { 1233 kev11.ident = kevp->ident; 1234 kev11.filter = kevp->filter; 1235 kev11.flags = kevp->flags; 1236 kev11.fflags = kevp->fflags; 1237 kev11.data = kevp->data; 1238 kev11.udata = kevp->udata; 1239 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1240 if (error != 0) 1241 break; 1242 uap->eventlist++; 1243 kevp++; 1244 } 1245 return (error); 1246 } 1247 1248 /* 1249 * Copy 'count' items from the list pointed to by uap->changelist. 1250 */ 1251 static int 1252 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1253 { 1254 struct freebsd11_kevent_args *uap; 1255 struct freebsd11_kevent kev11; 1256 int error, i; 1257 1258 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1259 uap = (struct freebsd11_kevent_args *)arg; 1260 1261 for (i = 0; i < count; i++) { 1262 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1263 if (error != 0) 1264 break; 1265 kevp->ident = kev11.ident; 1266 kevp->filter = kev11.filter; 1267 kevp->flags = kev11.flags; 1268 kevp->fflags = kev11.fflags; 1269 kevp->data = (uintptr_t)kev11.data; 1270 kevp->udata = kev11.udata; 1271 bzero(&kevp->ext, sizeof(kevp->ext)); 1272 uap->changelist++; 1273 kevp++; 1274 } 1275 return (error); 1276 } 1277 1278 int 1279 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1280 { 1281 struct kevent_copyops k_ops = { 1282 .arg = uap, 1283 .k_copyout = kevent11_copyout, 1284 .k_copyin = kevent11_copyin, 1285 .kevent_size = sizeof(struct freebsd11_kevent), 1286 }; 1287 struct g_kevent_args gk_args = { 1288 .fd = uap->fd, 1289 .changelist = uap->changelist, 1290 .nchanges = uap->nchanges, 1291 .eventlist = uap->eventlist, 1292 .nevents = uap->nevents, 1293 .timeout = uap->timeout, 1294 }; 1295 1296 return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent")); 1297 } 1298 #endif 1299 1300 int 1301 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1302 struct kevent_copyops *k_ops, const struct timespec *timeout) 1303 { 1304 cap_rights_t rights; 1305 struct file *fp; 1306 int error; 1307 1308 cap_rights_init_zero(&rights); 1309 if (nchanges > 0) 1310 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE); 1311 if (nevents > 0) 1312 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT); 1313 error = fget(td, fd, &rights, &fp); 1314 if (error != 0) 1315 return (error); 1316 1317 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1318 fdrop(fp, td); 1319 1320 return (error); 1321 } 1322 1323 static int 1324 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1325 struct kevent_copyops *k_ops, const struct timespec *timeout) 1326 { 1327 struct kevent keva[KQ_NEVENTS]; 1328 struct kevent *kevp, *changes; 1329 int i, n, nerrors, error; 1330 1331 if (nchanges < 0) 1332 
		return (EINVAL);

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, M_WAITOK);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue. This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}
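
/*
 * Minimal in-kernel sketch (names prefixed "example_" are hypothetical)
 * of using kern_kevent_anonymous() for one-shot polling: the copyin and
 * copyout callbacks simply move kevents to and from caller-supplied
 * arrays instead of userspace.
 *
 *	struct example_kev_arg {
 *		struct kevent *changes;
 *		struct kevent *events;
 *	};
 *
 *	static int
 *	example_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		struct example_kev_arg *ka = arg;
 *
 *		memcpy(kevp, ka->changes, count * sizeof(*kevp));
 *		ka->changes += count;
 *		return (0);
 *	}
 *
 *	static int
 *	example_copyout(void *arg, struct kevent *kevp, int count)
 *	{
 *		struct example_kev_arg *ka = arg;
 *
 *		memcpy(ka->events, kevp, count * sizeof(*kevp));
 *		ka->events += count;
 *		return (0);
 *	}
 *
 *	static int
 *	example_poll_once(struct thread *td, struct kevent *changes,
 *	    struct kevent *events, int nevents)
 *	{
 *		struct example_kev_arg ka = { changes, events };
 *		struct kevent_copyops k_ops = {
 *			.arg = &ka,
 *			.k_copyout = example_copyout,
 *			.k_copyin = example_copyin,
 *			.kevent_size = sizeof(struct kevent),
 *		};
 *
 *		return (kern_kevent_anonymous(td, nevents, &k_ops));
 *	}
 */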

int
kqueue_add_filteropts(int filt, const struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}
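
/*
 * Illustrative sketch (hypothetical "example_" names) of a kernel module
 * registering its own filter for one of the EVFILT_* slots backed by
 * null_filtops above, and removing it on unload.  EVFILT_LIO is used
 * purely for illustration; in the stock kernel that slot is claimed by
 * the AIO module when it loads.
 *
 *	static int
 *	example_attach(struct knote *kn)
 *	{
 *		return (0);
 *	}
 *
 *	static void
 *	example_detach(struct knote *kn)
 *	{
 *	}
 *
 *	static int
 *	example_event(struct knote *kn, long hint)
 *	{
 *		return (hint != 0);
 *	}
 *
 *	static const struct filterops example_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = example_attach,
 *		.f_detach = example_detach,
 *		.f_event = example_event,
 *	};
 *
 *	error = kqueue_add_filteropts(EVFILT_LIO, &example_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_LIO);
 */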
1600 */ 1601 ; 1602 } else if (kq->kq_knhashmask != 0) { 1603 struct klist *list; 1604 1605 list = &kq->kq_knhash[ 1606 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1607 SLIST_FOREACH(kn, list, kn_link) 1608 if (kev->ident == kn->kn_id && 1609 kev->filter == kn->kn_filter) 1610 break; 1611 } 1612 } 1613 1614 /* knote is in the process of changing, wait for it to stabilize. */ 1615 if (kn != NULL && kn_in_flux(kn)) { 1616 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1617 if (filedesc_unlock) { 1618 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1619 filedesc_unlock = 0; 1620 } 1621 kq->kq_state |= KQ_FLUXWAIT; 1622 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1623 if (fp != NULL) { 1624 fdrop(fp, td); 1625 fp = NULL; 1626 } 1627 goto findkn; 1628 } 1629 1630 /* 1631 * kn now contains the matching knote, or NULL if no match 1632 */ 1633 if (kn == NULL) { 1634 if (kev->flags & EV_ADD) { 1635 kn = tkn; 1636 tkn = NULL; 1637 if (kn == NULL) { 1638 KQ_UNLOCK(kq); 1639 error = ENOMEM; 1640 goto done; 1641 } 1642 kn->kn_fp = fp; 1643 kn->kn_kq = kq; 1644 kn->kn_fop = fops; 1645 /* 1646 * apply reference counts to knote structure, and 1647 * do not release it at the end of this routine. 1648 */ 1649 fops = NULL; 1650 fp = NULL; 1651 1652 kn->kn_sfflags = kev->fflags; 1653 kn->kn_sdata = kev->data; 1654 kev->fflags = 0; 1655 kev->data = 0; 1656 kn->kn_kevent = *kev; 1657 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1658 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1659 kn->kn_status = KN_DETACHED; 1660 if ((kev->flags & EV_DISABLE) != 0) 1661 kn->kn_status |= KN_DISABLED; 1662 kn_enter_flux(kn); 1663 1664 error = knote_attach(kn, kq); 1665 KQ_UNLOCK(kq); 1666 if (error != 0) { 1667 tkn = kn; 1668 goto done; 1669 } 1670 1671 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1672 knote_drop_detached(kn, td); 1673 goto done; 1674 } 1675 knl = kn_list_lock(kn); 1676 goto done_ev_add; 1677 } else { 1678 /* No matching knote and the EV_ADD flag is not set. */ 1679 KQ_UNLOCK(kq); 1680 error = ENOENT; 1681 goto done; 1682 } 1683 } 1684 1685 if (kev->flags & EV_DELETE) { 1686 kn_enter_flux(kn); 1687 KQ_UNLOCK(kq); 1688 knote_drop(kn, td); 1689 goto done; 1690 } 1691 1692 if (kev->flags & EV_FORCEONESHOT) { 1693 kn->kn_flags |= EV_ONESHOT; 1694 KNOTE_ACTIVATE(kn, 1); 1695 } 1696 1697 if ((kev->flags & EV_ENABLE) != 0) 1698 kn->kn_status &= ~KN_DISABLED; 1699 else if ((kev->flags & EV_DISABLE) != 0) 1700 kn->kn_status |= KN_DISABLED; 1701 1702 /* 1703 * The user may change some filter values after the initial EV_ADD, 1704 * but doing so will not reset any filter which has already been 1705 * triggered. 1706 */ 1707 kn->kn_status |= KN_SCAN; 1708 kn_enter_flux(kn); 1709 KQ_UNLOCK(kq); 1710 knl = kn_list_lock(kn); 1711 if ((kev->flags & EV_KEEPUDATA) == 0) 1712 kn->kn_kevent.udata = kev->udata; 1713 if (!fops->f_isfd && fops->f_touch != NULL) { 1714 fops->f_touch(kn, kev, EVENT_REGISTER); 1715 } else { 1716 kn->kn_sfflags = kev->fflags; 1717 kn->kn_sdata = kev->data; 1718 } 1719 1720 done_ev_add: 1721 /* 1722 * We can get here with kn->kn_knlist == NULL. This can happen when 1723 * the initial attach event decides that the event is "completed" 1724 * already, e.g., filt_procattach() is called on a zombie process. It 1725 * will call filt_proc() which will remove it from the list, and NULL 1726 * kn_knlist. 1727 * 1728 * KN_DISABLED will be stable while the knote is in flux, so the 1729 * unlocked read will not race with an update. 
1730 */ 1731 if ((kn->kn_status & KN_DISABLED) == 0) 1732 event = kn->kn_fop->f_event(kn, 0); 1733 else 1734 event = 0; 1735 1736 KQ_LOCK(kq); 1737 if (event) 1738 kn->kn_status |= KN_ACTIVE; 1739 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1740 KN_ACTIVE) 1741 knote_enqueue(kn); 1742 kn->kn_status &= ~KN_SCAN; 1743 kn_leave_flux(kn); 1744 kn_list_unlock(knl); 1745 KQ_UNLOCK_FLUX(kq); 1746 1747 done: 1748 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1749 if (filedesc_unlock) 1750 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1751 if (fp != NULL) 1752 fdrop(fp, td); 1753 knote_free(tkn); 1754 if (fops != NULL) 1755 kqueue_fo_release(filt); 1756 return (error); 1757 } 1758 1759 static int 1760 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1761 { 1762 int error; 1763 struct kqueue *kq; 1764 1765 error = 0; 1766 1767 kq = fp->f_data; 1768 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1769 return (EBADF); 1770 *kqp = kq; 1771 KQ_LOCK(kq); 1772 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1773 KQ_UNLOCK(kq); 1774 return (EBADF); 1775 } 1776 kq->kq_refcnt++; 1777 KQ_UNLOCK(kq); 1778 1779 return error; 1780 } 1781 1782 static void 1783 kqueue_release(struct kqueue *kq, int locked) 1784 { 1785 if (locked) 1786 KQ_OWNED(kq); 1787 else 1788 KQ_LOCK(kq); 1789 kq->kq_refcnt--; 1790 if (kq->kq_refcnt == 1) 1791 wakeup(&kq->kq_refcnt); 1792 if (!locked) 1793 KQ_UNLOCK(kq); 1794 } 1795 1796 static void 1797 ast_kqueue(struct thread *td, int tda __unused) 1798 { 1799 taskqueue_quiesce(taskqueue_kqueue_ctx); 1800 } 1801 1802 static void 1803 kqueue_schedtask(struct kqueue *kq) 1804 { 1805 KQ_OWNED(kq); 1806 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1807 ("scheduling kqueue task while draining")); 1808 1809 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1810 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1811 kq->kq_state |= KQ_TASKSCHED; 1812 ast_sched(curthread, TDA_KQUEUE); 1813 } 1814 } 1815 1816 /* 1817 * Expand the kq to make sure we have storage for fops/ident pair. 1818 * 1819 * Return 0 on success (or no work necessary), return errno on failure. 1820 */ 1821 static int 1822 kqueue_expand(struct kqueue *kq, const struct filterops *fops, uintptr_t ident, 1823 int mflag) 1824 { 1825 struct klist *list, *tmp_knhash, *to_free; 1826 u_long tmp_knhashmask; 1827 int error, fd, size; 1828 1829 KQ_NOTOWNED(kq); 1830 1831 error = 0; 1832 to_free = NULL; 1833 if (fops->f_isfd) { 1834 fd = ident; 1835 if (kq->kq_knlistsize <= fd) { 1836 size = kq->kq_knlistsize; 1837 while (size <= fd) 1838 size += KQEXTENT; 1839 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1840 if (list == NULL) 1841 return ENOMEM; 1842 KQ_LOCK(kq); 1843 if ((kq->kq_state & KQ_CLOSING) != 0) { 1844 to_free = list; 1845 error = EBADF; 1846 } else if (kq->kq_knlistsize > fd) { 1847 to_free = list; 1848 } else { 1849 if (kq->kq_knlist != NULL) { 1850 bcopy(kq->kq_knlist, list, 1851 kq->kq_knlistsize * sizeof(*list)); 1852 to_free = kq->kq_knlist; 1853 kq->kq_knlist = NULL; 1854 } 1855 bzero((caddr_t)list + 1856 kq->kq_knlistsize * sizeof(*list), 1857 (size - kq->kq_knlistsize) * sizeof(*list)); 1858 kq->kq_knlistsize = size; 1859 kq->kq_knlist = list; 1860 } 1861 KQ_UNLOCK(kq); 1862 } 1863 } else { 1864 if (kq->kq_knhashmask == 0) { 1865 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, 1866 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? 
			    HASH_WAITOK : HASH_NOWAIT);
			if (tmp_knhash == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if ((kq->kq_state & KQ_CLOSING) != 0) {
				to_free = tmp_knhash;
				error = EBADF;
			} else if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return (error);
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;
	if (maxevents < 0) {
		error = EINVAL;
		goto done_nl;
	}

	rsbt = 0;
	if (tsp != NULL) {
		if (!timespecvalid_interval(tsp)) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(M_WAITOK);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
2071 */ 2072 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 2073 kn->kn_data = 0; 2074 kn->kn_fflags = 0; 2075 } 2076 if (kn->kn_flags & EV_DISPATCH) 2077 kn->kn_status |= KN_DISABLED; 2078 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 2079 kq->kq_count--; 2080 } else 2081 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2082 2083 kn->kn_status &= ~KN_SCAN; 2084 kn_leave_flux(kn); 2085 kn_list_unlock(knl); 2086 influx = 1; 2087 } 2088 2089 /* we are returning a copy to the user */ 2090 kevp++; 2091 nkev++; 2092 count--; 2093 2094 if (nkev == KQ_NEVENTS) { 2095 influx = 0; 2096 KQ_UNLOCK_FLUX(kq); 2097 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2098 nkev = 0; 2099 kevp = keva; 2100 KQ_LOCK(kq); 2101 if (error) 2102 break; 2103 } 2104 } 2105 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 2106 done: 2107 KQ_OWNED(kq); 2108 KQ_UNLOCK_FLUX(kq); 2109 knote_free(marker); 2110 done_nl: 2111 KQ_NOTOWNED(kq); 2112 if (nkev != 0) 2113 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2114 td->td_retval[0] = maxevents - count; 2115 return (error); 2116 } 2117 2118 /*ARGSUSED*/ 2119 static int 2120 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 2121 struct ucred *active_cred, struct thread *td) 2122 { 2123 /* 2124 * Enabling sigio causes two major problems: 2125 * 1) infinite recursion: 2126 * Synopsys: kevent is being used to track signals and have FIOASYNC 2127 * set. On receipt of a signal this will cause a kqueue to recurse 2128 * into itself over and over. Sending the sigio causes the kqueue 2129 * to become ready, which in turn posts sigio again, forever. 2130 * Solution: this can be solved by setting a flag in the kqueue that 2131 * we have a SIGIO in progress. 2132 * 2) locking problems: 2133 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 2134 * us above the proc and pgrp locks. 2135 * Solution: Post a signal using an async mechanism, being sure to 2136 * record a generation count in the delivery so that we do not deliver 2137 * a signal to the wrong process. 2138 * 2139 * Note, these two mechanisms are somewhat mutually exclusive! 2140 */ 2141 #if 0 2142 struct kqueue *kq; 2143 2144 kq = fp->f_data; 2145 switch (cmd) { 2146 case FIOASYNC: 2147 if (*(int *)data) { 2148 kq->kq_state |= KQ_ASYNC; 2149 } else { 2150 kq->kq_state &= ~KQ_ASYNC; 2151 } 2152 return (0); 2153 2154 case FIOSETOWN: 2155 return (fsetown(*(int *)data, &kq->kq_sigio)); 2156 2157 case FIOGETOWN: 2158 *(int *)data = fgetown(&kq->kq_sigio); 2159 return (0); 2160 } 2161 #endif 2162 2163 return (ENOTTY); 2164 } 2165 2166 /*ARGSUSED*/ 2167 static int 2168 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 2169 struct thread *td) 2170 { 2171 struct kqueue *kq; 2172 int revents = 0; 2173 int error; 2174 2175 if ((error = kqueue_acquire(fp, &kq))) 2176 return POLLERR; 2177 2178 KQ_LOCK(kq); 2179 if (events & (POLLIN | POLLRDNORM)) { 2180 if (kq->kq_count) { 2181 revents |= events & (POLLIN | POLLRDNORM); 2182 } else { 2183 selrecord(td, &kq->kq_sel); 2184 if (SEL_WAITING(&kq->kq_sel)) 2185 kq->kq_state |= KQ_SEL; 2186 } 2187 } 2188 kqueue_release(kq, 1); 2189 KQ_UNLOCK(kq); 2190 return (revents); 2191 } 2192 2193 /*ARGSUSED*/ 2194 static int 2195 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred) 2196 { 2197 2198 bzero((void *)st, sizeof *st); 2199 /* 2200 * We no longer return kq_count because the unlocked value is useless. 2201 * If you spent all this time getting the count, why not spend your 2202 * syscall better by calling kevent? 
2203 * 2204 * XXX - This is needed for libc_r. 2205 */ 2206 st->st_mode = S_IFIFO; 2207 return (0); 2208 } 2209 2210 static void 2211 kqueue_drain(struct kqueue *kq, struct thread *td) 2212 { 2213 struct knote *kn; 2214 int i; 2215 2216 KQ_LOCK(kq); 2217 2218 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2219 ("kqueue already closing")); 2220 kq->kq_state |= KQ_CLOSING; 2221 if (kq->kq_refcnt > 1) 2222 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2223 2224 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2225 2226 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2227 ("kqueue's knlist not empty")); 2228 2229 for (i = 0; i < kq->kq_knlistsize; i++) { 2230 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2231 if (kn_in_flux(kn)) { 2232 kq->kq_state |= KQ_FLUXWAIT; 2233 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2234 continue; 2235 } 2236 kn_enter_flux(kn); 2237 KQ_UNLOCK(kq); 2238 knote_drop(kn, td); 2239 KQ_LOCK(kq); 2240 } 2241 } 2242 if (kq->kq_knhashmask != 0) { 2243 for (i = 0; i <= kq->kq_knhashmask; i++) { 2244 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2245 if (kn_in_flux(kn)) { 2246 kq->kq_state |= KQ_FLUXWAIT; 2247 msleep(kq, &kq->kq_lock, PSOCK, 2248 "kqclo2", 0); 2249 continue; 2250 } 2251 kn_enter_flux(kn); 2252 KQ_UNLOCK(kq); 2253 knote_drop(kn, td); 2254 KQ_LOCK(kq); 2255 } 2256 } 2257 } 2258 2259 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2260 kq->kq_state |= KQ_TASKDRAIN; 2261 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2262 } 2263 2264 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2265 selwakeuppri(&kq->kq_sel, PSOCK); 2266 if (!SEL_WAITING(&kq->kq_sel)) 2267 kq->kq_state &= ~KQ_SEL; 2268 } 2269 2270 KQ_UNLOCK(kq); 2271 } 2272 2273 static void 2274 kqueue_destroy(struct kqueue *kq) 2275 { 2276 2277 KASSERT(kq->kq_fdp == NULL, 2278 ("kqueue still attached to a file descriptor")); 2279 seldrain(&kq->kq_sel); 2280 knlist_destroy(&kq->kq_sel.si_note); 2281 mtx_destroy(&kq->kq_lock); 2282 2283 if (kq->kq_knhash != NULL) 2284 free(kq->kq_knhash, M_KQUEUE); 2285 if (kq->kq_knlist != NULL) 2286 free(kq->kq_knlist, M_KQUEUE); 2287 2288 funsetown(&kq->kq_sigio); 2289 } 2290 2291 /*ARGSUSED*/ 2292 static int 2293 kqueue_close(struct file *fp, struct thread *td) 2294 { 2295 struct kqueue *kq = fp->f_data; 2296 struct filedesc *fdp; 2297 int error; 2298 int filedesc_unlock; 2299 2300 if ((error = kqueue_acquire(fp, &kq))) 2301 return error; 2302 kqueue_drain(kq, td); 2303 2304 /* 2305 * We could be called due to the knote_drop() doing fdrop(), 2306 * called from kqueue_register(). In this case the global 2307 * lock is owned, and filedesc sx is locked before, to not 2308 * take the sleepable lock after non-sleepable. 
2309 */ 2310 fdp = kq->kq_fdp; 2311 kq->kq_fdp = NULL; 2312 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2313 FILEDESC_XLOCK(fdp); 2314 filedesc_unlock = 1; 2315 } else 2316 filedesc_unlock = 0; 2317 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2318 if (filedesc_unlock) 2319 FILEDESC_XUNLOCK(fdp); 2320 2321 kqueue_destroy(kq); 2322 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2323 crfree(kq->kq_cred); 2324 free(kq, M_KQUEUE); 2325 fp->f_data = NULL; 2326 2327 return (0); 2328 } 2329 2330 static int 2331 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2332 { 2333 struct kqueue *kq = fp->f_data; 2334 2335 kif->kf_type = KF_TYPE_KQUEUE; 2336 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq; 2337 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count; 2338 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state; 2339 return (0); 2340 } 2341 2342 static void 2343 kqueue_wakeup(struct kqueue *kq) 2344 { 2345 KQ_OWNED(kq); 2346 2347 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2348 kq->kq_state &= ~KQ_SLEEP; 2349 wakeup(kq); 2350 } 2351 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2352 selwakeuppri(&kq->kq_sel, PSOCK); 2353 if (!SEL_WAITING(&kq->kq_sel)) 2354 kq->kq_state &= ~KQ_SEL; 2355 } 2356 if (!knlist_empty(&kq->kq_sel.si_note)) 2357 kqueue_schedtask(kq); 2358 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2359 pgsigio(&kq->kq_sigio, SIGIO, 0); 2360 } 2361 } 2362 2363 /* 2364 * Walk down a list of knotes, activating them if their event has triggered. 2365 * 2366 * There is a possibility to optimize in the case of one kq watching another. 2367 * Instead of scheduling a task to wake it up, you could pass enough state 2368 * down the chain to make up the parent kqueue. Make this code functional 2369 * first. 2370 */ 2371 void 2372 knote(struct knlist *list, long hint, int lockflags) 2373 { 2374 struct kqueue *kq; 2375 struct knote *kn, *tkn; 2376 int error; 2377 2378 if (list == NULL) 2379 return; 2380 2381 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2382 2383 if ((lockflags & KNF_LISTLOCKED) == 0) 2384 list->kl_lock(list->kl_lockarg); 2385 2386 /* 2387 * If we unlock the list lock (and enter influx), we can 2388 * eliminate the kqueue scheduling, but this will introduce 2389 * four lock/unlock's for each knote to test. Also, marker 2390 * would be needed to keep iteration position, since filters 2391 * or other threads could remove events. 2392 */ 2393 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2394 kq = kn->kn_kq; 2395 KQ_LOCK(kq); 2396 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2397 /* 2398 * Do not process the influx notes, except for 2399 * the influx coming from the kq unlock in the 2400 * kqueue_scan(). In the later case, we do 2401 * not interfere with the scan, since the code 2402 * fragment in kqueue_scan() locks the knlist, 2403 * and cannot proceed until we finished. 
2404 */ 2405 KQ_UNLOCK(kq); 2406 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2407 kn_enter_flux(kn); 2408 KQ_UNLOCK(kq); 2409 error = kn->kn_fop->f_event(kn, hint); 2410 KQ_LOCK(kq); 2411 kn_leave_flux(kn); 2412 if (error) 2413 KNOTE_ACTIVATE(kn, 1); 2414 KQ_UNLOCK_FLUX(kq); 2415 } else { 2416 if (kn->kn_fop->f_event(kn, hint)) 2417 KNOTE_ACTIVATE(kn, 1); 2418 KQ_UNLOCK(kq); 2419 } 2420 } 2421 if ((lockflags & KNF_LISTLOCKED) == 0) 2422 list->kl_unlock(list->kl_lockarg); 2423 } 2424 2425 /* 2426 * add a knote to a knlist 2427 */ 2428 void 2429 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2430 { 2431 2432 KNL_ASSERT_LOCK(knl, islocked); 2433 KQ_NOTOWNED(kn->kn_kq); 2434 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2435 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2436 ("knote %p was not detached", kn)); 2437 if (!islocked) 2438 knl->kl_lock(knl->kl_lockarg); 2439 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2440 if (!islocked) 2441 knl->kl_unlock(knl->kl_lockarg); 2442 KQ_LOCK(kn->kn_kq); 2443 kn->kn_knlist = knl; 2444 kn->kn_status &= ~KN_DETACHED; 2445 KQ_UNLOCK(kn->kn_kq); 2446 } 2447 2448 static void 2449 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2450 int kqislocked) 2451 { 2452 2453 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2454 KNL_ASSERT_LOCK(knl, knlislocked); 2455 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2456 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2457 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2458 ("knote %p was already detached", kn)); 2459 if (!knlislocked) 2460 knl->kl_lock(knl->kl_lockarg); 2461 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2462 kn->kn_knlist = NULL; 2463 if (!knlislocked) 2464 kn_list_unlock(knl); 2465 if (!kqislocked) 2466 KQ_LOCK(kn->kn_kq); 2467 kn->kn_status |= KN_DETACHED; 2468 if (!kqislocked) 2469 KQ_UNLOCK(kn->kn_kq); 2470 } 2471 2472 /* 2473 * remove knote from the specified knlist 2474 */ 2475 void 2476 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2477 { 2478 2479 knlist_remove_kq(knl, kn, islocked, 0); 2480 } 2481 2482 int 2483 knlist_empty(struct knlist *knl) 2484 { 2485 2486 KNL_ASSERT_LOCKED(knl); 2487 return (SLIST_EMPTY(&knl->kl_list)); 2488 } 2489 2490 static struct mtx knlist_lock; 2491 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2492 MTX_DEF); 2493 static void knlist_mtx_lock(void *arg); 2494 static void knlist_mtx_unlock(void *arg); 2495 2496 static void 2497 knlist_mtx_lock(void *arg) 2498 { 2499 2500 mtx_lock((struct mtx *)arg); 2501 } 2502 2503 static void 2504 knlist_mtx_unlock(void *arg) 2505 { 2506 2507 mtx_unlock((struct mtx *)arg); 2508 } 2509 2510 static void 2511 knlist_mtx_assert_lock(void *arg, int what) 2512 { 2513 2514 if (what == LA_LOCKED) 2515 mtx_assert((struct mtx *)arg, MA_OWNED); 2516 else 2517 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2518 } 2519 2520 void 2521 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2522 void (*kl_unlock)(void *), 2523 void (*kl_assert_lock)(void *, int)) 2524 { 2525 2526 if (lock == NULL) 2527 knl->kl_lockarg = &knlist_lock; 2528 else 2529 knl->kl_lockarg = lock; 2530 2531 if (kl_lock == NULL) 2532 knl->kl_lock = knlist_mtx_lock; 2533 else 2534 knl->kl_lock = kl_lock; 2535 if (kl_unlock == NULL) 2536 knl->kl_unlock = knlist_mtx_unlock; 2537 else 2538 knl->kl_unlock = kl_unlock; 2539 if (kl_assert_lock == NULL) 2540 knl->kl_assert_lock = knlist_mtx_assert_lock; 2541 

void
knlist_destroy(struct knlist *knl)
{

	KASSERT(KNLIST_EMPTY(knl),
	    ("destroying knlist %p with knotes on it", knl));
}

void
knlist_detach(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	knl->kl_autodestroy = 1;
	if (knlist_empty(knl)) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop_detached(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still in flux knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}
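
/*
 * Example (illustrative): callers do not normally invoke
 * knlist_cleardel() directly; <sys/event.h> wraps it as knlist_clear()
 * (mark the knotes EV_EOF | EV_ONESHOT) and knlist_delete() (drop the
 * knotes outright).  A teardown path such as the hypothetical mydev
 * detach above might therefore end with:
 *
 *	knlist_clear(&sc->sc_note, 0);
 *	knlist_destroy(&sc->sc_note);
 */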
2663 */ 2664 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2665 KQ_LOCK(kq); 2666 2667 again: 2668 influx = 0; 2669 while (kq->kq_knlistsize > fd && 2670 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2671 if (kn_in_flux(kn)) { 2672 /* someone else might be waiting on our knote */ 2673 if (influx) 2674 wakeup(kq); 2675 kq->kq_state |= KQ_FLUXWAIT; 2676 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2677 goto again; 2678 } 2679 kn_enter_flux(kn); 2680 KQ_UNLOCK(kq); 2681 influx = 1; 2682 knote_drop(kn, td); 2683 KQ_LOCK(kq); 2684 } 2685 KQ_UNLOCK_FLUX(kq); 2686 } 2687 } 2688 2689 static int 2690 knote_attach(struct knote *kn, struct kqueue *kq) 2691 { 2692 struct klist *list; 2693 2694 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2695 KQ_OWNED(kq); 2696 2697 if ((kq->kq_state & KQ_CLOSING) != 0) 2698 return (EBADF); 2699 if (kn->kn_fop->f_isfd) { 2700 if (kn->kn_id >= kq->kq_knlistsize) 2701 return (ENOMEM); 2702 list = &kq->kq_knlist[kn->kn_id]; 2703 } else { 2704 if (kq->kq_knhash == NULL) 2705 return (ENOMEM); 2706 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2707 } 2708 SLIST_INSERT_HEAD(list, kn, kn_link); 2709 return (0); 2710 } 2711 2712 static void 2713 knote_drop(struct knote *kn, struct thread *td) 2714 { 2715 2716 if ((kn->kn_status & KN_DETACHED) == 0) 2717 kn->kn_fop->f_detach(kn); 2718 knote_drop_detached(kn, td); 2719 } 2720 2721 static void 2722 knote_drop_detached(struct knote *kn, struct thread *td) 2723 { 2724 struct kqueue *kq; 2725 struct klist *list; 2726 2727 kq = kn->kn_kq; 2728 2729 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2730 ("knote %p still attached", kn)); 2731 KQ_NOTOWNED(kq); 2732 2733 KQ_LOCK(kq); 2734 KASSERT(kn->kn_influx == 1, 2735 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2736 2737 if (kn->kn_fop->f_isfd) 2738 list = &kq->kq_knlist[kn->kn_id]; 2739 else 2740 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2741 2742 if (!SLIST_EMPTY(list)) 2743 SLIST_REMOVE(list, kn, knote, kn_link); 2744 if (kn->kn_status & KN_QUEUED) 2745 knote_dequeue(kn); 2746 KQ_UNLOCK_FLUX(kq); 2747 2748 if (kn->kn_fop->f_isfd) { 2749 fdrop(kn->kn_fp, td); 2750 kn->kn_fp = NULL; 2751 } 2752 kqueue_fo_release(kn->kn_kevent.filter); 2753 kn->kn_fop = NULL; 2754 knote_free(kn); 2755 } 2756 2757 static void 2758 knote_enqueue(struct knote *kn) 2759 { 2760 struct kqueue *kq = kn->kn_kq; 2761 2762 KQ_OWNED(kn->kn_kq); 2763 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2764 2765 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2766 kn->kn_status |= KN_QUEUED; 2767 kq->kq_count++; 2768 kqueue_wakeup(kq); 2769 } 2770 2771 static void 2772 knote_dequeue(struct knote *kn) 2773 { 2774 struct kqueue *kq = kn->kn_kq; 2775 2776 KQ_OWNED(kn->kn_kq); 2777 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2778 2779 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2780 kn->kn_status &= ~KN_QUEUED; 2781 kq->kq_count--; 2782 } 2783 2784 static void 2785 knote_init(void) 2786 { 2787 2788 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2789 NULL, NULL, UMA_ALIGN_PTR, 0); 2790 ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue); 2791 } 2792 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2793 2794 static struct knote * 2795 knote_alloc(int mflag) 2796 { 2797 2798 return (uma_zalloc(knote_zone, mflag | M_ZERO)); 2799 } 2800 2801 static void 2802 knote_free(struct knote *kn) 2803 { 2804 2805 uma_zfree(knote_zone, kn); 2806 } 2807 2808 /* 2809 * Register the kev w/ the kq 

/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
	    &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, mflag);
	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);
	return (error);
}
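
/*
 * Example (illustrative sketch): kqfd_register() lets kernel code post
 * a kevent into a kqueue that userland handed it as a file descriptor,
 * e.g. to report completion of an asynchronous request.  Assuming a
 * hypothetical request structure carrying the user's kqueue fd and a
 * completion cookie (none of these names are part of this file):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, (uintptr_t)req, EVFILT_USER, EV_ADD | EV_ONESHOT,
 *	    NOTE_TRIGGER, 0, req->req_cookie);
 *	error = kqfd_register(req->req_kqfd, &kev, td, M_WAITOK);
 */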