1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 6 * Copyright (c) 2009 Apple, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 #include "opt_ktrace.h" 33 #include "opt_kqueue.h" 34 35 #ifdef COMPAT_FREEBSD11 36 #define _WANT_FREEBSD11_KEVENT 37 #endif 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/capsicum.h> 42 #include <sys/kernel.h> 43 #include <sys/limits.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/proc.h> 47 #include <sys/malloc.h> 48 #include <sys/unistd.h> 49 #include <sys/file.h> 50 #include <sys/filedesc.h> 51 #include <sys/filio.h> 52 #include <sys/fcntl.h> 53 #include <sys/kthread.h> 54 #include <sys/selinfo.h> 55 #include <sys/queue.h> 56 #include <sys/event.h> 57 #include <sys/eventvar.h> 58 #include <sys/poll.h> 59 #include <sys/protosw.h> 60 #include <sys/resourcevar.h> 61 #include <sys/sigio.h> 62 #include <sys/signalvar.h> 63 #include <sys/socket.h> 64 #include <sys/socketvar.h> 65 #include <sys/stat.h> 66 #include <sys/sysctl.h> 67 #include <sys/sysproto.h> 68 #include <sys/syscallsubr.h> 69 #include <sys/taskqueue.h> 70 #include <sys/uio.h> 71 #include <sys/user.h> 72 #ifdef KTRACE 73 #include <sys/ktrace.h> 74 #endif 75 #include <machine/atomic.h> 76 77 #include <vm/uma.h> 78 79 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 80 81 /* 82 * This lock is used if multiple kq locks are required. This possibly 83 * should be made into a per proc lock. 84 */ 85 static struct mtx kq_global; 86 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 87 #define KQ_GLOBAL_LOCK(lck, haslck) do { \ 88 if (!haslck) \ 89 mtx_lock(lck); \ 90 haslck = 1; \ 91 } while (0) 92 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 93 if (haslck) \ 94 mtx_unlock(lck); \ 95 haslck = 0; \ 96 } while (0) 97 98 TASKQUEUE_DEFINE_THREAD(kqueue_ctx); 99 100 static int kevent_copyout(void *arg, struct kevent *kevp, int count); 101 static int kevent_copyin(void *arg, struct kevent *kevp, int count); 102 static int kqueue_register(struct kqueue *kq, struct kevent *kev, 103 struct thread *td, int mflag); 104 static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 105 static void kqueue_release(struct kqueue *kq, int locked); 106 static void kqueue_destroy(struct kqueue *kq); 107 static void kqueue_drain(struct kqueue *kq, struct thread *td); 108 static int kqueue_expand(struct kqueue *kq, const struct filterops *fops, 109 uintptr_t ident, int mflag); 110 static void kqueue_task(void *arg, int pending); 111 static int kqueue_scan(struct kqueue *kq, int maxevents, 112 struct kevent_copyops *k_ops, 113 const struct timespec *timeout, 114 struct kevent *keva, struct thread *td); 115 static void kqueue_wakeup(struct kqueue *kq); 116 static const struct filterops *kqueue_fo_find(int filt); 117 static void kqueue_fo_release(int filt); 118 struct g_kevent_args; 119 static int kern_kevent_generic(struct thread *td, 120 struct g_kevent_args *uap, 121 struct kevent_copyops *k_ops, const char *struct_name); 122 123 static fo_ioctl_t kqueue_ioctl; 124 static fo_poll_t kqueue_poll; 125 static fo_kqfilter_t kqueue_kqfilter; 126 static fo_stat_t kqueue_stat; 127 static fo_close_t kqueue_close; 128 static fo_fill_kinfo_t kqueue_fill_kinfo; 129 130 static struct fileops kqueueops = { 131 .fo_read = invfo_rdwr, 132 .fo_write = invfo_rdwr, 133 .fo_truncate = invfo_truncate, 134 .fo_ioctl = kqueue_ioctl, 135 .fo_poll = kqueue_poll, 136 .fo_kqfilter = kqueue_kqfilter, 137 .fo_stat = kqueue_stat, 138 .fo_close = kqueue_close, 139 .fo_chmod = invfo_chmod, 140 .fo_chown = invfo_chown, 141 .fo_sendfile = invfo_sendfile, 142 .fo_fill_kinfo = kqueue_fill_kinfo, 143 }; 144 145 static int knote_attach(struct knote *kn, struct kqueue *kq); 146 static void knote_drop(struct knote *kn, struct thread *td); 147 static void knote_drop_detached(struct knote *kn, struct thread *td); 148 static void knote_enqueue(struct knote *kn); 149 static void knote_dequeue(struct knote *kn); 150 static void knote_init(void); 151 static struct knote *knote_alloc(int mflag); 152 static void knote_free(struct knote *kn); 153 154 static void filt_kqdetach(struct knote *kn); 155 static int filt_kqueue(struct knote *kn, long hint); 156 static int filt_procattach(struct knote *kn); 157 static void filt_procdetach(struct knote *kn); 158 static int filt_proc(struct knote *kn, long hint); 159 static int filt_fileattach(struct knote *kn); 160 static void filt_timerexpire(void *knx); 161 static void filt_timerexpire_l(struct knote *kn, bool proc_locked); 162 static int filt_timerattach(struct knote *kn); 163 static void filt_timerdetach(struct knote *kn); 164 static void filt_timerstart(struct knote *kn, sbintime_t to); 165 static void filt_timertouch(struct knote *kn, struct kevent *kev, 166 u_long type); 167 static int filt_timervalidate(struct knote *kn, sbintime_t *to); 168 static int filt_timer(struct knote *kn, long hint); 169 static int filt_userattach(struct knote *kn); 170 static void filt_userdetach(struct knote *kn); 171 static int filt_user(struct knote *kn, long hint); 172 static void filt_usertouch(struct knote *kn, struct kevent *kev, 173 u_long type); 174 175 static struct filterops file_filtops = { 176 .f_isfd = 1, 177 .f_attach = filt_fileattach, 178 }; 179 static struct filterops kqread_filtops = { 180 .f_isfd = 1, 181 .f_detach = filt_kqdetach, 182 .f_event = filt_kqueue, 183 }; 184 /* XXX - move to kern_proc.c? */ 185 static struct filterops proc_filtops = { 186 .f_isfd = 0, 187 .f_attach = filt_procattach, 188 .f_detach = filt_procdetach, 189 .f_event = filt_proc, 190 }; 191 static struct filterops timer_filtops = { 192 .f_isfd = 0, 193 .f_attach = filt_timerattach, 194 .f_detach = filt_timerdetach, 195 .f_event = filt_timer, 196 .f_touch = filt_timertouch, 197 }; 198 static struct filterops user_filtops = { 199 .f_attach = filt_userattach, 200 .f_detach = filt_userdetach, 201 .f_event = filt_user, 202 .f_touch = filt_usertouch, 203 }; 204 205 static uma_zone_t knote_zone; 206 static unsigned int __exclusive_cache_line kq_ncallouts; 207 static unsigned int kq_calloutmax = 4 * 1024; 208 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 209 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 210 211 /* XXX - ensure not influx ? */ 212 #define KNOTE_ACTIVATE(kn, islock) do { \ 213 if ((islock)) \ 214 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 215 else \ 216 KQ_LOCK((kn)->kn_kq); \ 217 (kn)->kn_status |= KN_ACTIVE; \ 218 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 219 knote_enqueue((kn)); \ 220 if (!(islock)) \ 221 KQ_UNLOCK((kn)->kn_kq); \ 222 } while (0) 223 #define KQ_LOCK(kq) do { \ 224 mtx_lock(&(kq)->kq_lock); \ 225 } while (0) 226 #define KQ_FLUX_WAKEUP(kq) do { \ 227 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 228 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 229 wakeup((kq)); \ 230 } \ 231 } while (0) 232 #define KQ_UNLOCK_FLUX(kq) do { \ 233 KQ_FLUX_WAKEUP(kq); \ 234 mtx_unlock(&(kq)->kq_lock); \ 235 } while (0) 236 #define KQ_UNLOCK(kq) do { \ 237 mtx_unlock(&(kq)->kq_lock); \ 238 } while (0) 239 #define KQ_OWNED(kq) do { \ 240 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 241 } while (0) 242 #define KQ_NOTOWNED(kq) do { \ 243 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 244 } while (0) 245 246 static struct knlist * 247 kn_list_lock(struct knote *kn) 248 { 249 struct knlist *knl; 250 251 knl = kn->kn_knlist; 252 if (knl != NULL) 253 knl->kl_lock(knl->kl_lockarg); 254 return (knl); 255 } 256 257 static void 258 kn_list_unlock(struct knlist *knl) 259 { 260 bool do_free; 261 262 if (knl == NULL) 263 return; 264 do_free = knl->kl_autodestroy && knlist_empty(knl); 265 knl->kl_unlock(knl->kl_lockarg); 266 if (do_free) { 267 knlist_destroy(knl); 268 free(knl, M_KQUEUE); 269 } 270 } 271 272 static bool 273 kn_in_flux(struct knote *kn) 274 { 275 276 return (kn->kn_influx > 0); 277 } 278 279 static void 280 kn_enter_flux(struct knote *kn) 281 { 282 283 KQ_OWNED(kn->kn_kq); 284 MPASS(kn->kn_influx < INT_MAX); 285 kn->kn_influx++; 286 } 287 288 static bool 289 kn_leave_flux(struct knote *kn) 290 { 291 292 KQ_OWNED(kn->kn_kq); 293 MPASS(kn->kn_influx > 0); 294 kn->kn_influx--; 295 return (kn->kn_influx == 0); 296 } 297 298 #define KNL_ASSERT_LOCK(knl, islocked) do { \ 299 if (islocked) \ 300 KNL_ASSERT_LOCKED(knl); \ 301 else \ 302 KNL_ASSERT_UNLOCKED(knl); \ 303 } while (0) 304 #ifdef INVARIANTS 305 #define KNL_ASSERT_LOCKED(knl) do { \ 306 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \ 307 } while (0) 308 #define KNL_ASSERT_UNLOCKED(knl) do { \ 309 knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \ 310 } while (0) 311 #else /* !INVARIANTS */ 312 #define KNL_ASSERT_LOCKED(knl) do {} while (0) 313 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 314 #endif /* INVARIANTS */ 315 316 #ifndef KN_HASHSIZE 317 #define KN_HASHSIZE 64 /* XXX should be tunable */ 318 #endif 319 320 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 321 322 static int 323 filt_nullattach(struct knote *kn) 324 { 325 326 return (ENXIO); 327 }; 328 329 struct filterops null_filtops = { 330 .f_isfd = 0, 331 .f_attach = filt_nullattach, 332 }; 333 334 /* XXX - make SYSINIT to add these, and move into respective modules. */ 335 extern struct filterops sig_filtops; 336 extern struct filterops fs_filtops; 337 338 /* 339 * Table for all system-defined filters. 340 */ 341 static struct mtx filterops_lock; 342 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", MTX_DEF); 343 static struct { 344 const struct filterops *for_fop; 345 int for_nolock; 346 int for_refcnt; 347 } sysfilt_ops[EVFILT_SYSCOUNT] = { 348 { &file_filtops, 1 }, /* EVFILT_READ */ 349 { &file_filtops, 1 }, /* EVFILT_WRITE */ 350 { &null_filtops }, /* EVFILT_AIO */ 351 { &file_filtops, 1 }, /* EVFILT_VNODE */ 352 { &proc_filtops, 1 }, /* EVFILT_PROC */ 353 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ 354 { &timer_filtops, 1 }, /* EVFILT_TIMER */ 355 { &file_filtops, 1 }, /* EVFILT_PROCDESC */ 356 { &fs_filtops, 1 }, /* EVFILT_FS */ 357 { &null_filtops }, /* EVFILT_LIO */ 358 { &user_filtops, 1 }, /* EVFILT_USER */ 359 { &null_filtops }, /* EVFILT_SENDFILE */ 360 { &file_filtops, 1 }, /* EVFILT_EMPTY */ 361 }; 362 363 /* 364 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 365 * method. 366 */ 367 static int 368 filt_fileattach(struct knote *kn) 369 { 370 371 return (fo_kqfilter(kn->kn_fp, kn)); 372 } 373 374 /*ARGSUSED*/ 375 static int 376 kqueue_kqfilter(struct file *fp, struct knote *kn) 377 { 378 struct kqueue *kq = kn->kn_fp->f_data; 379 380 if (kn->kn_filter != EVFILT_READ) 381 return (EINVAL); 382 383 kn->kn_status |= KN_KQUEUE; 384 kn->kn_fop = &kqread_filtops; 385 knlist_add(&kq->kq_sel.si_note, kn, 0); 386 387 return (0); 388 } 389 390 static void 391 filt_kqdetach(struct knote *kn) 392 { 393 struct kqueue *kq = kn->kn_fp->f_data; 394 395 knlist_remove(&kq->kq_sel.si_note, kn, 0); 396 } 397 398 /*ARGSUSED*/ 399 static int 400 filt_kqueue(struct knote *kn, long hint) 401 { 402 struct kqueue *kq = kn->kn_fp->f_data; 403 404 kn->kn_data = kq->kq_count; 405 return (kn->kn_data > 0); 406 } 407 408 /* XXX - move to kern_proc.c? */ 409 static int 410 filt_procattach(struct knote *kn) 411 { 412 struct proc *p; 413 int error; 414 bool exiting, immediate; 415 416 exiting = immediate = false; 417 if (kn->kn_sfflags & NOTE_EXIT) 418 p = pfind_any(kn->kn_id); 419 else 420 p = pfind(kn->kn_id); 421 if (p == NULL) 422 return (ESRCH); 423 if (p->p_flag & P_WEXIT) 424 exiting = true; 425 426 if ((error = p_cansee(curthread, p))) { 427 PROC_UNLOCK(p); 428 return (error); 429 } 430 431 kn->kn_ptr.p_proc = p; 432 kn->kn_flags |= EV_CLEAR; /* automatically set */ 433 434 /* 435 * Internal flag indicating registration done by kernel for the 436 * purposes of getting a NOTE_CHILD notification. 437 */ 438 if (kn->kn_flags & EV_FLAG2) { 439 kn->kn_flags &= ~EV_FLAG2; 440 kn->kn_data = kn->kn_sdata; /* ppid */ 441 kn->kn_fflags = NOTE_CHILD; 442 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); 443 immediate = true; /* Force immediate activation of child note. */ 444 } 445 /* 446 * Internal flag indicating registration done by kernel (for other than 447 * NOTE_CHILD). 448 */ 449 if (kn->kn_flags & EV_FLAG1) { 450 kn->kn_flags &= ~EV_FLAG1; 451 } 452 453 knlist_add(p->p_klist, kn, 1); 454 455 /* 456 * Immediately activate any child notes or, in the case of a zombie 457 * target process, exit notes. The latter is necessary to handle the 458 * case where the target process, e.g. a child, dies before the kevent 459 * is registered. 460 */ 461 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) 462 KNOTE_ACTIVATE(kn, 0); 463 464 PROC_UNLOCK(p); 465 466 return (0); 467 } 468 469 /* 470 * The knote may be attached to a different process, which may exit, 471 * leaving nothing for the knote to be attached to. So when the process 472 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 473 * it will be deleted when read out. However, as part of the knote deletion, 474 * this routine is called, so a check is needed to avoid actually performing 475 * a detach, because the original process does not exist any more. 476 */ 477 /* XXX - move to kern_proc.c? */ 478 static void 479 filt_procdetach(struct knote *kn) 480 { 481 482 knlist_remove(kn->kn_knlist, kn, 0); 483 kn->kn_ptr.p_proc = NULL; 484 } 485 486 /* XXX - move to kern_proc.c? */ 487 static int 488 filt_proc(struct knote *kn, long hint) 489 { 490 struct proc *p; 491 u_int event; 492 493 p = kn->kn_ptr.p_proc; 494 if (p == NULL) /* already activated, from attach filter */ 495 return (0); 496 497 /* Mask off extra data. */ 498 event = (u_int)hint & NOTE_PCTRLMASK; 499 500 /* If the user is interested in this event, record it. */ 501 if (kn->kn_sfflags & event) 502 kn->kn_fflags |= event; 503 504 /* Process is gone, so flag the event as finished. */ 505 if (event == NOTE_EXIT) { 506 kn->kn_flags |= EV_EOF | EV_ONESHOT; 507 kn->kn_ptr.p_proc = NULL; 508 if (kn->kn_fflags & NOTE_EXIT) 509 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); 510 if (kn->kn_fflags == 0) 511 kn->kn_flags |= EV_DROP; 512 return (1); 513 } 514 515 return (kn->kn_fflags != 0); 516 } 517 518 /* 519 * Called when the process forked. It mostly does the same as the 520 * knote(), activating all knotes registered to be activated when the 521 * process forked. Additionally, for each knote attached to the 522 * parent, check whether user wants to track the new process. If so 523 * attach a new knote to it, and immediately report an event with the 524 * child's pid. 525 */ 526 void 527 knote_fork(struct knlist *list, int pid) 528 { 529 struct kqueue *kq; 530 struct knote *kn; 531 struct kevent kev; 532 int error; 533 534 MPASS(list != NULL); 535 KNL_ASSERT_LOCKED(list); 536 if (SLIST_EMPTY(&list->kl_list)) 537 return; 538 539 memset(&kev, 0, sizeof(kev)); 540 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 541 kq = kn->kn_kq; 542 KQ_LOCK(kq); 543 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 544 KQ_UNLOCK(kq); 545 continue; 546 } 547 548 /* 549 * The same as knote(), activate the event. 550 */ 551 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 552 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 553 KNOTE_ACTIVATE(kn, 1); 554 KQ_UNLOCK(kq); 555 continue; 556 } 557 558 /* 559 * The NOTE_TRACK case. In addition to the activation 560 * of the event, we need to register new events to 561 * track the child. Drop the locks in preparation for 562 * the call to kqueue_register(). 563 */ 564 kn_enter_flux(kn); 565 KQ_UNLOCK(kq); 566 list->kl_unlock(list->kl_lockarg); 567 568 /* 569 * Activate existing knote and register tracking knotes with 570 * new process. 571 * 572 * First register a knote to get just the child notice. This 573 * must be a separate note from a potential NOTE_EXIT 574 * notification since both NOTE_CHILD and NOTE_EXIT are defined 575 * to use the data field (in conflicting ways). 576 */ 577 kev.ident = pid; 578 kev.filter = kn->kn_filter; 579 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | 580 EV_FLAG2; 581 kev.fflags = kn->kn_sfflags; 582 kev.data = kn->kn_id; /* parent */ 583 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 584 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 585 if (error) 586 kn->kn_fflags |= NOTE_TRACKERR; 587 588 /* 589 * Then register another knote to track other potential events 590 * from the new process. 591 */ 592 kev.ident = pid; 593 kev.filter = kn->kn_filter; 594 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 595 kev.fflags = kn->kn_sfflags; 596 kev.data = kn->kn_id; /* parent */ 597 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 598 error = kqueue_register(kq, &kev, NULL, M_NOWAIT); 599 if (error) 600 kn->kn_fflags |= NOTE_TRACKERR; 601 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 602 KNOTE_ACTIVATE(kn, 0); 603 list->kl_lock(list->kl_lockarg); 604 KQ_LOCK(kq); 605 kn_leave_flux(kn); 606 KQ_UNLOCK_FLUX(kq); 607 } 608 } 609 610 /* 611 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 612 * interval timer support code. 613 */ 614 615 #define NOTE_TIMER_PRECMASK \ 616 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) 617 618 static sbintime_t 619 timer2sbintime(int64_t data, int flags) 620 { 621 int64_t secs; 622 623 /* 624 * Macros for converting to the fractional second portion of an 625 * sbintime_t using 64bit multiplication to improve precision. 626 */ 627 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) 628 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) 629 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) 630 switch (flags & NOTE_TIMER_PRECMASK) { 631 case NOTE_SECONDS: 632 #ifdef __LP64__ 633 if (data > (SBT_MAX / SBT_1S)) 634 return (SBT_MAX); 635 #endif 636 return ((sbintime_t)data << 32); 637 case NOTE_MSECONDS: /* FALLTHROUGH */ 638 case 0: 639 if (data >= 1000) { 640 secs = data / 1000; 641 #ifdef __LP64__ 642 if (secs > (SBT_MAX / SBT_1S)) 643 return (SBT_MAX); 644 #endif 645 return (secs << 32 | MS_TO_SBT(data % 1000)); 646 } 647 return (MS_TO_SBT(data)); 648 case NOTE_USECONDS: 649 if (data >= 1000000) { 650 secs = data / 1000000; 651 #ifdef __LP64__ 652 if (secs > (SBT_MAX / SBT_1S)) 653 return (SBT_MAX); 654 #endif 655 return (secs << 32 | US_TO_SBT(data % 1000000)); 656 } 657 return (US_TO_SBT(data)); 658 case NOTE_NSECONDS: 659 if (data >= 1000000000) { 660 secs = data / 1000000000; 661 #ifdef __LP64__ 662 if (secs > (SBT_MAX / SBT_1S)) 663 return (SBT_MAX); 664 #endif 665 return (secs << 32 | NS_TO_SBT(data % 1000000000)); 666 } 667 return (NS_TO_SBT(data)); 668 default: 669 break; 670 } 671 return (-1); 672 } 673 674 struct kq_timer_cb_data { 675 struct callout c; 676 struct proc *p; 677 struct knote *kn; 678 int cpuid; 679 int flags; 680 TAILQ_ENTRY(kq_timer_cb_data) link; 681 sbintime_t next; /* next timer event fires at */ 682 sbintime_t to; /* precalculated timer period, 0 for abs */ 683 }; 684 685 #define KQ_TIMER_CB_ENQUEUED 0x01 686 687 static void 688 kqtimer_sched_callout(struct kq_timer_cb_data *kc) 689 { 690 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn, 691 kc->cpuid, C_ABSOLUTE); 692 } 693 694 void 695 kqtimer_proc_continue(struct proc *p) 696 { 697 struct kq_timer_cb_data *kc, *kc1; 698 struct bintime bt; 699 sbintime_t now; 700 701 PROC_LOCK_ASSERT(p, MA_OWNED); 702 703 getboottimebin(&bt); 704 now = bttosbt(bt); 705 706 TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) { 707 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link); 708 kc->flags &= ~KQ_TIMER_CB_ENQUEUED; 709 if (kc->next <= now) 710 filt_timerexpire_l(kc->kn, true); 711 else 712 kqtimer_sched_callout(kc); 713 } 714 } 715 716 static void 717 filt_timerexpire_l(struct knote *kn, bool proc_locked) 718 { 719 struct kq_timer_cb_data *kc; 720 struct proc *p; 721 uint64_t delta; 722 sbintime_t now; 723 724 kc = kn->kn_ptr.p_v; 725 726 if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) { 727 kn->kn_data++; 728 KNOTE_ACTIVATE(kn, 0); 729 return; 730 } 731 732 now = sbinuptime(); 733 if (now >= kc->next) { 734 delta = (now - kc->next) / kc->to; 735 if (delta == 0) 736 delta = 1; 737 kn->kn_data += delta; 738 kc->next += delta * kc->to; 739 if (now >= kc->next) /* overflow */ 740 kc->next = now + kc->to; 741 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 742 } 743 744 /* 745 * Initial check for stopped kc->p is racy. It is fine to 746 * miss the set of the stop flags, at worst we would schedule 747 * one more callout. On the other hand, it is not fine to not 748 * schedule when we we missed clearing of the flags, we 749 * recheck them under the lock and observe consistent state. 750 */ 751 p = kc->p; 752 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 753 if (!proc_locked) 754 PROC_LOCK(p); 755 if (P_SHOULDSTOP(p) || P_KILLED(p)) { 756 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) { 757 kc->flags |= KQ_TIMER_CB_ENQUEUED; 758 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link); 759 } 760 if (!proc_locked) 761 PROC_UNLOCK(p); 762 return; 763 } 764 if (!proc_locked) 765 PROC_UNLOCK(p); 766 } 767 kqtimer_sched_callout(kc); 768 } 769 770 static void 771 filt_timerexpire(void *knx) 772 { 773 filt_timerexpire_l(knx, false); 774 } 775 776 /* 777 * data contains amount of time to sleep 778 */ 779 static int 780 filt_timervalidate(struct knote *kn, sbintime_t *to) 781 { 782 struct bintime bt; 783 sbintime_t sbt; 784 785 if (kn->kn_sdata < 0) 786 return (EINVAL); 787 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 788 kn->kn_sdata = 1; 789 /* 790 * The only fflags values supported are the timer unit 791 * (precision) and the absolute time indicator. 792 */ 793 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) 794 return (EINVAL); 795 796 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 797 if (*to < 0) 798 return (EINVAL); 799 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 800 getboottimebin(&bt); 801 sbt = bttosbt(bt); 802 *to = MAX(0, *to - sbt); 803 } 804 return (0); 805 } 806 807 static int 808 filt_timerattach(struct knote *kn) 809 { 810 struct kq_timer_cb_data *kc; 811 sbintime_t to; 812 int error; 813 814 to = -1; 815 error = filt_timervalidate(kn, &to); 816 if (error != 0) 817 return (error); 818 KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 || 819 (kn->kn_sfflags & NOTE_ABSTIME) != 0, 820 ("%s: periodic timer has a calculated zero timeout", __func__)); 821 KASSERT(to >= 0, 822 ("%s: timer has a calculated negative timeout", __func__)); 823 824 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) { 825 atomic_subtract_int(&kq_ncallouts, 1); 826 return (ENOMEM); 827 } 828 829 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) 830 kn->kn_flags |= EV_CLEAR; /* automatically set */ 831 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 832 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); 833 kc->kn = kn; 834 kc->p = curproc; 835 kc->cpuid = PCPU_GET(cpuid); 836 kc->flags = 0; 837 callout_init(&kc->c, 1); 838 filt_timerstart(kn, to); 839 840 return (0); 841 } 842 843 static void 844 filt_timerstart(struct knote *kn, sbintime_t to) 845 { 846 struct kq_timer_cb_data *kc; 847 848 kc = kn->kn_ptr.p_v; 849 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { 850 kc->next = to; 851 kc->to = 0; 852 } else { 853 kc->next = to + sbinuptime(); 854 kc->to = to; 855 } 856 kqtimer_sched_callout(kc); 857 } 858 859 static void 860 filt_timerdetach(struct knote *kn) 861 { 862 struct kq_timer_cb_data *kc; 863 unsigned int old __unused; 864 bool pending; 865 866 kc = kn->kn_ptr.p_v; 867 do { 868 callout_drain(&kc->c); 869 870 /* 871 * kqtimer_proc_continue() might have rescheduled this callout. 872 * Double-check, using the process mutex as an interlock. 873 */ 874 PROC_LOCK(kc->p); 875 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) { 876 kc->flags &= ~KQ_TIMER_CB_ENQUEUED; 877 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link); 878 } 879 pending = callout_pending(&kc->c); 880 PROC_UNLOCK(kc->p); 881 } while (pending); 882 free(kc, M_KQUEUE); 883 old = atomic_fetchadd_int(&kq_ncallouts, -1); 884 KASSERT(old > 0, ("Number of callouts cannot become negative")); 885 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 886 } 887 888 static void 889 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) 890 { 891 struct kq_timer_cb_data *kc; 892 struct kqueue *kq; 893 sbintime_t to; 894 int error; 895 896 switch (type) { 897 case EVENT_REGISTER: 898 /* Handle re-added timers that update data/fflags */ 899 if (kev->flags & EV_ADD) { 900 kc = kn->kn_ptr.p_v; 901 902 /* Drain any existing callout. */ 903 callout_drain(&kc->c); 904 905 /* Throw away any existing undelivered record 906 * of the timer expiration. This is done under 907 * the presumption that if a process is 908 * re-adding this timer with new parameters, 909 * it is no longer interested in what may have 910 * happened under the old parameters. If it is 911 * interested, it can wait for the expiration, 912 * delete the old timer definition, and then 913 * add the new one. 914 * 915 * This has to be done while the kq is locked: 916 * - if enqueued, dequeue 917 * - make it no longer active 918 * - clear the count of expiration events 919 */ 920 kq = kn->kn_kq; 921 KQ_LOCK(kq); 922 if (kn->kn_status & KN_QUEUED) 923 knote_dequeue(kn); 924 925 kn->kn_status &= ~KN_ACTIVE; 926 kn->kn_data = 0; 927 KQ_UNLOCK(kq); 928 929 /* Reschedule timer based on new data/fflags */ 930 kn->kn_sfflags = kev->fflags; 931 kn->kn_sdata = kev->data; 932 error = filt_timervalidate(kn, &to); 933 if (error != 0) { 934 kn->kn_flags |= EV_ERROR; 935 kn->kn_data = error; 936 } else 937 filt_timerstart(kn, to); 938 } 939 break; 940 941 case EVENT_PROCESS: 942 *kev = kn->kn_kevent; 943 if (kn->kn_flags & EV_CLEAR) { 944 kn->kn_data = 0; 945 kn->kn_fflags = 0; 946 } 947 break; 948 949 default: 950 panic("filt_timertouch() - invalid type (%ld)", type); 951 break; 952 } 953 } 954 955 static int 956 filt_timer(struct knote *kn, long hint) 957 { 958 959 return (kn->kn_data != 0); 960 } 961 962 static int 963 filt_userattach(struct knote *kn) 964 { 965 966 /* 967 * EVFILT_USER knotes are not attached to anything in the kernel. 968 */ 969 kn->kn_hook = NULL; 970 if (kn->kn_fflags & NOTE_TRIGGER) 971 kn->kn_hookid = 1; 972 else 973 kn->kn_hookid = 0; 974 return (0); 975 } 976 977 static void 978 filt_userdetach(__unused struct knote *kn) 979 { 980 981 /* 982 * EVFILT_USER knotes are not attached to anything in the kernel. 983 */ 984 } 985 986 static int 987 filt_user(struct knote *kn, __unused long hint) 988 { 989 990 return (kn->kn_hookid); 991 } 992 993 static void 994 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 995 { 996 u_int ffctrl; 997 998 switch (type) { 999 case EVENT_REGISTER: 1000 if (kev->fflags & NOTE_TRIGGER) 1001 kn->kn_hookid = 1; 1002 1003 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 1004 kev->fflags &= NOTE_FFLAGSMASK; 1005 switch (ffctrl) { 1006 case NOTE_FFNOP: 1007 break; 1008 1009 case NOTE_FFAND: 1010 kn->kn_sfflags &= kev->fflags; 1011 break; 1012 1013 case NOTE_FFOR: 1014 kn->kn_sfflags |= kev->fflags; 1015 break; 1016 1017 case NOTE_FFCOPY: 1018 kn->kn_sfflags = kev->fflags; 1019 break; 1020 1021 default: 1022 /* XXX Return error? */ 1023 break; 1024 } 1025 kn->kn_sdata = kev->data; 1026 if (kev->flags & EV_CLEAR) { 1027 kn->kn_hookid = 0; 1028 kn->kn_data = 0; 1029 kn->kn_fflags = 0; 1030 } 1031 break; 1032 1033 case EVENT_PROCESS: 1034 *kev = kn->kn_kevent; 1035 kev->fflags = kn->kn_sfflags; 1036 kev->data = kn->kn_sdata; 1037 if (kn->kn_flags & EV_CLEAR) { 1038 kn->kn_hookid = 0; 1039 kn->kn_data = 0; 1040 kn->kn_fflags = 0; 1041 } 1042 break; 1043 1044 default: 1045 panic("filt_usertouch() - invalid type (%ld)", type); 1046 break; 1047 } 1048 } 1049 1050 int 1051 sys_kqueue(struct thread *td, struct kqueue_args *uap) 1052 { 1053 1054 return (kern_kqueue(td, 0, NULL)); 1055 } 1056 1057 int 1058 sys_kqueuex(struct thread *td, struct kqueuex_args *uap) 1059 { 1060 int flags; 1061 1062 if ((uap->flags & ~(KQUEUE_CLOEXEC)) != 0) 1063 return (EINVAL); 1064 flags = 0; 1065 if ((uap->flags & KQUEUE_CLOEXEC) != 0) 1066 flags |= O_CLOEXEC; 1067 return (kern_kqueue(td, flags, NULL)); 1068 } 1069 1070 static void 1071 kqueue_init(struct kqueue *kq) 1072 { 1073 1074 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); 1075 TAILQ_INIT(&kq->kq_head); 1076 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 1077 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 1078 } 1079 1080 int 1081 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) 1082 { 1083 struct filedesc *fdp; 1084 struct kqueue *kq; 1085 struct file *fp; 1086 struct ucred *cred; 1087 int fd, error; 1088 1089 fdp = td->td_proc->p_fd; 1090 cred = td->td_ucred; 1091 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) 1092 return (ENOMEM); 1093 1094 error = falloc_caps(td, &fp, &fd, flags, fcaps); 1095 if (error != 0) { 1096 chgkqcnt(cred->cr_ruidinfo, -1, 0); 1097 return (error); 1098 } 1099 1100 /* An extra reference on `fp' has been held for us by falloc(). */ 1101 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 1102 kqueue_init(kq); 1103 kq->kq_fdp = fdp; 1104 kq->kq_cred = crhold(cred); 1105 1106 FILEDESC_XLOCK(fdp); 1107 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 1108 FILEDESC_XUNLOCK(fdp); 1109 1110 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 1111 fdrop(fp, td); 1112 1113 td->td_retval[0] = fd; 1114 return (0); 1115 } 1116 1117 struct g_kevent_args { 1118 int fd; 1119 const void *changelist; 1120 int nchanges; 1121 void *eventlist; 1122 int nevents; 1123 const struct timespec *timeout; 1124 }; 1125 1126 int 1127 sys_kevent(struct thread *td, struct kevent_args *uap) 1128 { 1129 struct kevent_copyops k_ops = { 1130 .arg = uap, 1131 .k_copyout = kevent_copyout, 1132 .k_copyin = kevent_copyin, 1133 .kevent_size = sizeof(struct kevent), 1134 }; 1135 struct g_kevent_args gk_args = { 1136 .fd = uap->fd, 1137 .changelist = uap->changelist, 1138 .nchanges = uap->nchanges, 1139 .eventlist = uap->eventlist, 1140 .nevents = uap->nevents, 1141 .timeout = uap->timeout, 1142 }; 1143 1144 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); 1145 } 1146 1147 static int 1148 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, 1149 struct kevent_copyops *k_ops, const char *struct_name) 1150 { 1151 struct timespec ts, *tsp; 1152 #ifdef KTRACE 1153 struct kevent *eventlist = uap->eventlist; 1154 #endif 1155 int error; 1156 1157 if (uap->timeout != NULL) { 1158 error = copyin(uap->timeout, &ts, sizeof(ts)); 1159 if (error) 1160 return (error); 1161 tsp = &ts; 1162 } else 1163 tsp = NULL; 1164 1165 #ifdef KTRACE 1166 if (KTRPOINT(td, KTR_STRUCT_ARRAY)) 1167 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, 1168 uap->nchanges, k_ops->kevent_size); 1169 #endif 1170 1171 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 1172 k_ops, tsp); 1173 1174 #ifdef KTRACE 1175 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) 1176 ktrstructarray(struct_name, UIO_USERSPACE, eventlist, 1177 td->td_retval[0], k_ops->kevent_size); 1178 #endif 1179 1180 return (error); 1181 } 1182 1183 /* 1184 * Copy 'count' items into the destination list pointed to by uap->eventlist. 1185 */ 1186 static int 1187 kevent_copyout(void *arg, struct kevent *kevp, int count) 1188 { 1189 struct kevent_args *uap; 1190 int error; 1191 1192 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1193 uap = (struct kevent_args *)arg; 1194 1195 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 1196 if (error == 0) 1197 uap->eventlist += count; 1198 return (error); 1199 } 1200 1201 /* 1202 * Copy 'count' items from the list pointed to by uap->changelist. 1203 */ 1204 static int 1205 kevent_copyin(void *arg, struct kevent *kevp, int count) 1206 { 1207 struct kevent_args *uap; 1208 int error; 1209 1210 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1211 uap = (struct kevent_args *)arg; 1212 1213 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 1214 if (error == 0) 1215 uap->changelist += count; 1216 return (error); 1217 } 1218 1219 #ifdef COMPAT_FREEBSD11 1220 static int 1221 kevent11_copyout(void *arg, struct kevent *kevp, int count) 1222 { 1223 struct freebsd11_kevent_args *uap; 1224 struct freebsd11_kevent kev11; 1225 int error, i; 1226 1227 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1228 uap = (struct freebsd11_kevent_args *)arg; 1229 1230 for (i = 0; i < count; i++) { 1231 kev11.ident = kevp->ident; 1232 kev11.filter = kevp->filter; 1233 kev11.flags = kevp->flags; 1234 kev11.fflags = kevp->fflags; 1235 kev11.data = kevp->data; 1236 kev11.udata = kevp->udata; 1237 error = copyout(&kev11, uap->eventlist, sizeof(kev11)); 1238 if (error != 0) 1239 break; 1240 uap->eventlist++; 1241 kevp++; 1242 } 1243 return (error); 1244 } 1245 1246 /* 1247 * Copy 'count' items from the list pointed to by uap->changelist. 1248 */ 1249 static int 1250 kevent11_copyin(void *arg, struct kevent *kevp, int count) 1251 { 1252 struct freebsd11_kevent_args *uap; 1253 struct freebsd11_kevent kev11; 1254 int error, i; 1255 1256 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 1257 uap = (struct freebsd11_kevent_args *)arg; 1258 1259 for (i = 0; i < count; i++) { 1260 error = copyin(uap->changelist, &kev11, sizeof(kev11)); 1261 if (error != 0) 1262 break; 1263 kevp->ident = kev11.ident; 1264 kevp->filter = kev11.filter; 1265 kevp->flags = kev11.flags; 1266 kevp->fflags = kev11.fflags; 1267 kevp->data = (uintptr_t)kev11.data; 1268 kevp->udata = kev11.udata; 1269 bzero(&kevp->ext, sizeof(kevp->ext)); 1270 uap->changelist++; 1271 kevp++; 1272 } 1273 return (error); 1274 } 1275 1276 int 1277 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) 1278 { 1279 struct kevent_copyops k_ops = { 1280 .arg = uap, 1281 .k_copyout = kevent11_copyout, 1282 .k_copyin = kevent11_copyin, 1283 .kevent_size = sizeof(struct freebsd11_kevent), 1284 }; 1285 struct g_kevent_args gk_args = { 1286 .fd = uap->fd, 1287 .changelist = uap->changelist, 1288 .nchanges = uap->nchanges, 1289 .eventlist = uap->eventlist, 1290 .nevents = uap->nevents, 1291 .timeout = uap->timeout, 1292 }; 1293 1294 return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent")); 1295 } 1296 #endif 1297 1298 int 1299 kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 1300 struct kevent_copyops *k_ops, const struct timespec *timeout) 1301 { 1302 cap_rights_t rights; 1303 struct file *fp; 1304 int error; 1305 1306 cap_rights_init_zero(&rights); 1307 if (nchanges > 0) 1308 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE); 1309 if (nevents > 0) 1310 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT); 1311 error = fget(td, fd, &rights, &fp); 1312 if (error != 0) 1313 return (error); 1314 1315 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 1316 fdrop(fp, td); 1317 1318 return (error); 1319 } 1320 1321 static int 1322 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, 1323 struct kevent_copyops *k_ops, const struct timespec *timeout) 1324 { 1325 struct kevent keva[KQ_NEVENTS]; 1326 struct kevent *kevp, *changes; 1327 int i, n, nerrors, error; 1328 1329 if (nchanges < 0) 1330 return (EINVAL); 1331 1332 nerrors = 0; 1333 while (nchanges > 0) { 1334 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 1335 error = k_ops->k_copyin(k_ops->arg, keva, n); 1336 if (error) 1337 return (error); 1338 changes = keva; 1339 for (i = 0; i < n; i++) { 1340 kevp = &changes[i]; 1341 if (!kevp->filter) 1342 continue; 1343 kevp->flags &= ~EV_SYSFLAGS; 1344 error = kqueue_register(kq, kevp, td, M_WAITOK); 1345 if (error || (kevp->flags & EV_RECEIPT)) { 1346 if (nevents == 0) 1347 return (error); 1348 kevp->flags = EV_ERROR; 1349 kevp->data = error; 1350 (void)k_ops->k_copyout(k_ops->arg, kevp, 1); 1351 nevents--; 1352 nerrors++; 1353 } 1354 } 1355 nchanges -= n; 1356 } 1357 if (nerrors) { 1358 td->td_retval[0] = nerrors; 1359 return (0); 1360 } 1361 1362 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); 1363 } 1364 1365 int 1366 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 1367 struct kevent_copyops *k_ops, const struct timespec *timeout) 1368 { 1369 struct kqueue *kq; 1370 int error; 1371 1372 error = kqueue_acquire(fp, &kq); 1373 if (error != 0) 1374 return (error); 1375 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); 1376 kqueue_release(kq, 0); 1377 return (error); 1378 } 1379 1380 /* 1381 * Performs a kevent() call on a temporarily created kqueue. This can be 1382 * used to perform one-shot polling, similar to poll() and select(). 1383 */ 1384 int 1385 kern_kevent_anonymous(struct thread *td, int nevents, 1386 struct kevent_copyops *k_ops) 1387 { 1388 struct kqueue kq = {}; 1389 int error; 1390 1391 kqueue_init(&kq); 1392 kq.kq_refcnt = 1; 1393 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); 1394 kqueue_drain(&kq, td); 1395 kqueue_destroy(&kq); 1396 return (error); 1397 } 1398 1399 int 1400 kqueue_add_filteropts(int filt, const struct filterops *filtops) 1401 { 1402 int error; 1403 1404 error = 0; 1405 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 1406 printf( 1407 "trying to add a filterop that is out of range: %d is beyond %d\n", 1408 ~filt, EVFILT_SYSCOUNT); 1409 return EINVAL; 1410 } 1411 mtx_lock(&filterops_lock); 1412 if (sysfilt_ops[~filt].for_fop != &null_filtops && 1413 sysfilt_ops[~filt].for_fop != NULL) 1414 error = EEXIST; 1415 else { 1416 sysfilt_ops[~filt].for_fop = filtops; 1417 sysfilt_ops[~filt].for_refcnt = 0; 1418 } 1419 mtx_unlock(&filterops_lock); 1420 1421 return (error); 1422 } 1423 1424 int 1425 kqueue_del_filteropts(int filt) 1426 { 1427 int error; 1428 1429 error = 0; 1430 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1431 return EINVAL; 1432 1433 mtx_lock(&filterops_lock); 1434 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1435 sysfilt_ops[~filt].for_fop == NULL) 1436 error = EINVAL; 1437 else if (sysfilt_ops[~filt].for_refcnt != 0) 1438 error = EBUSY; 1439 else { 1440 sysfilt_ops[~filt].for_fop = &null_filtops; 1441 sysfilt_ops[~filt].for_refcnt = 0; 1442 } 1443 mtx_unlock(&filterops_lock); 1444 1445 return error; 1446 } 1447 1448 static const struct filterops * 1449 kqueue_fo_find(int filt) 1450 { 1451 1452 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1453 return NULL; 1454 1455 if (sysfilt_ops[~filt].for_nolock) 1456 return sysfilt_ops[~filt].for_fop; 1457 1458 mtx_lock(&filterops_lock); 1459 sysfilt_ops[~filt].for_refcnt++; 1460 if (sysfilt_ops[~filt].for_fop == NULL) 1461 sysfilt_ops[~filt].for_fop = &null_filtops; 1462 mtx_unlock(&filterops_lock); 1463 1464 return sysfilt_ops[~filt].for_fop; 1465 } 1466 1467 static void 1468 kqueue_fo_release(int filt) 1469 { 1470 1471 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1472 return; 1473 1474 if (sysfilt_ops[~filt].for_nolock) 1475 return; 1476 1477 mtx_lock(&filterops_lock); 1478 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1479 ("filter object refcount not valid on release")); 1480 sysfilt_ops[~filt].for_refcnt--; 1481 mtx_unlock(&filterops_lock); 1482 } 1483 1484 /* 1485 * A ref to kq (obtained via kqueue_acquire) must be held. 1486 */ 1487 static int 1488 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, 1489 int mflag) 1490 { 1491 const struct filterops *fops; 1492 struct file *fp; 1493 struct knote *kn, *tkn; 1494 struct knlist *knl; 1495 int error, filt, event; 1496 int haskqglobal, filedesc_unlock; 1497 1498 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) 1499 return (EINVAL); 1500 1501 fp = NULL; 1502 kn = NULL; 1503 knl = NULL; 1504 error = 0; 1505 haskqglobal = 0; 1506 filedesc_unlock = 0; 1507 1508 filt = kev->filter; 1509 fops = kqueue_fo_find(filt); 1510 if (fops == NULL) 1511 return EINVAL; 1512 1513 if (kev->flags & EV_ADD) { 1514 /* Reject an invalid flag pair early */ 1515 if (kev->flags & EV_KEEPUDATA) { 1516 tkn = NULL; 1517 error = EINVAL; 1518 goto done; 1519 } 1520 1521 /* 1522 * Prevent waiting with locks. Non-sleepable 1523 * allocation failures are handled in the loop, only 1524 * if the spare knote appears to be actually required. 1525 */ 1526 tkn = knote_alloc(mflag); 1527 } else { 1528 tkn = NULL; 1529 } 1530 1531 findkn: 1532 if (fops->f_isfd) { 1533 KASSERT(td != NULL, ("td is NULL")); 1534 if (kev->ident > INT_MAX) 1535 error = EBADF; 1536 else 1537 error = fget(td, kev->ident, &cap_event_rights, &fp); 1538 if (error) 1539 goto done; 1540 1541 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1542 kev->ident, M_NOWAIT) != 0) { 1543 /* try again */ 1544 fdrop(fp, td); 1545 fp = NULL; 1546 error = kqueue_expand(kq, fops, kev->ident, mflag); 1547 if (error) 1548 goto done; 1549 goto findkn; 1550 } 1551 1552 if (fp->f_type == DTYPE_KQUEUE) { 1553 /* 1554 * If we add some intelligence about what we are doing, 1555 * we should be able to support events on ourselves. 1556 * We need to know when we are doing this to prevent 1557 * getting both the knlist lock and the kq lock since 1558 * they are the same thing. 1559 */ 1560 if (fp->f_data == kq) { 1561 error = EINVAL; 1562 goto done; 1563 } 1564 1565 /* 1566 * Pre-lock the filedesc before the global 1567 * lock mutex, see the comment in 1568 * kqueue_close(). 1569 */ 1570 FILEDESC_XLOCK(td->td_proc->p_fd); 1571 filedesc_unlock = 1; 1572 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1573 } 1574 1575 KQ_LOCK(kq); 1576 if (kev->ident < kq->kq_knlistsize) { 1577 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1578 if (kev->filter == kn->kn_filter) 1579 break; 1580 } 1581 } else { 1582 if ((kev->flags & EV_ADD) == EV_ADD) { 1583 error = kqueue_expand(kq, fops, kev->ident, mflag); 1584 if (error != 0) 1585 goto done; 1586 } 1587 1588 KQ_LOCK(kq); 1589 1590 /* 1591 * If possible, find an existing knote to use for this kevent. 1592 */ 1593 if (kev->filter == EVFILT_PROC && 1594 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1595 /* This is an internal creation of a process tracking 1596 * note. Don't attempt to coalesce this with an 1597 * existing note. 1598 */ 1599 ; 1600 } else if (kq->kq_knhashmask != 0) { 1601 struct klist *list; 1602 1603 list = &kq->kq_knhash[ 1604 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1605 SLIST_FOREACH(kn, list, kn_link) 1606 if (kev->ident == kn->kn_id && 1607 kev->filter == kn->kn_filter) 1608 break; 1609 } 1610 } 1611 1612 /* knote is in the process of changing, wait for it to stabilize. */ 1613 if (kn != NULL && kn_in_flux(kn)) { 1614 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1615 if (filedesc_unlock) { 1616 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1617 filedesc_unlock = 0; 1618 } 1619 kq->kq_state |= KQ_FLUXWAIT; 1620 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1621 if (fp != NULL) { 1622 fdrop(fp, td); 1623 fp = NULL; 1624 } 1625 goto findkn; 1626 } 1627 1628 /* 1629 * kn now contains the matching knote, or NULL if no match 1630 */ 1631 if (kn == NULL) { 1632 if (kev->flags & EV_ADD) { 1633 kn = tkn; 1634 tkn = NULL; 1635 if (kn == NULL) { 1636 KQ_UNLOCK(kq); 1637 error = ENOMEM; 1638 goto done; 1639 } 1640 kn->kn_fp = fp; 1641 kn->kn_kq = kq; 1642 kn->kn_fop = fops; 1643 /* 1644 * apply reference counts to knote structure, and 1645 * do not release it at the end of this routine. 1646 */ 1647 fops = NULL; 1648 fp = NULL; 1649 1650 kn->kn_sfflags = kev->fflags; 1651 kn->kn_sdata = kev->data; 1652 kev->fflags = 0; 1653 kev->data = 0; 1654 kn->kn_kevent = *kev; 1655 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1656 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1657 kn->kn_status = KN_DETACHED; 1658 if ((kev->flags & EV_DISABLE) != 0) 1659 kn->kn_status |= KN_DISABLED; 1660 kn_enter_flux(kn); 1661 1662 error = knote_attach(kn, kq); 1663 KQ_UNLOCK(kq); 1664 if (error != 0) { 1665 tkn = kn; 1666 goto done; 1667 } 1668 1669 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1670 knote_drop_detached(kn, td); 1671 goto done; 1672 } 1673 knl = kn_list_lock(kn); 1674 goto done_ev_add; 1675 } else { 1676 /* No matching knote and the EV_ADD flag is not set. */ 1677 KQ_UNLOCK(kq); 1678 error = ENOENT; 1679 goto done; 1680 } 1681 } 1682 1683 if (kev->flags & EV_DELETE) { 1684 kn_enter_flux(kn); 1685 KQ_UNLOCK(kq); 1686 knote_drop(kn, td); 1687 goto done; 1688 } 1689 1690 if (kev->flags & EV_FORCEONESHOT) { 1691 kn->kn_flags |= EV_ONESHOT; 1692 KNOTE_ACTIVATE(kn, 1); 1693 } 1694 1695 if ((kev->flags & EV_ENABLE) != 0) 1696 kn->kn_status &= ~KN_DISABLED; 1697 else if ((kev->flags & EV_DISABLE) != 0) 1698 kn->kn_status |= KN_DISABLED; 1699 1700 /* 1701 * The user may change some filter values after the initial EV_ADD, 1702 * but doing so will not reset any filter which has already been 1703 * triggered. 1704 */ 1705 kn->kn_status |= KN_SCAN; 1706 kn_enter_flux(kn); 1707 KQ_UNLOCK(kq); 1708 knl = kn_list_lock(kn); 1709 if ((kev->flags & EV_KEEPUDATA) == 0) 1710 kn->kn_kevent.udata = kev->udata; 1711 if (!fops->f_isfd && fops->f_touch != NULL) { 1712 fops->f_touch(kn, kev, EVENT_REGISTER); 1713 } else { 1714 kn->kn_sfflags = kev->fflags; 1715 kn->kn_sdata = kev->data; 1716 } 1717 1718 done_ev_add: 1719 /* 1720 * We can get here with kn->kn_knlist == NULL. This can happen when 1721 * the initial attach event decides that the event is "completed" 1722 * already, e.g., filt_procattach() is called on a zombie process. It 1723 * will call filt_proc() which will remove it from the list, and NULL 1724 * kn_knlist. 1725 * 1726 * KN_DISABLED will be stable while the knote is in flux, so the 1727 * unlocked read will not race with an update. 1728 */ 1729 if ((kn->kn_status & KN_DISABLED) == 0) 1730 event = kn->kn_fop->f_event(kn, 0); 1731 else 1732 event = 0; 1733 1734 KQ_LOCK(kq); 1735 if (event) 1736 kn->kn_status |= KN_ACTIVE; 1737 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1738 KN_ACTIVE) 1739 knote_enqueue(kn); 1740 kn->kn_status &= ~KN_SCAN; 1741 kn_leave_flux(kn); 1742 kn_list_unlock(knl); 1743 KQ_UNLOCK_FLUX(kq); 1744 1745 done: 1746 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1747 if (filedesc_unlock) 1748 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1749 if (fp != NULL) 1750 fdrop(fp, td); 1751 knote_free(tkn); 1752 if (fops != NULL) 1753 kqueue_fo_release(filt); 1754 return (error); 1755 } 1756 1757 static int 1758 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1759 { 1760 int error; 1761 struct kqueue *kq; 1762 1763 error = 0; 1764 1765 kq = fp->f_data; 1766 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1767 return (EBADF); 1768 *kqp = kq; 1769 KQ_LOCK(kq); 1770 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1771 KQ_UNLOCK(kq); 1772 return (EBADF); 1773 } 1774 kq->kq_refcnt++; 1775 KQ_UNLOCK(kq); 1776 1777 return error; 1778 } 1779 1780 static void 1781 kqueue_release(struct kqueue *kq, int locked) 1782 { 1783 if (locked) 1784 KQ_OWNED(kq); 1785 else 1786 KQ_LOCK(kq); 1787 kq->kq_refcnt--; 1788 if (kq->kq_refcnt == 1) 1789 wakeup(&kq->kq_refcnt); 1790 if (!locked) 1791 KQ_UNLOCK(kq); 1792 } 1793 1794 static void 1795 ast_kqueue(struct thread *td, int tda __unused) 1796 { 1797 taskqueue_quiesce(taskqueue_kqueue_ctx); 1798 } 1799 1800 static void 1801 kqueue_schedtask(struct kqueue *kq) 1802 { 1803 KQ_OWNED(kq); 1804 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1805 ("scheduling kqueue task while draining")); 1806 1807 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1808 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1809 kq->kq_state |= KQ_TASKSCHED; 1810 ast_sched(curthread, TDA_KQUEUE); 1811 } 1812 } 1813 1814 /* 1815 * Expand the kq to make sure we have storage for fops/ident pair. 1816 * 1817 * Return 0 on success (or no work necessary), return errno on failure. 1818 */ 1819 static int 1820 kqueue_expand(struct kqueue *kq, const struct filterops *fops, uintptr_t ident, 1821 int mflag) 1822 { 1823 struct klist *list, *tmp_knhash, *to_free; 1824 u_long tmp_knhashmask; 1825 int error, fd, size; 1826 1827 KQ_NOTOWNED(kq); 1828 1829 error = 0; 1830 to_free = NULL; 1831 if (fops->f_isfd) { 1832 fd = ident; 1833 if (kq->kq_knlistsize <= fd) { 1834 size = kq->kq_knlistsize; 1835 while (size <= fd) 1836 size += KQEXTENT; 1837 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1838 if (list == NULL) 1839 return ENOMEM; 1840 KQ_LOCK(kq); 1841 if ((kq->kq_state & KQ_CLOSING) != 0) { 1842 to_free = list; 1843 error = EBADF; 1844 } else if (kq->kq_knlistsize > fd) { 1845 to_free = list; 1846 } else { 1847 if (kq->kq_knlist != NULL) { 1848 bcopy(kq->kq_knlist, list, 1849 kq->kq_knlistsize * sizeof(*list)); 1850 to_free = kq->kq_knlist; 1851 kq->kq_knlist = NULL; 1852 } 1853 bzero((caddr_t)list + 1854 kq->kq_knlistsize * sizeof(*list), 1855 (size - kq->kq_knlistsize) * sizeof(*list)); 1856 kq->kq_knlistsize = size; 1857 kq->kq_knlist = list; 1858 } 1859 KQ_UNLOCK(kq); 1860 } 1861 } else { 1862 if (kq->kq_knhashmask == 0) { 1863 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, 1864 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? 1865 HASH_WAITOK : HASH_NOWAIT); 1866 if (tmp_knhash == NULL) 1867 return (ENOMEM); 1868 KQ_LOCK(kq); 1869 if ((kq->kq_state & KQ_CLOSING) != 0) { 1870 to_free = tmp_knhash; 1871 error = EBADF; 1872 } else if (kq->kq_knhashmask == 0) { 1873 kq->kq_knhash = tmp_knhash; 1874 kq->kq_knhashmask = tmp_knhashmask; 1875 } else { 1876 to_free = tmp_knhash; 1877 } 1878 KQ_UNLOCK(kq); 1879 } 1880 } 1881 free(to_free, M_KQUEUE); 1882 1883 KQ_NOTOWNED(kq); 1884 return (error); 1885 } 1886 1887 static void 1888 kqueue_task(void *arg, int pending) 1889 { 1890 struct kqueue *kq; 1891 int haskqglobal; 1892 1893 haskqglobal = 0; 1894 kq = arg; 1895 1896 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1897 KQ_LOCK(kq); 1898 1899 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1900 1901 kq->kq_state &= ~KQ_TASKSCHED; 1902 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1903 wakeup(&kq->kq_state); 1904 } 1905 KQ_UNLOCK(kq); 1906 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1907 } 1908 1909 /* 1910 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1911 * We treat KN_MARKER knotes as if they are in flux. 1912 */ 1913 static int 1914 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1915 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1916 { 1917 struct kevent *kevp; 1918 struct knote *kn, *marker; 1919 struct knlist *knl; 1920 sbintime_t asbt, rsbt; 1921 int count, error, haskqglobal, influx, nkev, touch; 1922 1923 count = maxevents; 1924 nkev = 0; 1925 error = 0; 1926 haskqglobal = 0; 1927 1928 if (maxevents == 0) 1929 goto done_nl; 1930 if (maxevents < 0) { 1931 error = EINVAL; 1932 goto done_nl; 1933 } 1934 1935 rsbt = 0; 1936 if (tsp != NULL) { 1937 if (!timespecvalid_interval(tsp)) { 1938 error = EINVAL; 1939 goto done_nl; 1940 } 1941 if (timespecisset(tsp)) { 1942 if (tsp->tv_sec <= INT32_MAX) { 1943 rsbt = tstosbt(*tsp); 1944 if (TIMESEL(&asbt, rsbt)) 1945 asbt += tc_tick_sbt; 1946 if (asbt <= SBT_MAX - rsbt) 1947 asbt += rsbt; 1948 else 1949 asbt = 0; 1950 rsbt >>= tc_precexp; 1951 } else 1952 asbt = 0; 1953 } else 1954 asbt = -1; 1955 } else 1956 asbt = 0; 1957 marker = knote_alloc(M_WAITOK); 1958 marker->kn_status = KN_MARKER; 1959 KQ_LOCK(kq); 1960 1961 retry: 1962 kevp = keva; 1963 if (kq->kq_count == 0) { 1964 if (asbt == -1) { 1965 error = EWOULDBLOCK; 1966 } else { 1967 kq->kq_state |= KQ_SLEEP; 1968 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1969 "kqread", asbt, rsbt, C_ABSOLUTE); 1970 } 1971 if (error == 0) 1972 goto retry; 1973 /* don't restart after signals... */ 1974 if (error == ERESTART) 1975 error = EINTR; 1976 else if (error == EWOULDBLOCK) 1977 error = 0; 1978 goto done; 1979 } 1980 1981 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1982 influx = 0; 1983 while (count) { 1984 KQ_OWNED(kq); 1985 kn = TAILQ_FIRST(&kq->kq_head); 1986 1987 if ((kn->kn_status == KN_MARKER && kn != marker) || 1988 kn_in_flux(kn)) { 1989 if (influx) { 1990 influx = 0; 1991 KQ_FLUX_WAKEUP(kq); 1992 } 1993 kq->kq_state |= KQ_FLUXWAIT; 1994 error = msleep(kq, &kq->kq_lock, PSOCK, 1995 "kqflxwt", 0); 1996 continue; 1997 } 1998 1999 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2000 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 2001 kn->kn_status &= ~KN_QUEUED; 2002 kq->kq_count--; 2003 continue; 2004 } 2005 if (kn == marker) { 2006 KQ_FLUX_WAKEUP(kq); 2007 if (count == maxevents) 2008 goto retry; 2009 goto done; 2010 } 2011 KASSERT(!kn_in_flux(kn), 2012 ("knote %p is unexpectedly in flux", kn)); 2013 2014 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 2015 kn->kn_status &= ~KN_QUEUED; 2016 kn_enter_flux(kn); 2017 kq->kq_count--; 2018 KQ_UNLOCK(kq); 2019 /* 2020 * We don't need to lock the list since we've 2021 * marked it as in flux. 2022 */ 2023 knote_drop(kn, td); 2024 KQ_LOCK(kq); 2025 continue; 2026 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 2027 kn->kn_status &= ~KN_QUEUED; 2028 kn_enter_flux(kn); 2029 kq->kq_count--; 2030 KQ_UNLOCK(kq); 2031 /* 2032 * We don't need to lock the list since we've 2033 * marked the knote as being in flux. 2034 */ 2035 *kevp = kn->kn_kevent; 2036 knote_drop(kn, td); 2037 KQ_LOCK(kq); 2038 kn = NULL; 2039 } else { 2040 kn->kn_status |= KN_SCAN; 2041 kn_enter_flux(kn); 2042 KQ_UNLOCK(kq); 2043 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 2044 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 2045 knl = kn_list_lock(kn); 2046 if (kn->kn_fop->f_event(kn, 0) == 0) { 2047 KQ_LOCK(kq); 2048 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2049 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 2050 KN_SCAN); 2051 kn_leave_flux(kn); 2052 kq->kq_count--; 2053 kn_list_unlock(knl); 2054 influx = 1; 2055 continue; 2056 } 2057 touch = (!kn->kn_fop->f_isfd && 2058 kn->kn_fop->f_touch != NULL); 2059 if (touch) 2060 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 2061 else 2062 *kevp = kn->kn_kevent; 2063 KQ_LOCK(kq); 2064 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2065 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 2066 /* 2067 * Manually clear knotes who weren't 2068 * 'touch'ed. 2069 */ 2070 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 2071 kn->kn_data = 0; 2072 kn->kn_fflags = 0; 2073 } 2074 if (kn->kn_flags & EV_DISPATCH) 2075 kn->kn_status |= KN_DISABLED; 2076 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 2077 kq->kq_count--; 2078 } else 2079 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2080 2081 kn->kn_status &= ~KN_SCAN; 2082 kn_leave_flux(kn); 2083 kn_list_unlock(knl); 2084 influx = 1; 2085 } 2086 2087 /* we are returning a copy to the user */ 2088 kevp++; 2089 nkev++; 2090 count--; 2091 2092 if (nkev == KQ_NEVENTS) { 2093 influx = 0; 2094 KQ_UNLOCK_FLUX(kq); 2095 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2096 nkev = 0; 2097 kevp = keva; 2098 KQ_LOCK(kq); 2099 if (error) 2100 break; 2101 } 2102 } 2103 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 2104 done: 2105 KQ_OWNED(kq); 2106 KQ_UNLOCK_FLUX(kq); 2107 knote_free(marker); 2108 done_nl: 2109 KQ_NOTOWNED(kq); 2110 if (nkev != 0) 2111 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2112 td->td_retval[0] = maxevents - count; 2113 return (error); 2114 } 2115 2116 /*ARGSUSED*/ 2117 static int 2118 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 2119 struct ucred *active_cred, struct thread *td) 2120 { 2121 /* 2122 * Enabling sigio causes two major problems: 2123 * 1) infinite recursion: 2124 * Synopsys: kevent is being used to track signals and have FIOASYNC 2125 * set. On receipt of a signal this will cause a kqueue to recurse 2126 * into itself over and over. Sending the sigio causes the kqueue 2127 * to become ready, which in turn posts sigio again, forever. 2128 * Solution: this can be solved by setting a flag in the kqueue that 2129 * we have a SIGIO in progress. 2130 * 2) locking problems: 2131 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 2132 * us above the proc and pgrp locks. 2133 * Solution: Post a signal using an async mechanism, being sure to 2134 * record a generation count in the delivery so that we do not deliver 2135 * a signal to the wrong process. 2136 * 2137 * Note, these two mechanisms are somewhat mutually exclusive! 2138 */ 2139 #if 0 2140 struct kqueue *kq; 2141 2142 kq = fp->f_data; 2143 switch (cmd) { 2144 case FIOASYNC: 2145 if (*(int *)data) { 2146 kq->kq_state |= KQ_ASYNC; 2147 } else { 2148 kq->kq_state &= ~KQ_ASYNC; 2149 } 2150 return (0); 2151 2152 case FIOSETOWN: 2153 return (fsetown(*(int *)data, &kq->kq_sigio)); 2154 2155 case FIOGETOWN: 2156 *(int *)data = fgetown(&kq->kq_sigio); 2157 return (0); 2158 } 2159 #endif 2160 2161 return (ENOTTY); 2162 } 2163 2164 /*ARGSUSED*/ 2165 static int 2166 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 2167 struct thread *td) 2168 { 2169 struct kqueue *kq; 2170 int revents = 0; 2171 int error; 2172 2173 if ((error = kqueue_acquire(fp, &kq))) 2174 return POLLERR; 2175 2176 KQ_LOCK(kq); 2177 if (events & (POLLIN | POLLRDNORM)) { 2178 if (kq->kq_count) { 2179 revents |= events & (POLLIN | POLLRDNORM); 2180 } else { 2181 selrecord(td, &kq->kq_sel); 2182 if (SEL_WAITING(&kq->kq_sel)) 2183 kq->kq_state |= KQ_SEL; 2184 } 2185 } 2186 kqueue_release(kq, 1); 2187 KQ_UNLOCK(kq); 2188 return (revents); 2189 } 2190 2191 /*ARGSUSED*/ 2192 static int 2193 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred) 2194 { 2195 2196 bzero((void *)st, sizeof *st); 2197 /* 2198 * We no longer return kq_count because the unlocked value is useless. 2199 * If you spent all this time getting the count, why not spend your 2200 * syscall better by calling kevent? 2201 * 2202 * XXX - This is needed for libc_r. 2203 */ 2204 st->st_mode = S_IFIFO; 2205 return (0); 2206 } 2207 2208 static void 2209 kqueue_drain(struct kqueue *kq, struct thread *td) 2210 { 2211 struct knote *kn; 2212 int i; 2213 2214 KQ_LOCK(kq); 2215 2216 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2217 ("kqueue already closing")); 2218 kq->kq_state |= KQ_CLOSING; 2219 if (kq->kq_refcnt > 1) 2220 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2221 2222 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2223 2224 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2225 ("kqueue's knlist not empty")); 2226 2227 for (i = 0; i < kq->kq_knlistsize; i++) { 2228 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2229 if (kn_in_flux(kn)) { 2230 kq->kq_state |= KQ_FLUXWAIT; 2231 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2232 continue; 2233 } 2234 kn_enter_flux(kn); 2235 KQ_UNLOCK(kq); 2236 knote_drop(kn, td); 2237 KQ_LOCK(kq); 2238 } 2239 } 2240 if (kq->kq_knhashmask != 0) { 2241 for (i = 0; i <= kq->kq_knhashmask; i++) { 2242 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2243 if (kn_in_flux(kn)) { 2244 kq->kq_state |= KQ_FLUXWAIT; 2245 msleep(kq, &kq->kq_lock, PSOCK, 2246 "kqclo2", 0); 2247 continue; 2248 } 2249 kn_enter_flux(kn); 2250 KQ_UNLOCK(kq); 2251 knote_drop(kn, td); 2252 KQ_LOCK(kq); 2253 } 2254 } 2255 } 2256 2257 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2258 kq->kq_state |= KQ_TASKDRAIN; 2259 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2260 } 2261 2262 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2263 selwakeuppri(&kq->kq_sel, PSOCK); 2264 if (!SEL_WAITING(&kq->kq_sel)) 2265 kq->kq_state &= ~KQ_SEL; 2266 } 2267 2268 KQ_UNLOCK(kq); 2269 } 2270 2271 static void 2272 kqueue_destroy(struct kqueue *kq) 2273 { 2274 2275 KASSERT(kq->kq_fdp == NULL, 2276 ("kqueue still attached to a file descriptor")); 2277 seldrain(&kq->kq_sel); 2278 knlist_destroy(&kq->kq_sel.si_note); 2279 mtx_destroy(&kq->kq_lock); 2280 2281 if (kq->kq_knhash != NULL) 2282 free(kq->kq_knhash, M_KQUEUE); 2283 if (kq->kq_knlist != NULL) 2284 free(kq->kq_knlist, M_KQUEUE); 2285 2286 funsetown(&kq->kq_sigio); 2287 } 2288 2289 /*ARGSUSED*/ 2290 static int 2291 kqueue_close(struct file *fp, struct thread *td) 2292 { 2293 struct kqueue *kq = fp->f_data; 2294 struct filedesc *fdp; 2295 int error; 2296 int filedesc_unlock; 2297 2298 if ((error = kqueue_acquire(fp, &kq))) 2299 return error; 2300 kqueue_drain(kq, td); 2301 2302 /* 2303 * We could be called due to the knote_drop() doing fdrop(), 2304 * called from kqueue_register(). In this case the global 2305 * lock is owned, and filedesc sx is locked before, to not 2306 * take the sleepable lock after non-sleepable. 2307 */ 2308 fdp = kq->kq_fdp; 2309 kq->kq_fdp = NULL; 2310 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2311 FILEDESC_XLOCK(fdp); 2312 filedesc_unlock = 1; 2313 } else 2314 filedesc_unlock = 0; 2315 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2316 if (filedesc_unlock) 2317 FILEDESC_XUNLOCK(fdp); 2318 2319 kqueue_destroy(kq); 2320 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2321 crfree(kq->kq_cred); 2322 free(kq, M_KQUEUE); 2323 fp->f_data = NULL; 2324 2325 return (0); 2326 } 2327 2328 static int 2329 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2330 { 2331 struct kqueue *kq = fp->f_data; 2332 2333 kif->kf_type = KF_TYPE_KQUEUE; 2334 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq; 2335 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count; 2336 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state; 2337 return (0); 2338 } 2339 2340 static void 2341 kqueue_wakeup(struct kqueue *kq) 2342 { 2343 KQ_OWNED(kq); 2344 2345 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2346 kq->kq_state &= ~KQ_SLEEP; 2347 wakeup(kq); 2348 } 2349 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2350 selwakeuppri(&kq->kq_sel, PSOCK); 2351 if (!SEL_WAITING(&kq->kq_sel)) 2352 kq->kq_state &= ~KQ_SEL; 2353 } 2354 if (!knlist_empty(&kq->kq_sel.si_note)) 2355 kqueue_schedtask(kq); 2356 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2357 pgsigio(&kq->kq_sigio, SIGIO, 0); 2358 } 2359 } 2360 2361 /* 2362 * Walk down a list of knotes, activating them if their event has triggered. 2363 * 2364 * There is a possibility to optimize in the case of one kq watching another. 2365 * Instead of scheduling a task to wake it up, you could pass enough state 2366 * down the chain to make up the parent kqueue. Make this code functional 2367 * first. 2368 */ 2369 void 2370 knote(struct knlist *list, long hint, int lockflags) 2371 { 2372 struct kqueue *kq; 2373 struct knote *kn, *tkn; 2374 int error; 2375 2376 if (list == NULL) 2377 return; 2378 2379 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2380 2381 if ((lockflags & KNF_LISTLOCKED) == 0) 2382 list->kl_lock(list->kl_lockarg); 2383 2384 /* 2385 * If we unlock the list lock (and enter influx), we can 2386 * eliminate the kqueue scheduling, but this will introduce 2387 * four lock/unlock's for each knote to test. Also, marker 2388 * would be needed to keep iteration position, since filters 2389 * or other threads could remove events. 2390 */ 2391 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2392 kq = kn->kn_kq; 2393 KQ_LOCK(kq); 2394 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2395 /* 2396 * Do not process the influx notes, except for 2397 * the influx coming from the kq unlock in the 2398 * kqueue_scan(). In the later case, we do 2399 * not interfere with the scan, since the code 2400 * fragment in kqueue_scan() locks the knlist, 2401 * and cannot proceed until we finished. 2402 */ 2403 KQ_UNLOCK(kq); 2404 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2405 kn_enter_flux(kn); 2406 KQ_UNLOCK(kq); 2407 error = kn->kn_fop->f_event(kn, hint); 2408 KQ_LOCK(kq); 2409 kn_leave_flux(kn); 2410 if (error) 2411 KNOTE_ACTIVATE(kn, 1); 2412 KQ_UNLOCK_FLUX(kq); 2413 } else { 2414 if (kn->kn_fop->f_event(kn, hint)) 2415 KNOTE_ACTIVATE(kn, 1); 2416 KQ_UNLOCK(kq); 2417 } 2418 } 2419 if ((lockflags & KNF_LISTLOCKED) == 0) 2420 list->kl_unlock(list->kl_lockarg); 2421 } 2422 2423 /* 2424 * add a knote to a knlist 2425 */ 2426 void 2427 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2428 { 2429 2430 KNL_ASSERT_LOCK(knl, islocked); 2431 KQ_NOTOWNED(kn->kn_kq); 2432 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2433 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2434 ("knote %p was not detached", kn)); 2435 if (!islocked) 2436 knl->kl_lock(knl->kl_lockarg); 2437 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2438 if (!islocked) 2439 knl->kl_unlock(knl->kl_lockarg); 2440 KQ_LOCK(kn->kn_kq); 2441 kn->kn_knlist = knl; 2442 kn->kn_status &= ~KN_DETACHED; 2443 KQ_UNLOCK(kn->kn_kq); 2444 } 2445 2446 static void 2447 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2448 int kqislocked) 2449 { 2450 2451 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2452 KNL_ASSERT_LOCK(knl, knlislocked); 2453 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2454 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2455 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2456 ("knote %p was already detached", kn)); 2457 if (!knlislocked) 2458 knl->kl_lock(knl->kl_lockarg); 2459 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2460 kn->kn_knlist = NULL; 2461 if (!knlislocked) 2462 kn_list_unlock(knl); 2463 if (!kqislocked) 2464 KQ_LOCK(kn->kn_kq); 2465 kn->kn_status |= KN_DETACHED; 2466 if (!kqislocked) 2467 KQ_UNLOCK(kn->kn_kq); 2468 } 2469 2470 /* 2471 * remove knote from the specified knlist 2472 */ 2473 void 2474 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2475 { 2476 2477 knlist_remove_kq(knl, kn, islocked, 0); 2478 } 2479 2480 int 2481 knlist_empty(struct knlist *knl) 2482 { 2483 2484 KNL_ASSERT_LOCKED(knl); 2485 return (SLIST_EMPTY(&knl->kl_list)); 2486 } 2487 2488 static struct mtx knlist_lock; 2489 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2490 MTX_DEF); 2491 static void knlist_mtx_lock(void *arg); 2492 static void knlist_mtx_unlock(void *arg); 2493 2494 static void 2495 knlist_mtx_lock(void *arg) 2496 { 2497 2498 mtx_lock((struct mtx *)arg); 2499 } 2500 2501 static void 2502 knlist_mtx_unlock(void *arg) 2503 { 2504 2505 mtx_unlock((struct mtx *)arg); 2506 } 2507 2508 static void 2509 knlist_mtx_assert_lock(void *arg, int what) 2510 { 2511 2512 if (what == LA_LOCKED) 2513 mtx_assert((struct mtx *)arg, MA_OWNED); 2514 else 2515 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2516 } 2517 2518 void 2519 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2520 void (*kl_unlock)(void *), 2521 void (*kl_assert_lock)(void *, int)) 2522 { 2523 2524 if (lock == NULL) 2525 knl->kl_lockarg = &knlist_lock; 2526 else 2527 knl->kl_lockarg = lock; 2528 2529 if (kl_lock == NULL) 2530 knl->kl_lock = knlist_mtx_lock; 2531 else 2532 knl->kl_lock = kl_lock; 2533 if (kl_unlock == NULL) 2534 knl->kl_unlock = knlist_mtx_unlock; 2535 else 2536 knl->kl_unlock = kl_unlock; 2537 if (kl_assert_lock == NULL) 2538 knl->kl_assert_lock = knlist_mtx_assert_lock; 2539 else 2540 knl->kl_assert_lock = kl_assert_lock; 2541 2542 knl->kl_autodestroy = 0; 2543 SLIST_INIT(&knl->kl_list); 2544 } 2545 2546 void 2547 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2548 { 2549 2550 knlist_init(knl, lock, NULL, NULL, NULL); 2551 } 2552 2553 struct knlist * 2554 knlist_alloc(struct mtx *lock) 2555 { 2556 struct knlist *knl; 2557 2558 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); 2559 knlist_init_mtx(knl, lock); 2560 return (knl); 2561 } 2562 2563 void 2564 knlist_destroy(struct knlist *knl) 2565 { 2566 2567 KASSERT(KNLIST_EMPTY(knl), 2568 ("destroying knlist %p with knotes on it", knl)); 2569 } 2570 2571 void 2572 knlist_detach(struct knlist *knl) 2573 { 2574 2575 KNL_ASSERT_LOCKED(knl); 2576 knl->kl_autodestroy = 1; 2577 if (knlist_empty(knl)) { 2578 knlist_destroy(knl); 2579 free(knl, M_KQUEUE); 2580 } 2581 } 2582 2583 /* 2584 * Even if we are locked, we may need to drop the lock to allow any influx 2585 * knotes time to "settle". 2586 */ 2587 void 2588 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2589 { 2590 struct knote *kn, *kn2; 2591 struct kqueue *kq; 2592 2593 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl)); 2594 if (islocked) 2595 KNL_ASSERT_LOCKED(knl); 2596 else { 2597 KNL_ASSERT_UNLOCKED(knl); 2598 again: /* need to reacquire lock since we have dropped it */ 2599 knl->kl_lock(knl->kl_lockarg); 2600 } 2601 2602 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2603 kq = kn->kn_kq; 2604 KQ_LOCK(kq); 2605 if (kn_in_flux(kn)) { 2606 KQ_UNLOCK(kq); 2607 continue; 2608 } 2609 knlist_remove_kq(knl, kn, 1, 1); 2610 if (killkn) { 2611 kn_enter_flux(kn); 2612 KQ_UNLOCK(kq); 2613 knote_drop_detached(kn, td); 2614 } else { 2615 /* Make sure cleared knotes disappear soon */ 2616 kn->kn_flags |= EV_EOF | EV_ONESHOT; 2617 KQ_UNLOCK(kq); 2618 } 2619 kq = NULL; 2620 } 2621 2622 if (!SLIST_EMPTY(&knl->kl_list)) { 2623 /* there are still in flux knotes remaining */ 2624 kn = SLIST_FIRST(&knl->kl_list); 2625 kq = kn->kn_kq; 2626 KQ_LOCK(kq); 2627 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock")); 2628 knl->kl_unlock(knl->kl_lockarg); 2629 kq->kq_state |= KQ_FLUXWAIT; 2630 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2631 kq = NULL; 2632 goto again; 2633 } 2634 2635 if (islocked) 2636 KNL_ASSERT_LOCKED(knl); 2637 else { 2638 knl->kl_unlock(knl->kl_lockarg); 2639 KNL_ASSERT_UNLOCKED(knl); 2640 } 2641 } 2642 2643 /* 2644 * Remove all knotes referencing a specified fd must be called with FILEDESC 2645 * lock. This prevents a race where a new fd comes along and occupies the 2646 * entry and we attach a knote to the fd. 2647 */ 2648 void 2649 knote_fdclose(struct thread *td, int fd) 2650 { 2651 struct filedesc *fdp = td->td_proc->p_fd; 2652 struct kqueue *kq; 2653 struct knote *kn; 2654 int influx; 2655 2656 FILEDESC_XLOCK_ASSERT(fdp); 2657 2658 /* 2659 * We shouldn't have to worry about new kevents appearing on fd 2660 * since filedesc is locked. 2661 */ 2662 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2663 KQ_LOCK(kq); 2664 2665 again: 2666 influx = 0; 2667 while (kq->kq_knlistsize > fd && 2668 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2669 if (kn_in_flux(kn)) { 2670 /* someone else might be waiting on our knote */ 2671 if (influx) 2672 wakeup(kq); 2673 kq->kq_state |= KQ_FLUXWAIT; 2674 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2675 goto again; 2676 } 2677 kn_enter_flux(kn); 2678 KQ_UNLOCK(kq); 2679 influx = 1; 2680 knote_drop(kn, td); 2681 KQ_LOCK(kq); 2682 } 2683 KQ_UNLOCK_FLUX(kq); 2684 } 2685 } 2686 2687 static int 2688 knote_attach(struct knote *kn, struct kqueue *kq) 2689 { 2690 struct klist *list; 2691 2692 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2693 KQ_OWNED(kq); 2694 2695 if ((kq->kq_state & KQ_CLOSING) != 0) 2696 return (EBADF); 2697 if (kn->kn_fop->f_isfd) { 2698 if (kn->kn_id >= kq->kq_knlistsize) 2699 return (ENOMEM); 2700 list = &kq->kq_knlist[kn->kn_id]; 2701 } else { 2702 if (kq->kq_knhash == NULL) 2703 return (ENOMEM); 2704 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2705 } 2706 SLIST_INSERT_HEAD(list, kn, kn_link); 2707 return (0); 2708 } 2709 2710 static void 2711 knote_drop(struct knote *kn, struct thread *td) 2712 { 2713 2714 if ((kn->kn_status & KN_DETACHED) == 0) 2715 kn->kn_fop->f_detach(kn); 2716 knote_drop_detached(kn, td); 2717 } 2718 2719 static void 2720 knote_drop_detached(struct knote *kn, struct thread *td) 2721 { 2722 struct kqueue *kq; 2723 struct klist *list; 2724 2725 kq = kn->kn_kq; 2726 2727 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2728 ("knote %p still attached", kn)); 2729 KQ_NOTOWNED(kq); 2730 2731 KQ_LOCK(kq); 2732 KASSERT(kn->kn_influx == 1, 2733 ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); 2734 2735 if (kn->kn_fop->f_isfd) 2736 list = &kq->kq_knlist[kn->kn_id]; 2737 else 2738 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2739 2740 if (!SLIST_EMPTY(list)) 2741 SLIST_REMOVE(list, kn, knote, kn_link); 2742 if (kn->kn_status & KN_QUEUED) 2743 knote_dequeue(kn); 2744 KQ_UNLOCK_FLUX(kq); 2745 2746 if (kn->kn_fop->f_isfd) { 2747 fdrop(kn->kn_fp, td); 2748 kn->kn_fp = NULL; 2749 } 2750 kqueue_fo_release(kn->kn_kevent.filter); 2751 kn->kn_fop = NULL; 2752 knote_free(kn); 2753 } 2754 2755 static void 2756 knote_enqueue(struct knote *kn) 2757 { 2758 struct kqueue *kq = kn->kn_kq; 2759 2760 KQ_OWNED(kn->kn_kq); 2761 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2762 2763 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2764 kn->kn_status |= KN_QUEUED; 2765 kq->kq_count++; 2766 kqueue_wakeup(kq); 2767 } 2768 2769 static void 2770 knote_dequeue(struct knote *kn) 2771 { 2772 struct kqueue *kq = kn->kn_kq; 2773 2774 KQ_OWNED(kn->kn_kq); 2775 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2776 2777 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2778 kn->kn_status &= ~KN_QUEUED; 2779 kq->kq_count--; 2780 } 2781 2782 static void 2783 knote_init(void) 2784 { 2785 2786 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2787 NULL, NULL, UMA_ALIGN_PTR, 0); 2788 ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue); 2789 } 2790 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2791 2792 static struct knote * 2793 knote_alloc(int mflag) 2794 { 2795 2796 return (uma_zalloc(knote_zone, mflag | M_ZERO)); 2797 } 2798 2799 static void 2800 knote_free(struct knote *kn) 2801 { 2802 2803 uma_zfree(knote_zone, kn); 2804 } 2805 2806 /* 2807 * Register the kev w/ the kq specified by fd. 2808 */ 2809 int 2810 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag) 2811 { 2812 struct kqueue *kq; 2813 struct file *fp; 2814 cap_rights_t rights; 2815 int error; 2816 2817 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE), 2818 &fp); 2819 if (error != 0) 2820 return (error); 2821 if ((error = kqueue_acquire(fp, &kq)) != 0) 2822 goto noacquire; 2823 2824 error = kqueue_register(kq, kev, td, mflag); 2825 kqueue_release(kq, 0); 2826 2827 noacquire: 2828 fdrop(fp, td); 2829 return (error); 2830 } 2831