/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#ifdef COMPAT_FREEBSD11
#define	_WANT_FREEBSD11_KEVENT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/jaildesc.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_util.h>
#endif

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int mflag);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, const struct filterops *fops,
		    uintptr_t ident, int mflag);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static const struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);
struct g_kevent_args;
static int	kern_kevent_generic(struct thread *td,
		    struct g_kevent_args *uap,
		    struct kevent_copyops *k_ops, const char *struct_name);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;
static fo_fork_t	kqueue_fork;

static const struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_cmp = file_kcmp_generic,
	.fo_fork = kqueue_fork,
	.fo_fill_kinfo = kqueue_fill_kinfo,
	.fo_flags = DFLAG_FORK,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void *);
static struct knote *knote_alloc(int mflag);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_jailattach(struct knote *kn);
static void	filt_jaildetach(struct knote *kn);
static int	filt_jail(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static void	filt_timerexpire_l(struct knote *kn, bool proc_locked);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static void	filt_timerstart(struct knote *kn, sbintime_t to);
static void	filt_timertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_timercopy(struct knote *kn, struct proc *p1);
static int	filt_timervalidate(struct knote *kn, sbintime_t *to);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static const struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
	.f_copy = knote_triv_copy,
};
static const struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
	.f_copy = knote_triv_copy,
};
/* XXX - move to kern_proc.c? */
static const struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
	.f_copy = knote_triv_copy,
};
static const struct filterops jail_filtops = {
	.f_isfd = 0,
	.f_attach = filt_jailattach,
	.f_detach = filt_jaildetach,
	.f_event = filt_jail,
	.f_copy = knote_triv_copy,
};
static const struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
	.f_copy = filt_timercopy,
};
static const struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
	.f_copy = knote_triv_copy,
};

static uma_zone_t	knote_zone;
static unsigned int __exclusive_cache_line	kq_ncallouts;
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED);		\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED);		\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE	64		/* XXX should be tunable */
#endif

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

static const struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
	.f_copy = knote_triv_copy,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern const struct filterops sig_filtops;
extern const struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", MTX_DEF);
static struct {
	const struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	[~EVFILT_READ] = { &file_filtops, 1 },
	[~EVFILT_WRITE] = { &file_filtops, 1 },
	[~EVFILT_AIO] = { &null_filtops },
	[~EVFILT_VNODE] = { &file_filtops, 1 },
	[~EVFILT_PROC] = { &proc_filtops, 1 },
	[~EVFILT_SIGNAL] = { &sig_filtops, 1 },
	[~EVFILT_TIMER] = { &timer_filtops, 1 },
	[~EVFILT_PROCDESC] = { &file_filtops, 1 },
	[~EVFILT_FS] = { &fs_filtops, 1 },
	[~EVFILT_LIO] = { &null_filtops },
	[~EVFILT_USER] = { &user_filtops, 1 },
	[~EVFILT_SENDFILE] = { &null_filtops },
	[~EVFILT_EMPTY] = { &file_filtops, 1 },
	[~EVFILT_JAIL] = { &jail_filtops, 1 },
	[~EVFILT_JAILDESC] = { &file_filtops, 1 },
};
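
/*
 * Editorial note, not part of the original source: the table above is
 * indexed by the one's complement of the filter number because filter
 * numbers are small negative integers.  For example, with EVFILT_READ
 * defined as -1 and EVFILT_WRITE as -2, ~EVFILT_READ == 0 and
 * ~EVFILT_WRITE == 1, so every valid filter maps into the range
 * [0, EVFILT_SYSCOUNT).  The same ~filt indexing is used by
 * kqueue_fo_find() and friends below.
 */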

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	if (kn->kn_sfflags & NOTE_EXIT)
		p = pfind_any(kn->kn_id);
	else
		p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT)
		exiting = true;

	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.  The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when the process forks.  It mostly does the same as knote(),
 * activating all knotes registered to be activated when the process forks.
 * Additionally, for each knote attached to the parent, check whether the
 * user wants to track the new process.  If so, attach a new knote to it
 * and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	MPASS(list != NULL);
	KNL_ASSERT_LOCKED(list);
	if (SLIST_EMPTY(&list->kl_list))
		return;

	memset(&kev, 0, sizeof(kev));
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice.  This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);

		/*
		 * Serialize updates to the kn_kevent fields with threads
		 * scanning the queue.
		 */
		list->kl_lock(list->kl_lockarg);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK)) {
			KQ_LOCK(kq);
			KNOTE_ACTIVATE(kn, 1);
		} else {
			KQ_LOCK(kq);
		}
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
	}
}

int
filt_jailattach(struct knote *kn)
{
	struct prison *pr;

	if (kn->kn_id == 0) {
		/* Let jid=0 watch the current prison (including prison0). */
		pr = curthread->td_ucred->cr_prison;
		mtx_lock(&pr->pr_mtx);
	} else {
		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison,
		    kn->kn_id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ENOENT);
		if (!prison_isalive(pr)) {
			mtx_unlock(&pr->pr_mtx);
			return (ENOENT);
		}
	}
	kn->kn_ptr.p_prison = pr;
	kn->kn_flags |= EV_CLEAR;
	knlist_add(pr->pr_klist, kn, 1);
	mtx_unlock(&pr->pr_mtx);
	return (0);
}

void
filt_jaildetach(struct knote *kn)
{
	if (kn->kn_ptr.p_prison != NULL) {
		knlist_remove(kn->kn_knlist, kn, 0);
		kn->kn_ptr.p_prison = NULL;
	} else
		kn->kn_status |= KN_DETACHED;
}

int
filt_jail(struct knote *kn, long hint)
{
	struct prison *pr;
	u_int event;

	pr = kn->kn_ptr.p_prison;
	if (pr == NULL) /* already activated, from attach filter */
		return (0);

	/*
	 * Mask off extra data.  In the NOTE_JAIL_CHILD case, that's
	 * everything except the NOTE_JAIL_CHILD bit itself, since a
	 * JID is any positive integer.
	 */
	event = ((u_int)hint & NOTE_JAIL_CHILD) ? NOTE_JAIL_CHILD :
	    (u_int)hint & NOTE_JAIL_CTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event) {
		kn->kn_fflags |= event;
		/* Report the created jail id or attached process id. */
		if (event == NOTE_JAIL_CHILD || event == NOTE_JAIL_ATTACH) {
			if (kn->kn_data != 0)
				kn->kn_fflags |= NOTE_JAIL_MULTI;
			kn->kn_data = (kn->kn_fflags & NOTE_JAIL_MULTI) ? 0U :
			    (u_int)hint & ~event;
		}
	}

	/* Prison is gone, so flag the event as finished. */
	if (event == NOTE_JAIL_REMOVE) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_prison = NULL;
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define NOTE_TIMER_PRECMASK						\
    (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)

static sbintime_t
timer2sbintime(int64_t data, unsigned int flags)
{
	int64_t secs;

	/*
	 * Macros for converting to the fractional second portion of an
	 * sbintime_t using 64bit multiplication to improve precision.
	 */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}
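
/*
 * Illustrative sketch, not part of the original source: for a kevent with
 * fflags = NOTE_MSECONDS and data = 1500, the conversion above takes the
 * data >= 1000 branch, so secs = 1 and the result is
 * (1 << 32) | MS_TO_SBT(500), i.e. one whole second in the integer part
 * of the sbintime_t plus half a second encoded in the fractional part.
 */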

struct kq_timer_cb_data {
	struct callout c;
	struct proc *p;
	struct knote *kn;
	int cpuid;
	int flags;
	TAILQ_ENTRY(kq_timer_cb_data) link;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period, 0 for abs */
};

#define	KQ_TIMER_CB_ENQUEUED	0x01

static void
kqtimer_sched_callout(struct kq_timer_cb_data *kc)
{
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
	    kc->cpuid, C_ABSOLUTE);
}

void
kqtimer_proc_continue(struct proc *p)
{
	struct kq_timer_cb_data *kc, *kc1;
	sbintime_t now;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	now = sbinuptime();
	TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
		TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
		kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
		if (kc->next <= now)
			filt_timerexpire_l(kc->kn, true);
		else
			kqtimer_sched_callout(kc);
	}
}

static void
filt_timerexpire_l(struct knote *kn, bool proc_locked)
{
	struct kq_timer_cb_data *kc;
	struct proc *p;
	uint64_t delta;
	sbintime_t now;

	kc = kn->kn_ptr.p_v;

	if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn, 0);
		return;
	}

	now = sbinuptime();
	if (now >= kc->next) {
		delta = (now - kc->next) / kc->to;
		if (delta == 0)
			delta = 1;
		kn->kn_data += delta;
		kc->next += delta * kc->to;
		if (now >= kc->next)	/* overflow */
			kc->next = now + kc->to;
		KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
	}

	/*
	 * Initial check for stopped kc->p is racy.  It is fine to
	 * miss the set of the stop flags, at worst we would schedule
	 * one more callout.  On the other hand, it is not fine to skip
	 * scheduling when we missed the clearing of the flags; we
	 * recheck them under the lock and observe a consistent state.
	 */
	p = kc->p;
	if (P_SHOULDSTOP(p) || P_KILLED(p)) {
		if (!proc_locked)
			PROC_LOCK(p);
		if (P_SHOULDSTOP(p) || P_KILLED(p)) {
			if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
				/*
				 * Insert into head so that
				 * kqtimer_proc_continue() does not
				 * iterate into us again.
				 */
				kc->flags |= KQ_TIMER_CB_ENQUEUED;
				TAILQ_INSERT_HEAD(&p->p_kqtim_stop, kc, link);
			}
			if (!proc_locked)
				PROC_UNLOCK(p);
			return;
		}
		if (!proc_locked)
			PROC_UNLOCK(p);
	}
	kqtimer_sched_callout(kc);
}

static void
filt_timerexpire(void *knx)
{
	filt_timerexpire_l(knx, false);
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timervalidate(struct knote *kn, sbintime_t *to)
{
	struct bintime bt;
	sbintime_t sbt;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/*
	 * The only fflags values supported are the timer unit
	 * (precision) and the absolute time indicator.
	 */
	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
		return (EINVAL);

	*to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (*to < 0)
		return (EINVAL);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		getboottimebin(&bt);
		sbt = bttosbt(bt);
		*to = MAX(0, *to - sbt);
	}
	return (0);
}

static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	sbintime_t to;
	int error;

	to = -1;
	error = filt_timervalidate(kn, &to);
	if (error != 0)
		return (error);
	KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 ||
	    (kn->kn_sfflags & NOTE_ABSTIME) != 0,
	    ("%s: periodic timer has a calculated zero timeout", __func__));
	KASSERT(to >= 0,
	    ("%s: timer has a calculated negative timeout", __func__));

	if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		return (ENOMEM);
	}

	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
		kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	kc->kn = kn;
	kc->p = curproc;
	kc->cpuid = PCPU_GET(cpuid);
	kc->flags = 0;
	callout_init(&kc->c, 1);
	filt_timerstart(kn, to);

	return (0);
}

static int
filt_timercopy(struct knote *kn, struct proc *p)
{
	struct kq_timer_cb_data *kc_src, *kc;

	if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		return (ENOMEM);
	}

	kn->kn_status &= ~KN_DETACHED;
	kc_src = kn->kn_ptr.p_v;
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	kc->kn = kn;
	kc->p = p;
	kc->flags = kc_src->flags & ~KQ_TIMER_CB_ENQUEUED;
	kc->next = kc_src->next;
	kc->to = kc_src->to;
	kc->cpuid = PCPU_GET(cpuid);
	callout_init(&kc->c, 1);
	kqtimer_sched_callout(kc);
	return (0);
}

static void
filt_timerstart(struct knote *kn, sbintime_t to)
{
	struct kq_timer_cb_data *kc;

	kc = kn->kn_ptr.p_v;
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		kc->next = to;
		kc->to = 0;
	} else {
		kc->next = to + sbinuptime();
		kc->to = to;
	}
	kqtimer_sched_callout(kc);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old __unused;
	bool pending;

	kc = kn->kn_ptr.p_v;
	do {
		callout_drain(&kc->c);

		/*
		 * kqtimer_proc_continue() might have rescheduled this callout.
		 * Double-check, using the process mutex as an interlock.
		 */
		PROC_LOCK(kc->p);
		if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
			kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
			TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
		}
		pending = callout_pending(&kc->c);
		PROC_UNLOCK(kc->p);
	} while (pending);
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static void
filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	struct kq_timer_cb_data *kc;
	struct kqueue *kq;
	sbintime_t to;
	int error;

	switch (type) {
	case EVENT_REGISTER:
		/* Handle re-added timers that update data/fflags */
		if (kev->flags & EV_ADD) {
			kc = kn->kn_ptr.p_v;

			/* Drain any existing callout. */
			callout_drain(&kc->c);

			/*
			 * Throw away any existing undelivered record
			 * of the timer expiration.  This is done under
			 * the presumption that if a process is
			 * re-adding this timer with new parameters,
			 * it is no longer interested in what may have
			 * happened under the old parameters.  If it is
			 * interested, it can wait for the expiration,
			 * delete the old timer definition, and then
			 * add the new one.
			 *
			 * This has to be done while the kq is locked:
			 *   - if enqueued, dequeue
			 *   - make it no longer active
			 *   - clear the count of expiration events
			 */
			kq = kn->kn_kq;
			KQ_LOCK(kq);
			if (kn->kn_status & KN_QUEUED)
				knote_dequeue(kn);

			kn->kn_status &= ~KN_ACTIVE;
			kn->kn_data = 0;
			KQ_UNLOCK(kq);

			/* Reschedule timer based on new data/fflags */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			error = filt_timervalidate(kn, &to);
			if (error != 0) {
				kn->kn_flags |= EV_ERROR;
				kn->kn_data = error;
			} else
				filt_timerstart(kn, to);
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_timertouch() - invalid type (%ld)", type);
		break;
	}
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
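
/*
 * Illustrative userland sketch, not part of the original source: a typical
 * EVFILT_USER round trip registers the event once and later triggers it,
 * roughly:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The NOTE_TRIGGER registration lands in filt_usertouch() above, which sets
 * kn_hookid; filt_user() then reports the knote as ready to kqueue_scan().
 */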

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, false, NULL));
}

int
sys_kqueuex(struct thread *td, struct kqueuex_args *uap)
{
	int flags;

	if ((uap->flags & ~(KQUEUE_CLOEXEC | KQUEUE_CPONFORK)) != 0)
		return (EINVAL);
	flags = 0;
	if ((uap->flags & KQUEUE_CLOEXEC) != 0)
		flags |= O_CLOEXEC;
	return (kern_kqueue(td, flags, (uap->flags & KQUEUE_CPONFORK) != 0,
	    NULL));
}

static void
kqueue_init(struct kqueue *kq, bool cponfork)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
	if (cponfork)
		kq->kq_state |= KQ_CPONFORK;
}

static int
kern_kqueue_alloc(struct thread *td, struct filedesc *fdp, int *fdip,
    struct file **fpp, int flags, struct filecaps *fcaps, bool cponfork,
    struct kqueue **kqp)
{
	struct ucred *cred;
	struct kqueue *kq;
	int error;

	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = fdip != NULL ? falloc_caps(td, fpp, fdip, flags, fcaps) :
	    _falloc_noinstall(td, fpp, 1);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof(*kq), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, cponfork);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	if (fdip != NULL)
		FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	if (fdip != NULL)
		FILEDESC_XUNLOCK(fdp);

	finit(*fpp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	*kqp = kq;
	return (0);
}

int
kern_kqueue(struct thread *td, int flags, bool cponfork, struct filecaps *fcaps)
{
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = kern_kqueue_alloc(td, td->td_proc->p_fd, &fd, &fp, flags,
	    fcaps, cponfork, &kq);
	if (error != 0)
		return (error);

	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

struct g_kevent_args {
	int	fd;
	const void *changelist;
	int	nchanges;
	void	*eventlist;
	int	nevents;
	const struct timespec *timeout;
};

int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent_copyout,
		.k_copyin = kevent_copyin,
		.kevent_size = sizeof(struct kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
}
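
/*
 * Illustrative userland sketch, not part of the original source: the usual
 * application-side pattern behind sys_kqueue()/sys_kevent() is
 *
 *	int kq = kqueue();
 *	struct kevent ch, ev;
 *	EV_SET(&ch, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &ch, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent register");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		printf("%jd bytes readable\n", (intmax_t)ev.data);
 *
 * The first kevent() call exercises the kqueue_register() path below; the
 * second blocks in kqueue_scan() until the filter reports an event.
 */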

static int
kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
    struct kevent_copyops *k_ops, const char *struct_name)
{
	struct timespec ts, *tsp;
#ifdef KTRACE
	struct kevent *eventlist = uap->eventlist;
#endif
	int error;

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
		    uap->nchanges, k_ops->kevent_size);
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    k_ops, tsp);

#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
		    td->td_retval[0], k_ops->kevent_size);
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

#ifdef COMPAT_FREEBSD11
static int
kevent11_copyout(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct freebsd11_kevent kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	for (i = 0; i < count; i++) {
		kev11.ident = kevp->ident;
		kev11.filter = kevp->filter;
		kev11.flags = kevp->flags;
		kev11.fflags = kevp->fflags;
		kev11.data = kevp->data;
		kev11.udata = kevp->udata;
		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
		if (error != 0)
			break;
		uap->eventlist++;
		kevp++;
	}
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent11_copyin(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct freebsd11_kevent kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	for (i = 0; i < count; i++) {
		error = copyin(uap->changelist, &kev11, sizeof(kev11));
		if (error != 0)
			break;
		kevp->ident = kev11.ident;
		kevp->filter = kev11.filter;
		kevp->flags = kev11.flags;
		kevp->fflags = kev11.fflags;
		kevp->data = (uintptr_t)kev11.data;
		kevp->udata = kev11.udata;
		bzero(&kevp->ext, sizeof(kevp->ext));
		uap->changelist++;
		kevp++;
	}
	return (error);
}

int
freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent11_copyout,
		.k_copyin = kevent11_copyin,
		.kevent_size = sizeof(struct freebsd11_kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent"));
}
#endif

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init_zero(&rights);
	if (nchanges > 0)
		cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);

	return (error);
}

static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	if (nchanges < 0)
		return (EINVAL);

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, M_WAITOK);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue.  This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq, false);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}

int
kqueue_add_filteropts(int filt, const struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

static const struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	if (sysfilt_ops[~filt].for_nolock)
		return sysfilt_ops[~filt].for_fop;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object %d refcount not valid on release", filt));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
    int mflag)
{
	const struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	struct knlist *knl;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
		return (EINVAL);

	fp = NULL;
	kn = NULL;
	knl = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	if (kev->flags & EV_ADD) {
		/* Reject an invalid flag pair early */
		if (kev->flags & EV_KEEPUDATA) {
			tkn = NULL;
			error = EINVAL;
			goto done;
		}

		/*
		 * Prevent waiting with locks.  Non-sleepable
		 * allocation failures are handled in the loop, only
		 * if the spare knote appears to be actually required.
		 */
		tkn = knote_alloc(mflag);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		if (kev->ident > INT_MAX)
			error = EBADF;
		else
			error = fget(td, kev->ident, &cap_event_rights, &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, M_NOWAIT) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, mflag);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD) {
			error = kqueue_expand(kq, fops, kev->ident, mflag);
			if (error != 0)
				goto done;
		}

		KQ_LOCK(kq);

		/*
		 * If possible, find an existing knote to use for this kevent.
		 */
		if (kev->filter == EVFILT_PROC &&
		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
			/*
			 * This is an internal creation of a process tracking
			 * note.  Don't attempt to coalesce this with an
			 * existing note.
1763 */ 1764 ; 1765 } else if (kq->kq_knhashmask != 0) { 1766 struct klist *list; 1767 1768 list = &kq->kq_knhash[ 1769 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1770 SLIST_FOREACH(kn, list, kn_link) 1771 if (kev->ident == kn->kn_id && 1772 kev->filter == kn->kn_filter) 1773 break; 1774 } 1775 } 1776 1777 /* knote is in the process of changing, wait for it to stabilize. */ 1778 if (kn != NULL && kn_in_flux(kn)) { 1779 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1780 if (filedesc_unlock) { 1781 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1782 filedesc_unlock = 0; 1783 } 1784 kq->kq_state |= KQ_FLUXWAIT; 1785 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1786 if (fp != NULL) { 1787 fdrop(fp, td); 1788 fp = NULL; 1789 } 1790 goto findkn; 1791 } 1792 1793 /* 1794 * kn now contains the matching knote, or NULL if no match 1795 */ 1796 if (kn == NULL) { 1797 if (kev->flags & EV_ADD) { 1798 kn = tkn; 1799 tkn = NULL; 1800 if (kn == NULL) { 1801 KQ_UNLOCK(kq); 1802 error = ENOMEM; 1803 goto done; 1804 } 1805 1806 /* 1807 * Now that the kqueue is locked, make sure the fd 1808 * didn't change out from under us. 1809 */ 1810 if (fops->f_isfd && 1811 fget_noref_unlocked(td->td_proc->p_fd, 1812 kev->ident) != fp) { 1813 KQ_UNLOCK(kq); 1814 tkn = kn; 1815 error = EBADF; 1816 goto done; 1817 } 1818 kn->kn_fp = fp; 1819 kn->kn_kq = kq; 1820 kn->kn_fop = fops; 1821 /* 1822 * apply reference counts to knote structure, and 1823 * do not release it at the end of this routine. 1824 */ 1825 fops = NULL; 1826 fp = NULL; 1827 1828 kn->kn_sfflags = kev->fflags; 1829 kn->kn_sdata = kev->data; 1830 kev->fflags = 0; 1831 kev->data = 0; 1832 kn->kn_kevent = *kev; 1833 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1834 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); 1835 kn->kn_status = KN_DETACHED; 1836 if ((kev->flags & EV_DISABLE) != 0) 1837 kn->kn_status |= KN_DISABLED; 1838 kn_enter_flux(kn); 1839 1840 error = knote_attach(kn, kq); 1841 KQ_UNLOCK(kq); 1842 if (error != 0) { 1843 tkn = kn; 1844 goto done; 1845 } 1846 1847 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1848 knote_drop_detached(kn, td); 1849 goto done; 1850 } 1851 knl = kn_list_lock(kn); 1852 goto done_ev_add; 1853 } else { 1854 /* No matching knote and the EV_ADD flag is not set. */ 1855 KQ_UNLOCK(kq); 1856 error = ENOENT; 1857 goto done; 1858 } 1859 } 1860 1861 if (kev->flags & EV_DELETE) { 1862 kn_enter_flux(kn); 1863 KQ_UNLOCK(kq); 1864 knote_drop(kn, td); 1865 goto done; 1866 } 1867 1868 if (kev->flags & EV_FORCEONESHOT) { 1869 kn->kn_flags |= EV_ONESHOT; 1870 KNOTE_ACTIVATE(kn, 1); 1871 } 1872 1873 if ((kev->flags & EV_ENABLE) != 0) 1874 kn->kn_status &= ~KN_DISABLED; 1875 else if ((kev->flags & EV_DISABLE) != 0) 1876 kn->kn_status |= KN_DISABLED; 1877 1878 /* 1879 * The user may change some filter values after the initial EV_ADD, 1880 * but doing so will not reset any filter which has already been 1881 * triggered. 1882 */ 1883 kn->kn_status |= KN_SCAN; 1884 kn_enter_flux(kn); 1885 KQ_UNLOCK(kq); 1886 knl = kn_list_lock(kn); 1887 if ((kev->flags & EV_KEEPUDATA) == 0) 1888 kn->kn_kevent.udata = kev->udata; 1889 if (!fops->f_isfd && fops->f_touch != NULL) { 1890 fops->f_touch(kn, kev, EVENT_REGISTER); 1891 } else { 1892 kn->kn_sfflags = kev->fflags; 1893 kn->kn_sdata = kev->data; 1894 } 1895 1896 done_ev_add: 1897 /* 1898 * We can get here with kn->kn_knlist == NULL. This can happen when 1899 * the initial attach event decides that the event is "completed" 1900 * already, e.g., filt_procattach() is called on a zombie process. 
It 1901 * will call filt_proc() which will remove it from the list, and NULL 1902 * kn_knlist. 1903 * 1904 * KN_DISABLED will be stable while the knote is in flux, so the 1905 * unlocked read will not race with an update. 1906 */ 1907 if ((kn->kn_status & KN_DISABLED) == 0) 1908 event = kn->kn_fop->f_event(kn, 0); 1909 else 1910 event = 0; 1911 1912 KQ_LOCK(kq); 1913 if (event) 1914 kn->kn_status |= KN_ACTIVE; 1915 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == 1916 KN_ACTIVE) 1917 knote_enqueue(kn); 1918 kn->kn_status &= ~KN_SCAN; 1919 kn_leave_flux(kn); 1920 kn_list_unlock(knl); 1921 KQ_UNLOCK_FLUX(kq); 1922 1923 done: 1924 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1925 if (filedesc_unlock) 1926 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1927 if (fp != NULL) 1928 fdrop(fp, td); 1929 knote_free(tkn); 1930 if (fops != NULL) 1931 kqueue_fo_release(filt); 1932 return (error); 1933 } 1934 1935 static int 1936 kqueue_acquire_ref(struct kqueue *kq) 1937 { 1938 KQ_LOCK(kq); 1939 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1940 KQ_UNLOCK(kq); 1941 return (EBADF); 1942 } 1943 kq->kq_refcnt++; 1944 KQ_UNLOCK(kq); 1945 return (0); 1946 } 1947 1948 static int 1949 kqueue_acquire(struct file *fp, struct kqueue **kqp) 1950 { 1951 struct kqueue *kq; 1952 int error; 1953 1954 kq = fp->f_data; 1955 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1956 return (EINVAL); 1957 error = kqueue_acquire_ref(kq); 1958 if (error == 0) 1959 *kqp = kq; 1960 return (error); 1961 } 1962 1963 static void 1964 kqueue_release(struct kqueue *kq, int locked) 1965 { 1966 if (locked) 1967 KQ_OWNED(kq); 1968 else 1969 KQ_LOCK(kq); 1970 kq->kq_refcnt--; 1971 if (kq->kq_refcnt == 1) 1972 wakeup(&kq->kq_refcnt); 1973 if (!locked) 1974 KQ_UNLOCK(kq); 1975 } 1976 1977 static void 1978 ast_kqueue(struct thread *td, int tda __unused) 1979 { 1980 taskqueue_quiesce(taskqueue_kqueue_ctx); 1981 } 1982 1983 static void 1984 kqueue_schedtask(struct kqueue *kq) 1985 { 1986 KQ_OWNED(kq); 1987 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1988 ("scheduling kqueue task while draining")); 1989 1990 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1991 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); 1992 kq->kq_state |= KQ_TASKSCHED; 1993 ast_sched(curthread, TDA_KQUEUE); 1994 } 1995 } 1996 1997 /* 1998 * Expand the kq to make sure we have storage for fops/ident pair. 1999 * 2000 * Return 0 on success (or no work necessary), return errno on failure. 
 */
static int
kqueue_expand(struct kqueue *kq, const struct filterops *fops, uintptr_t ident,
    int mflag)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int error, fd, size;

	KQ_NOTOWNED(kq);

	error = 0;
	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if ((kq->kq_state & KQ_CLOSING) != 0) {
				to_free = list;
				error = EBADF;
			} else if (kq->kq_knlistsize > fd) {
				to_free = list;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
			    HASH_WAITOK : HASH_NOWAIT);
			if (tmp_knhash == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if ((kq->kq_state & KQ_CLOSING) != 0) {
				to_free = tmp_knhash;
				error = EBADF;
			} else if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return (error);
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
2095 */ 2096 static int 2097 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 2098 const struct timespec *tsp, struct kevent *keva, struct thread *td) 2099 { 2100 struct kevent *kevp; 2101 struct knote *kn, *marker; 2102 struct knlist *knl; 2103 sbintime_t asbt, rsbt; 2104 int count, error, haskqglobal, influx, nkev, touch; 2105 2106 count = maxevents; 2107 nkev = 0; 2108 error = 0; 2109 haskqglobal = 0; 2110 2111 if (maxevents == 0) 2112 goto done_nl; 2113 if (maxevents < 0) { 2114 error = EINVAL; 2115 goto done_nl; 2116 } 2117 2118 rsbt = 0; 2119 if (tsp != NULL) { 2120 if (!timespecvalid_interval(tsp)) { 2121 error = EINVAL; 2122 goto done_nl; 2123 } 2124 if (timespecisset(tsp)) { 2125 if (tsp->tv_sec <= INT32_MAX) { 2126 rsbt = tstosbt(*tsp); 2127 if (TIMESEL(&asbt, rsbt)) 2128 asbt += tc_tick_sbt; 2129 if (asbt <= SBT_MAX - rsbt) 2130 asbt += rsbt; 2131 else 2132 asbt = 0; 2133 rsbt >>= tc_precexp; 2134 } else 2135 asbt = 0; 2136 } else 2137 asbt = -1; 2138 } else 2139 asbt = 0; 2140 marker = knote_alloc(M_WAITOK); 2141 marker->kn_status = KN_MARKER; 2142 KQ_LOCK(kq); 2143 2144 retry: 2145 kevp = keva; 2146 if (kq->kq_count == 0) { 2147 if (asbt == -1) { 2148 error = EWOULDBLOCK; 2149 } else { 2150 kq->kq_state |= KQ_SLEEP; 2151 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 2152 "kqread", asbt, rsbt, C_ABSOLUTE); 2153 } 2154 if (error == 0) 2155 goto retry; 2156 /* don't restart after signals... */ 2157 if (error == ERESTART) 2158 error = EINTR; 2159 else if (error == EWOULDBLOCK) 2160 error = 0; 2161 goto done; 2162 } 2163 2164 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 2165 influx = 0; 2166 while (count) { 2167 KQ_OWNED(kq); 2168 kn = TAILQ_FIRST(&kq->kq_head); 2169 2170 if ((kn->kn_status == KN_MARKER && kn != marker) || 2171 kn_in_flux(kn)) { 2172 if (influx) { 2173 influx = 0; 2174 KQ_FLUX_WAKEUP(kq); 2175 } 2176 kq->kq_state |= KQ_FLUXWAIT; 2177 error = msleep(kq, &kq->kq_lock, PSOCK, 2178 "kqflxwt", 0); 2179 continue; 2180 } 2181 2182 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2183 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 2184 kn->kn_status &= ~KN_QUEUED; 2185 kq->kq_count--; 2186 continue; 2187 } 2188 if (kn == marker) { 2189 KQ_FLUX_WAKEUP(kq); 2190 if (count == maxevents) 2191 goto retry; 2192 goto done; 2193 } 2194 KASSERT(!kn_in_flux(kn), 2195 ("knote %p is unexpectedly in flux", kn)); 2196 2197 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 2198 kn->kn_status &= ~KN_QUEUED; 2199 kn_enter_flux(kn); 2200 kq->kq_count--; 2201 KQ_UNLOCK(kq); 2202 /* 2203 * We don't need to lock the list since we've 2204 * marked it as in flux. 2205 */ 2206 knote_drop(kn, td); 2207 KQ_LOCK(kq); 2208 continue; 2209 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 2210 kn->kn_status &= ~KN_QUEUED; 2211 kn_enter_flux(kn); 2212 kq->kq_count--; 2213 KQ_UNLOCK(kq); 2214 /* 2215 * We don't need to lock the list since we've 2216 * marked the knote as being in flux. 
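 * The kevent is copied out below and the knote is then dropped, so an
 * EV_ONESHOT event is delivered to userland at most once.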
2217 */ 2218 *kevp = kn->kn_kevent; 2219 knote_drop(kn, td); 2220 KQ_LOCK(kq); 2221 kn = NULL; 2222 } else { 2223 kn->kn_status |= KN_SCAN; 2224 kn_enter_flux(kn); 2225 KQ_UNLOCK(kq); 2226 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 2227 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 2228 knl = kn_list_lock(kn); 2229 if (kn->kn_fop->f_event(kn, 0) == 0) { 2230 KQ_LOCK(kq); 2231 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2232 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | 2233 KN_SCAN); 2234 kn_leave_flux(kn); 2235 kq->kq_count--; 2236 kn_list_unlock(knl); 2237 influx = 1; 2238 continue; 2239 } 2240 touch = (!kn->kn_fop->f_isfd && 2241 kn->kn_fop->f_touch != NULL); 2242 if (touch) 2243 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 2244 else 2245 *kevp = kn->kn_kevent; 2246 KQ_LOCK(kq); 2247 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 2248 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 2249 /* 2250 * Manually clear knotes who weren't 2251 * 'touch'ed. 2252 */ 2253 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 2254 kn->kn_data = 0; 2255 kn->kn_fflags = 0; 2256 } 2257 if (kn->kn_flags & EV_DISPATCH) 2258 kn->kn_status |= KN_DISABLED; 2259 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 2260 kq->kq_count--; 2261 } else 2262 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2263 2264 kn->kn_status &= ~KN_SCAN; 2265 kn_leave_flux(kn); 2266 kn_list_unlock(knl); 2267 influx = 1; 2268 } 2269 2270 /* we are returning a copy to the user */ 2271 kevp++; 2272 nkev++; 2273 count--; 2274 2275 if (nkev == KQ_NEVENTS) { 2276 influx = 0; 2277 KQ_UNLOCK_FLUX(kq); 2278 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2279 nkev = 0; 2280 kevp = keva; 2281 KQ_LOCK(kq); 2282 if (error) 2283 break; 2284 } 2285 } 2286 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 2287 done: 2288 KQ_OWNED(kq); 2289 KQ_UNLOCK_FLUX(kq); 2290 knote_free(marker); 2291 done_nl: 2292 KQ_NOTOWNED(kq); 2293 if (nkev != 0) 2294 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 2295 td->td_retval[0] = maxevents - count; 2296 return (error); 2297 } 2298 2299 /*ARGSUSED*/ 2300 static int 2301 kqueue_ioctl(struct file *fp, u_long cmd, void *data, 2302 struct ucred *active_cred, struct thread *td) 2303 { 2304 /* 2305 * Enabling sigio causes two major problems: 2306 * 1) infinite recursion: 2307 * Synopsys: kevent is being used to track signals and have FIOASYNC 2308 * set. On receipt of a signal this will cause a kqueue to recurse 2309 * into itself over and over. Sending the sigio causes the kqueue 2310 * to become ready, which in turn posts sigio again, forever. 2311 * Solution: this can be solved by setting a flag in the kqueue that 2312 * we have a SIGIO in progress. 2313 * 2) locking problems: 2314 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 2315 * us above the proc and pgrp locks. 2316 * Solution: Post a signal using an async mechanism, being sure to 2317 * record a generation count in the delivery so that we do not deliver 2318 * a signal to the wrong process. 2319 * 2320 * Note, these two mechanisms are somewhat mutually exclusive! 
2321 */ 2322 #if 0 2323 struct kqueue *kq; 2324 2325 kq = fp->f_data; 2326 switch (cmd) { 2327 case FIOASYNC: 2328 if (*(int *)data) { 2329 kq->kq_state |= KQ_ASYNC; 2330 } else { 2331 kq->kq_state &= ~KQ_ASYNC; 2332 } 2333 return (0); 2334 2335 case FIOSETOWN: 2336 return (fsetown(*(int *)data, &kq->kq_sigio)); 2337 2338 case FIOGETOWN: 2339 *(int *)data = fgetown(&kq->kq_sigio); 2340 return (0); 2341 } 2342 #endif 2343 2344 return (ENOTTY); 2345 } 2346 2347 /*ARGSUSED*/ 2348 static int 2349 kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 2350 struct thread *td) 2351 { 2352 struct kqueue *kq; 2353 int revents = 0; 2354 int error; 2355 2356 if ((error = kqueue_acquire(fp, &kq))) 2357 return POLLERR; 2358 2359 KQ_LOCK(kq); 2360 if (events & (POLLIN | POLLRDNORM)) { 2361 if (kq->kq_count) { 2362 revents |= events & (POLLIN | POLLRDNORM); 2363 } else { 2364 selrecord(td, &kq->kq_sel); 2365 if (SEL_WAITING(&kq->kq_sel)) 2366 kq->kq_state |= KQ_SEL; 2367 } 2368 } 2369 kqueue_release(kq, 1); 2370 KQ_UNLOCK(kq); 2371 return (revents); 2372 } 2373 2374 /*ARGSUSED*/ 2375 static int 2376 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred) 2377 { 2378 2379 bzero((void *)st, sizeof *st); 2380 /* 2381 * We no longer return kq_count because the unlocked value is useless. 2382 * If you spent all this time getting the count, why not spend your 2383 * syscall better by calling kevent? 2384 * 2385 * XXX - This is needed for libc_r. 2386 */ 2387 st->st_mode = S_IFIFO; 2388 return (0); 2389 } 2390 2391 static void 2392 kqueue_drain(struct kqueue *kq, struct thread *td) 2393 { 2394 struct knote *kn; 2395 int i; 2396 2397 KQ_LOCK(kq); 2398 2399 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 2400 ("kqueue already closing")); 2401 kq->kq_state |= KQ_CLOSING; 2402 if (kq->kq_refcnt > 1) 2403 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 2404 2405 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 2406 2407 KASSERT(knlist_empty(&kq->kq_sel.si_note), 2408 ("kqueue's knlist not empty")); 2409 2410 for (i = 0; i < kq->kq_knlistsize; i++) { 2411 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 2412 if (kn_in_flux(kn)) { 2413 kq->kq_state |= KQ_FLUXWAIT; 2414 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 2415 continue; 2416 } 2417 kn_enter_flux(kn); 2418 KQ_UNLOCK(kq); 2419 knote_drop(kn, td); 2420 KQ_LOCK(kq); 2421 } 2422 } 2423 if (kq->kq_knhashmask != 0) { 2424 for (i = 0; i <= kq->kq_knhashmask; i++) { 2425 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 2426 if (kn_in_flux(kn)) { 2427 kq->kq_state |= KQ_FLUXWAIT; 2428 msleep(kq, &kq->kq_lock, PSOCK, 2429 "kqclo2", 0); 2430 continue; 2431 } 2432 kn_enter_flux(kn); 2433 KQ_UNLOCK(kq); 2434 knote_drop(kn, td); 2435 KQ_LOCK(kq); 2436 } 2437 } 2438 } 2439 2440 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 2441 kq->kq_state |= KQ_TASKDRAIN; 2442 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 2443 } 2444 2445 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2446 selwakeuppri(&kq->kq_sel, PSOCK); 2447 if (!SEL_WAITING(&kq->kq_sel)) 2448 kq->kq_state &= ~KQ_SEL; 2449 } 2450 2451 KQ_UNLOCK(kq); 2452 } 2453 2454 static void 2455 kqueue_destroy(struct kqueue *kq) 2456 { 2457 2458 KASSERT(kq->kq_fdp == NULL, 2459 ("kqueue still attached to a file descriptor")); 2460 seldrain(&kq->kq_sel); 2461 knlist_destroy(&kq->kq_sel.si_note); 2462 mtx_destroy(&kq->kq_lock); 2463 2464 if (kq->kq_knhash != NULL) 2465 free(kq->kq_knhash, M_KQUEUE); 2466 if (kq->kq_knlist != NULL) 2467 free(kq->kq_knlist, 
M_KQUEUE); 2468 2469 funsetown(&kq->kq_sigio); 2470 } 2471 2472 /*ARGSUSED*/ 2473 static int 2474 kqueue_close(struct file *fp, struct thread *td) 2475 { 2476 struct kqueue *kq = fp->f_data; 2477 struct filedesc *fdp; 2478 int error; 2479 int filedesc_unlock; 2480 2481 if ((error = kqueue_acquire(fp, &kq))) 2482 return error; 2483 kqueue_drain(kq, td); 2484 2485 /* 2486 * We may be called from knote_drop() doing fdrop(), itself 2487 * called from kqueue_register(). In that case the global 2488 * lock is owned, and the filedesc sx is locked beforehand, so 2489 * that the sleepable lock is not taken after the non-sleepable one. 2490 */ 2491 fdp = kq->kq_fdp; 2492 kq->kq_fdp = NULL; 2493 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 2494 FILEDESC_XLOCK(fdp); 2495 filedesc_unlock = 1; 2496 } else 2497 filedesc_unlock = 0; 2498 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 2499 if (filedesc_unlock) 2500 FILEDESC_XUNLOCK(fdp); 2501 2502 kqueue_destroy(kq); 2503 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); 2504 crfree(kq->kq_cred); 2505 free(kq, M_KQUEUE); 2506 fp->f_data = NULL; 2507 2508 return (0); 2509 } 2510 2511 static int 2512 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2513 { 2514 struct kqueue *kq = fp->f_data; 2515 2516 kif->kf_type = KF_TYPE_KQUEUE; 2517 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq; 2518 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count; 2519 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state; 2520 return (0); 2521 } 2522 2523 static void 2524 kqueue_wakeup(struct kqueue *kq) 2525 { 2526 KQ_OWNED(kq); 2527 2528 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 2529 kq->kq_state &= ~KQ_SLEEP; 2530 wakeup(kq); 2531 } 2532 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 2533 selwakeuppri(&kq->kq_sel, PSOCK); 2534 if (!SEL_WAITING(&kq->kq_sel)) 2535 kq->kq_state &= ~KQ_SEL; 2536 } 2537 if (!knlist_empty(&kq->kq_sel.si_note)) 2538 kqueue_schedtask(kq); 2539 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 2540 pgsigio(&kq->kq_sigio, SIGIO, 0); 2541 } 2542 } 2543 2544 /* 2545 * Walk down a list of knotes, activating them if their event has triggered. 2546 * 2547 * There is a possibility to optimize in the case of one kq watching another. 2548 * Instead of scheduling a task to wake it up, you could pass enough state 2549 * down the chain to make up the parent kqueue. Make this code functional 2550 * first. 2551 */ 2552 void 2553 knote(struct knlist *list, long hint, int lockflags) 2554 { 2555 struct kqueue *kq; 2556 struct knote *kn, *tkn; 2557 int error; 2558 2559 if (list == NULL) 2560 return; 2561 2562 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 2563 2564 if ((lockflags & KNF_LISTLOCKED) == 0) 2565 list->kl_lock(list->kl_lockarg); 2566 2567 /* 2568 * If we unlock the list lock (and enter influx), we can 2569 * eliminate the kqueue scheduling, but this will introduce 2570 * four lock/unlock's for each knote to test. Also, marker 2571 * would be needed to keep iteration position, since filters 2572 * or other threads could remove events. 2573 */ 2574 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 2575 kq = kn->kn_kq; 2576 KQ_LOCK(kq); 2577 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { 2578 /* 2579 * Do not process the influx notes, except for 2580 * the influx coming from the kq unlock in the 2581 * kqueue_scan(). In the latter case, we do 2582 * not interfere with the scan, since the code 2583 * fragment in kqueue_scan() locks the knlist, 2584 * and cannot proceed until we finish.
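 *
 * The two remaining branches below: with KNF_NOKQLOCK the filter's
 * f_event() is called with the kq lock dropped and the knote held in
 * flux; otherwise f_event() is invoked directly under the kq lock.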
2585 */ 2586 KQ_UNLOCK(kq); 2587 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 2588 kn_enter_flux(kn); 2589 KQ_UNLOCK(kq); 2590 error = kn->kn_fop->f_event(kn, hint); 2591 KQ_LOCK(kq); 2592 kn_leave_flux(kn); 2593 if (error) 2594 KNOTE_ACTIVATE(kn, 1); 2595 KQ_UNLOCK_FLUX(kq); 2596 } else { 2597 if (kn->kn_fop->f_event(kn, hint)) 2598 KNOTE_ACTIVATE(kn, 1); 2599 KQ_UNLOCK(kq); 2600 } 2601 } 2602 if ((lockflags & KNF_LISTLOCKED) == 0) 2603 list->kl_unlock(list->kl_lockarg); 2604 } 2605 2606 /* 2607 * add a knote to a knlist 2608 */ 2609 void 2610 knlist_add(struct knlist *knl, struct knote *kn, int islocked) 2611 { 2612 2613 KNL_ASSERT_LOCK(knl, islocked); 2614 KQ_NOTOWNED(kn->kn_kq); 2615 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); 2616 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2617 ("knote %p was not detached", kn)); 2618 if (!islocked) 2619 knl->kl_lock(knl->kl_lockarg); 2620 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 2621 if (!islocked) 2622 knl->kl_unlock(knl->kl_lockarg); 2623 KQ_LOCK(kn->kn_kq); 2624 kn->kn_knlist = knl; 2625 kn->kn_status &= ~KN_DETACHED; 2626 KQ_UNLOCK(kn->kn_kq); 2627 } 2628 2629 static void 2630 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, 2631 int kqislocked) 2632 { 2633 2634 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); 2635 KNL_ASSERT_LOCK(knl, knlislocked); 2636 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 2637 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); 2638 KASSERT((kn->kn_status & KN_DETACHED) == 0, 2639 ("knote %p was already detached", kn)); 2640 if (!knlislocked) 2641 knl->kl_lock(knl->kl_lockarg); 2642 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 2643 kn->kn_knlist = NULL; 2644 if (!knlislocked) 2645 kn_list_unlock(knl); 2646 if (!kqislocked) 2647 KQ_LOCK(kn->kn_kq); 2648 kn->kn_status |= KN_DETACHED; 2649 if (!kqislocked) 2650 KQ_UNLOCK(kn->kn_kq); 2651 } 2652 2653 /* 2654 * remove knote from the specified knlist 2655 */ 2656 void 2657 knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2658 { 2659 2660 knlist_remove_kq(knl, kn, islocked, 0); 2661 } 2662 2663 int 2664 knlist_empty(struct knlist *knl) 2665 { 2666 2667 KNL_ASSERT_LOCKED(knl); 2668 return (SLIST_EMPTY(&knl->kl_list)); 2669 } 2670 2671 static struct mtx knlist_lock; 2672 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2673 MTX_DEF); 2674 static void knlist_mtx_lock(void *arg); 2675 static void knlist_mtx_unlock(void *arg); 2676 2677 static void 2678 knlist_mtx_lock(void *arg) 2679 { 2680 2681 mtx_lock((struct mtx *)arg); 2682 } 2683 2684 static void 2685 knlist_mtx_unlock(void *arg) 2686 { 2687 2688 mtx_unlock((struct mtx *)arg); 2689 } 2690 2691 static void 2692 knlist_mtx_assert_lock(void *arg, int what) 2693 { 2694 2695 if (what == LA_LOCKED) 2696 mtx_assert((struct mtx *)arg, MA_OWNED); 2697 else 2698 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2699 } 2700 2701 void 2702 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2703 void (*kl_unlock)(void *), 2704 void (*kl_assert_lock)(void *, int)) 2705 { 2706 2707 if (lock == NULL) 2708 knl->kl_lockarg = &knlist_lock; 2709 else 2710 knl->kl_lockarg = lock; 2711 2712 if (kl_lock == NULL) 2713 knl->kl_lock = knlist_mtx_lock; 2714 else 2715 knl->kl_lock = kl_lock; 2716 if (kl_unlock == NULL) 2717 knl->kl_unlock = knlist_mtx_unlock; 2718 else 2719 knl->kl_unlock = kl_unlock; 2720 if (kl_assert_lock == NULL) 2721 knl->kl_assert_lock = knlist_mtx_assert_lock; 2722 
else 2723 knl->kl_assert_lock = kl_assert_lock; 2724 2725 knl->kl_autodestroy = 0; 2726 SLIST_INIT(&knl->kl_list); 2727 } 2728 2729 void 2730 knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2731 { 2732 2733 knlist_init(knl, lock, NULL, NULL, NULL); 2734 } 2735 2736 struct knlist * 2737 knlist_alloc(struct mtx *lock) 2738 { 2739 struct knlist *knl; 2740 2741 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); 2742 knlist_init_mtx(knl, lock); 2743 return (knl); 2744 } 2745 2746 void 2747 knlist_destroy(struct knlist *knl) 2748 { 2749 2750 KASSERT(KNLIST_EMPTY(knl), 2751 ("destroying knlist %p with knotes on it", knl)); 2752 } 2753 2754 void 2755 knlist_detach(struct knlist *knl) 2756 { 2757 2758 KNL_ASSERT_LOCKED(knl); 2759 knl->kl_autodestroy = 1; 2760 if (knlist_empty(knl)) { 2761 knlist_destroy(knl); 2762 free(knl, M_KQUEUE); 2763 } 2764 } 2765 2766 /* 2767 * Even if we are locked, we may need to drop the lock to allow any influx 2768 * knotes time to "settle". 2769 */ 2770 void 2771 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2772 { 2773 struct knote *kn, *kn2; 2774 struct kqueue *kq; 2775 2776 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl)); 2777 if (islocked) 2778 KNL_ASSERT_LOCKED(knl); 2779 else { 2780 KNL_ASSERT_UNLOCKED(knl); 2781 again: /* need to reacquire lock since we have dropped it */ 2782 knl->kl_lock(knl->kl_lockarg); 2783 } 2784 2785 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2786 kq = kn->kn_kq; 2787 KQ_LOCK(kq); 2788 if (kn_in_flux(kn)) { 2789 KQ_UNLOCK(kq); 2790 continue; 2791 } 2792 knlist_remove_kq(knl, kn, 1, 1); 2793 if (killkn) { 2794 kn_enter_flux(kn); 2795 KQ_UNLOCK(kq); 2796 knote_drop_detached(kn, td); 2797 } else { 2798 /* Make sure cleared knotes disappear soon */ 2799 kn->kn_flags |= EV_EOF | EV_ONESHOT; 2800 KQ_UNLOCK(kq); 2801 } 2802 kq = NULL; 2803 } 2804 2805 if (!SLIST_EMPTY(&knl->kl_list)) { 2806 /* there are still in flux knotes remaining */ 2807 kn = SLIST_FIRST(&knl->kl_list); 2808 kq = kn->kn_kq; 2809 KQ_LOCK(kq); 2810 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock")); 2811 knl->kl_unlock(knl->kl_lockarg); 2812 kq->kq_state |= KQ_FLUXWAIT; 2813 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2814 kq = NULL; 2815 goto again; 2816 } 2817 2818 if (islocked) 2819 KNL_ASSERT_LOCKED(knl); 2820 else { 2821 knl->kl_unlock(knl->kl_lockarg); 2822 KNL_ASSERT_UNLOCKED(knl); 2823 } 2824 } 2825 2826 /* 2827 * Remove all knotes referencing a specified fd. This must be called with 2828 * the FILEDESC lock held; it prevents a race where a new fd comes along, 2829 * occupies the entry, and we attach a knote to the wrong fd. 2830 */ 2831 void 2832 knote_fdclose(struct thread *td, int fd) 2833 { 2834 struct filedesc *fdp = td->td_proc->p_fd; 2835 struct kqueue *kq; 2836 struct knote *kn; 2837 int influx; 2838 2839 FILEDESC_XLOCK_ASSERT(fdp); 2840 2841 /* 2842 * We shouldn't have to worry about new kevents appearing on fd 2843 * since filedesc is locked.
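 * Each kqueue on fdp->fd_kqlist is walked below; knotes on the per-fd
 * list are forced into flux and dropped one at a time, sleeping on
 * "kqflxwt" whenever another thread already holds a knote in flux.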
2844 */ 2845 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2846 KQ_LOCK(kq); 2847 2848 again: 2849 influx = 0; 2850 while (kq->kq_knlistsize > fd && 2851 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2852 if (kn_in_flux(kn)) { 2853 /* someone else might be waiting on our knote */ 2854 if (influx) 2855 wakeup(kq); 2856 kq->kq_state |= KQ_FLUXWAIT; 2857 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2858 goto again; 2859 } 2860 kn_enter_flux(kn); 2861 KQ_UNLOCK(kq); 2862 influx = 1; 2863 knote_drop(kn, td); 2864 KQ_LOCK(kq); 2865 } 2866 KQ_UNLOCK_FLUX(kq); 2867 } 2868 } 2869 2870 static int 2871 knote_attach(struct knote *kn, struct kqueue *kq) 2872 { 2873 struct klist *list; 2874 2875 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); 2876 KQ_OWNED(kq); 2877 2878 if ((kq->kq_state & KQ_CLOSING) != 0) 2879 return (EBADF); 2880 if (kn->kn_fop->f_isfd) { 2881 if (kn->kn_id >= kq->kq_knlistsize) 2882 return (ENOMEM); 2883 list = &kq->kq_knlist[kn->kn_id]; 2884 } else { 2885 if (kq->kq_knhash == NULL) 2886 return (ENOMEM); 2887 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2888 } 2889 SLIST_INSERT_HEAD(list, kn, kn_link); 2890 return (0); 2891 } 2892 2893 static void 2894 knote_drop(struct knote *kn, struct thread *td) 2895 { 2896 2897 if ((kn->kn_status & KN_DETACHED) == 0) 2898 kn->kn_fop->f_detach(kn); 2899 knote_drop_detached(kn, td); 2900 } 2901 2902 static void 2903 knote_drop_detached(struct knote *kn, struct thread *td) 2904 { 2905 struct kqueue *kq; 2906 struct klist *list; 2907 2908 kq = kn->kn_kq; 2909 2910 KASSERT((kn->kn_status & KN_DETACHED) != 0, 2911 ("knote %p still attached", kn)); 2912 KQ_NOTOWNED(kq); 2913 2914 KQ_LOCK(kq); 2915 for (;;) { 2916 KASSERT(kn->kn_influx >= 1, 2917 ("knote_drop called on %p with influx %d", 2918 kn, kn->kn_influx)); 2919 if (kn->kn_influx == 1) 2920 break; 2921 kq->kq_state |= KQ_FLUXWAIT; 2922 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2923 } 2924 2925 if (kn->kn_fop->f_isfd) 2926 list = &kq->kq_knlist[kn->kn_id]; 2927 else 2928 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2929 2930 if (!SLIST_EMPTY(list)) 2931 SLIST_REMOVE(list, kn, knote, kn_link); 2932 if (kn->kn_status & KN_QUEUED) 2933 knote_dequeue(kn); 2934 KQ_UNLOCK_FLUX(kq); 2935 2936 if (kn->kn_fop->f_isfd) { 2937 fdrop(kn->kn_fp, td); 2938 kn->kn_fp = NULL; 2939 } 2940 kqueue_fo_release(kn->kn_kevent.filter); 2941 kn->kn_fop = NULL; 2942 knote_free(kn); 2943 } 2944 2945 static void 2946 knote_enqueue(struct knote *kn) 2947 { 2948 struct kqueue *kq = kn->kn_kq; 2949 2950 KQ_OWNED(kn->kn_kq); 2951 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2952 2953 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2954 kn->kn_status |= KN_QUEUED; 2955 kq->kq_count++; 2956 kqueue_wakeup(kq); 2957 } 2958 2959 static void 2960 knote_dequeue(struct knote *kn) 2961 { 2962 struct kqueue *kq = kn->kn_kq; 2963 2964 KQ_OWNED(kn->kn_kq); 2965 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2966 2967 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2968 kn->kn_status &= ~KN_QUEUED; 2969 kq->kq_count--; 2970 } 2971 2972 static void 2973 knote_init(void *dummy __unused) 2974 { 2975 2976 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2977 NULL, NULL, UMA_ALIGN_PTR, 0); 2978 ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue); 2979 prison0.pr_klist = knlist_alloc(&prison0.pr_mtx); 2980 } 2981 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2982 2983 static struct knote * 2984 knote_alloc(int mflag) 2985 
{ 2986 2987 return (uma_zalloc(knote_zone, mflag | M_ZERO)); 2988 } 2989 2990 static void 2991 knote_free(struct knote *kn) 2992 { 2993 2994 uma_zfree(knote_zone, kn); 2995 } 2996 2997 /* 2998 * Register the kev w/ the kq specified by fd. 2999 */ 3000 int 3001 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag) 3002 { 3003 struct kqueue *kq; 3004 struct file *fp; 3005 cap_rights_t rights; 3006 int error; 3007 3008 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE), 3009 &fp); 3010 if (error != 0) 3011 return (error); 3012 if ((error = kqueue_acquire(fp, &kq)) != 0) 3013 goto noacquire; 3014 3015 error = kqueue_register(kq, kev, td, mflag); 3016 kqueue_release(kq, 0); 3017 3018 noacquire: 3019 fdrop(fp, td); 3020 return (error); 3021 } 3022 3023 static int 3024 kqueue_fork_alloc(struct filedesc *fdp, struct file *fp, struct file **fp1, 3025 struct thread *td) 3026 { 3027 struct kqueue *kq, *kq1; 3028 int error; 3029 3030 MPASS(fp->f_type == DTYPE_KQUEUE); 3031 kq = fp->f_data; 3032 if ((kq->kq_state & KQ_CPONFORK) == 0) 3033 return (EOPNOTSUPP); 3034 error = kqueue_acquire_ref(kq); 3035 if (error != 0) 3036 return (error); 3037 error = kern_kqueue_alloc(td, fdp, NULL, fp1, 0, NULL, true, &kq1); 3038 if (error == 0) { 3039 kq1->kq_forksrc = kq; 3040 (*fp1)->f_flag = fp->f_flag & (FREAD | FWRITE | FEXEC | 3041 O_CLOEXEC | O_CLOFORK); 3042 } else { 3043 kqueue_release(kq, 0); 3044 } 3045 return (error); 3046 } 3047 3048 static void 3049 kqueue_fork_copy_knote(struct kqueue *kq1, struct knote *kn, struct proc *p1, 3050 struct filedesc *fdp) 3051 { 3052 struct knote *kn1; 3053 const struct filterops *fop; 3054 int error; 3055 3056 fop = kn->kn_fop; 3057 if (fop->f_copy == NULL || (fop->f_isfd && 3058 fdp->fd_files->fdt_ofiles[kn->kn_kevent.ident].fde_file == NULL)) 3059 return; 3060 error = kqueue_expand(kq1, fop, kn->kn_kevent.ident, M_WAITOK); 3061 if (error != 0) 3062 return; 3063 3064 kn1 = knote_alloc(M_WAITOK); 3065 *kn1 = *kn; 3066 kn1->kn_status |= KN_DETACHED; 3067 kn1->kn_status &= ~KN_QUEUED; 3068 kn1->kn_kq = kq1; 3069 error = fop->f_copy(kn1, p1); 3070 if (error != 0) { 3071 knote_free(kn1); 3072 return; 3073 } 3074 (void)kqueue_fo_find(kn->kn_kevent.filter); 3075 if (fop->f_isfd && !fhold(kn1->kn_fp)) { 3076 fop->f_detach(kn1); 3077 kqueue_fo_release(kn->kn_kevent.filter); 3078 knote_free(kn1); 3079 return; 3080 } 3081 if (kn->kn_knlist != NULL) 3082 knlist_add(kn->kn_knlist, kn1, 0); 3083 KQ_LOCK(kq1); 3084 knote_attach(kn1, kq1); 3085 kn1->kn_influx = 0; 3086 if ((kn->kn_status & KN_QUEUED) != 0) 3087 knote_enqueue(kn1); 3088 KQ_UNLOCK(kq1); 3089 } 3090 3091 static void 3092 kqueue_fork_copy_list(struct klist *knlist, struct knote *marker, 3093 struct kqueue *kq, struct kqueue *kq1, struct proc *p1, 3094 struct filedesc *fdp) 3095 { 3096 struct knote *kn; 3097 3098 KQ_OWNED(kq); 3099 kn = SLIST_FIRST(knlist); 3100 while (kn != NULL) { 3101 if ((kn->kn_status & KN_DETACHED) != 0 || 3102 (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0)) { 3103 kn = SLIST_NEXT(kn, kn_link); 3104 continue; 3105 } 3106 kn_enter_flux(kn); 3107 SLIST_INSERT_AFTER(kn, marker, kn_link); 3108 KQ_UNLOCK(kq); 3109 kqueue_fork_copy_knote(kq1, kn, p1, fdp); 3110 KQ_LOCK(kq); 3111 kn_leave_flux(kn); 3112 kn = SLIST_NEXT(marker, kn_link); 3113 /* XXXKIB switch kn_link to LIST? 
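 * (Context for the XXX above: SLIST_REMOVE() below must re-walk the
 * bucket from its head to find the marker's predecessor, so removal is
 * O(n); a doubly-linked LIST would make it O(1).)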
*/ 3114 SLIST_REMOVE(knlist, marker, knote, kn_link); 3115 } 3116 } 3117 3118 static int 3119 kqueue_fork_copy(struct filedesc *fdp, struct file *fp, struct file *fp1, 3120 struct proc *p1, struct thread *td) 3121 { 3122 struct kqueue *kq, *kq1; 3123 struct knote *marker; 3124 int error, i; 3125 3126 error = 0; 3127 MPASS(fp == NULL); 3128 MPASS(fp1->f_type == DTYPE_KQUEUE); 3129 3130 kq1 = fp1->f_data; 3131 kq = kq1->kq_forksrc; 3132 marker = knote_alloc(M_WAITOK); 3133 marker->kn_status = KN_MARKER; 3134 3135 KQ_LOCK(kq); 3136 for (i = 0; i < kq->kq_knlistsize; i++) { 3137 kqueue_fork_copy_list(&kq->kq_knlist[i], marker, kq, kq1, 3138 p1, fdp); 3139 } 3140 if (kq->kq_knhashmask != 0) { 3141 for (i = 0; i <= kq->kq_knhashmask; i++) { 3142 kqueue_fork_copy_list(&kq->kq_knhash[i], marker, kq, 3143 kq1, p1, fdp); 3144 } 3145 } 3146 kqueue_release(kq, 1); 3147 kq1->kq_forksrc = NULL; 3148 KQ_UNLOCK(kq); 3149 3150 knote_free(marker); 3151 return (error); 3152 } 3153 3154 static int 3155 kqueue_fork(struct filedesc *fdp, struct file *fp, struct file **fp1, 3156 struct proc *p1, struct thread *td) 3157 { 3158 if (*fp1 == NULL) 3159 return (kqueue_fork_alloc(fdp, fp, fp1, td)); 3160 return (kqueue_fork_copy(fdp, fp, *fp1, p1, td)); 3161 } 3162 3163 int 3164 knote_triv_copy(struct knote *kn __unused, struct proc *p1 __unused) 3165 { 3166 return (0); 3167 } 3168 3169 struct knote_status_export_bit { 3170 int kn_status_bit; 3171 int knt_status_bit; 3172 }; 3173 3174 #define ST(name) \ 3175 { .kn_status_bit = KN_##name, .knt_status_bit = KNOTE_STATUS_##name } 3176 static const struct knote_status_export_bit knote_status_export_bits[] = { 3177 ST(ACTIVE), 3178 ST(QUEUED), 3179 ST(DISABLED), 3180 ST(DETACHED), 3181 ST(KQUEUE), 3182 }; 3183 #undef ST 3184 3185 static int 3186 knote_status_export(int kn_status) 3187 { 3188 const struct knote_status_export_bit *b; 3189 unsigned i; 3190 int res; 3191 3192 res = 0; 3193 for (i = 0; i < nitems(knote_status_export_bits); i++) { 3194 b = &knote_status_export_bits[i]; 3195 if ((kn_status & b->kn_status_bit) != 0) 3196 res |= b->knt_status_bit; 3197 } 3198 return (res); 3199 } 3200 3201 static int 3202 kern_proc_kqueue_report_one(struct sbuf *s, struct proc *p, 3203 int kq_fd, struct kqueue *kq, struct knote *kn, bool compat32 __unused) 3204 { 3205 struct kinfo_knote kin; 3206 #ifdef COMPAT_FREEBSD32 3207 struct kinfo_knote32 kin32; 3208 #endif 3209 int error; 3210 3211 if (kn->kn_status == KN_MARKER) 3212 return (0); 3213 3214 memset(&kin, 0, sizeof(kin)); 3215 kin.knt_kq_fd = kq_fd; 3216 memcpy(&kin.knt_event, &kn->kn_kevent, sizeof(struct kevent)); 3217 kin.knt_status = knote_status_export(kn->kn_status); 3218 kn_enter_flux(kn); 3219 KQ_UNLOCK_FLUX(kq); 3220 if (kn->kn_fop->f_userdump != NULL) 3221 (void)kn->kn_fop->f_userdump(p, kn, &kin); 3222 #ifdef COMPAT_FREEBSD32 3223 if (compat32) { 3224 freebsd32_kinfo_knote_to_32(&kin, &kin32); 3225 error = sbuf_bcat(s, &kin32, sizeof(kin32)); 3226 } else 3227 #endif 3228 error = sbuf_bcat(s, &kin, sizeof(kin)); 3229 KQ_LOCK(kq); 3230 kn_leave_flux(kn); 3231 return (error); 3232 } 3233 3234 static int 3235 kern_proc_kqueue_report(struct sbuf *s, struct proc *p, int kq_fd, 3236 struct kqueue *kq, bool compat32) 3237 { 3238 struct knote *kn; 3239 int error, i; 3240 3241 error = 0; 3242 KQ_LOCK(kq); 3243 for (i = 0; i < kq->kq_knlistsize; i++) { 3244 SLIST_FOREACH(kn, &kq->kq_knlist[i], kn_link) { 3245 error = kern_proc_kqueue_report_one(s, p, kq_fd, 3246 kq, kn, compat32); 3247 if (error != 0) 3248 goto out; 3249 } 
3250 } 3251 if (kq->kq_knhashmask == 0) 3252 goto out; 3253 for (i = 0; i <= kq->kq_knhashmask; i++) { 3254 SLIST_FOREACH(kn, &kq->kq_knhash[i], kn_link) { 3255 error = kern_proc_kqueue_report_one(s, p, kq_fd, 3256 kq, kn, compat32); 3257 if (error != 0) 3258 goto out; 3259 } 3260 } 3261 out: 3262 KQ_UNLOCK_FLUX(kq); 3263 return (error); 3264 } 3265 3266 struct kern_proc_kqueues_out1_cb_args { 3267 struct sbuf *s; 3268 bool compat32; 3269 }; 3270 3271 static int 3272 kern_proc_kqueues_out1_cb(struct proc *p, int fd, struct file *fp, void *arg) 3273 { 3274 struct kqueue *kq; 3275 struct kern_proc_kqueues_out1_cb_args *a; 3276 3277 if (fp->f_type != DTYPE_KQUEUE) 3278 return (0); 3279 a = arg; 3280 kq = fp->f_data; 3281 return (kern_proc_kqueue_report(a->s, p, fd, kq, a->compat32)); 3282 } 3283 3284 static int 3285 kern_proc_kqueues_out1(struct thread *td, struct proc *p, struct sbuf *s, 3286 bool compat32) 3287 { 3288 struct kern_proc_kqueues_out1_cb_args a; 3289 3290 a.s = s; 3291 a.compat32 = compat32; 3292 return (fget_remote_foreach(td, p, kern_proc_kqueues_out1_cb, &a)); 3293 } 3294 3295 int 3296 kern_proc_kqueues_out(struct proc *p, struct sbuf *sb, size_t maxlen, 3297 bool compat32) 3298 { 3299 struct sbuf *s, sm; 3300 size_t sb_len; 3301 int error; 3302 3303 if (maxlen == -1 || maxlen == 0) 3304 sb_len = 128; 3305 else 3306 sb_len = maxlen; 3307 s = sbuf_new(&sm, NULL, sb_len, maxlen == -1 ? SBUF_AUTOEXTEND : 3308 SBUF_FIXEDLEN); 3309 error = kern_proc_kqueues_out1(curthread, p, s, compat32); 3310 sbuf_finish(s); 3311 if (error == 0) { 3312 sbuf_bcat(sb, sbuf_data(s), MIN(sbuf_len(s), maxlen == -1 ? 3313 SIZE_T_MAX : maxlen)); 3314 } 3315 sbuf_delete(s); 3316 return (error); 3317 } 3318 3319 static int 3320 sysctl_kern_proc_kqueue_one(struct thread *td, struct sbuf *s, struct proc *p, 3321 int kq_fd, bool compat32) 3322 { 3323 struct file *fp; 3324 struct kqueue *kq; 3325 int error; 3326 3327 error = fget_remote(td, p, kq_fd, &fp); 3328 if (error == 0) { 3329 if (fp->f_type != DTYPE_KQUEUE) { 3330 error = EINVAL; 3331 } else { 3332 kq = fp->f_data; 3333 error = kern_proc_kqueue_report(s, p, kq_fd, kq, 3334 compat32); 3335 } 3336 fdrop(fp, td); 3337 } 3338 return (error); 3339 } 3340 3341 static int 3342 sysctl_kern_proc_kqueue(SYSCTL_HANDLER_ARGS) 3343 { 3344 struct thread *td; 3345 struct proc *p; 3346 struct sbuf *s, sm; 3347 int error, error1, *name; 3348 bool compat32; 3349 3350 name = (int *)arg1; 3351 if ((u_int)arg2 > 2 || (u_int)arg2 == 0) 3352 return (EINVAL); 3353 3354 error = pget((pid_t)name[0], PGET_HOLD | PGET_CANDEBUG, &p); 3355 if (error != 0) 3356 return (error); 3357 3358 td = curthread; 3359 #ifdef COMPAT_FREEBSD32 3360 compat32 = SV_CURPROC_FLAG(SV_ILP32); 3361 #else 3362 compat32 = false; 3363 #endif 3364 3365 s = sbuf_new_for_sysctl(&sm, NULL, 0, req); 3366 if (s == NULL) { 3367 error = ENOMEM; 3368 goto out; 3369 } 3370 sbuf_clear_flags(s, SBUF_INCLUDENUL); 3371 3372 if ((u_int)arg2 == 1) { 3373 error = kern_proc_kqueues_out1(td, p, s, compat32); 3374 } else { 3375 error = sysctl_kern_proc_kqueue_one(td, s, p, 3376 name[1] /* kq_fd */, compat32); 3377 } 3378 3379 error1 = sbuf_finish(s); 3380 if (error == 0) 3381 error = error1; 3382 sbuf_delete(s); 3383 3384 out: 3385 PRELE(p); 3386 return (error); 3387 } 3388 3389 static SYSCTL_NODE(_kern_proc, KERN_PROC_KQUEUE, kq, 3390 CTLFLAG_RD | CTLFLAG_MPSAFE, 3391 sysctl_kern_proc_kqueue, "KQueue events"); 3392
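/*
 * Illustrative userland sketch of querying the node above; the mib
 * layout { CTL_KERN, KERN_PROC, KERN_PROC_KQUEUE, pid [, kq_fd] } and
 * the array-of-kinfo_knote output format are assumptions inferred from
 * the handler, not a documented interface:
 *
 *	int mib[5] = { CTL_KERN, KERN_PROC, KERN_PROC_KQUEUE, pid, kq_fd };
 *	size_t len = 0;
 *
 *	if (sysctl(mib, 5, NULL, &len, NULL, 0) == 0) {
 *		struct kinfo_knote *kn = malloc(len);
 *		if (kn != NULL && sysctl(mib, 5, kn, &len, NULL, 0) == 0)
 *			printf("%zu knotes\n", len / sizeof(*kn));
 *		free(kn);
 *	}
 */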