/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/uma.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);

static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static uma_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops =
	{ 0, filt_nullattach, NULL, NULL };

extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&null_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_NETDEV */
};

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	if (immediate == 0)
		SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	PROC_UNLOCK(p);

	return (0);
}
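/*
 * Illustrative only: a userland caller reaches filt_procattach() above by
 * registering an EVFILT_PROC kevent.  A minimal sketch, assuming `kq' is a
 * kqueue descriptor and `pid' a process of interest (both hypothetical);
 * EV_SET() and kevent(2) come from <sys/event.h>:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_FORK,
 *	    0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */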
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp, 0);
	kn->kn_hook = calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}
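/*
 * Illustrative only: the timer filter above is driven by a registration
 * such as this userland sketch, which fires every 500 milliseconds (the
 * data field carries the period, per the comment on filt_timerattach());
 * the ident value 1 is an arbitrary per-kqueue timer identifier:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */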
static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

/*
 * MPSAFE
 */
int
kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd);
	if (error)
		goto done2;
	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	FILE_LOCK(fp);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	FILE_UNLOCK(fp);
	fdrop(fp, td);
	FILEDESC_LOCK(fdp);
	td->td_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	FILEDESC_UNLOCK(fdp);
	kq->kq_fdp = fdp;
done2:
	mtx_unlock(&Giant);
	return (error);
}
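/*
 * Illustrative only: the descriptor created by the syscall above is
 * obtained from userland via kqueue(2), e.g.:
 *
 *	int kq;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 */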
#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
/*
 * MPSAFE
 */
int
kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp, td);
		return (EBADF);
	}
	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done_nogiant;
		uap->timeout = &ts;
	}
	mtx_lock(&Giant);

	kq = fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout(kevp,
					    uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, td);
done:
	mtx_unlock(&Giant);
done_nogiant:
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] != &null_filtops)
		panic("sysfilt_ops[~filt(%d)] != &null_filtops", filt);
	sysfilt_ops[~filt] = filtops;
	return (0);
}

int
kqueue_del_filteropts(int filt)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] == &null_filtops)
		panic("sysfilt_ops[~filt(%d)] == &null_filtops", filt);
	sysfilt_ops[~filt] = &null_filtops;
	return (0);
}
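/*
 * Illustrative only: a kernel module supplying its own implementation for
 * a system filter slot would pair the two calls above across load and
 * unload.  The filter ops below are hypothetical; the slot must currently
 * hold null_filtops (as EVFILT_AIO does in the table above):
 *
 *	static struct filterops mod_filtops =
 *		{ 0, filt_modattach, filt_moddetach, filt_mod };
 *
 *	kqueue_add_filteropts(EVFILT_AIO, &mod_filtops);	(at load)
 *	...
 *	kqueue_del_filteropts(EVFILT_AIO);			(at unload)
 */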
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	FILEDESC_LOCK(fdp);
	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL) {
			FILEDESC_UNLOCK(fdp);
			return (EBADF);
		}
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}
	FILEDESC_UNLOCK(fdp);

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter which has already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
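/*
 * Illustrative only: besides EV_ADD/EV_DELETE, kqueue_register() above
 * handles EV_DISABLE/EV_ENABLE, which park and revive an existing knote
 * without detaching it.  A userland sketch, assuming `fd' is a descriptor
 * already registered for read events on `kq':
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */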
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct thread *td)
{
	struct kqueue *kq;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	FILE_LOCK_ASSERT(fp, MA_NOTOWNED);

	kq = fp->f_data;
	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout(&kq->kq_kev, ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout(&kq->kq_kev, ulistp,
		    sizeof(struct kevent) * nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}
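/*
 * Illustrative only: kqueue_scan() above services the collection half of
 * kevent(2).  A typical userland consumer loop looks like this sketch
 * (the buffer size of 8 is arbitrary; a NULL timeout blocks until an
 * event arrives):
 *
 *	struct kevent evs[8];
 *	int i, n;
 *
 *	n = kevent(kq, NULL, 0, evs, 8, NULL);
 *	for (i = 0; i < n; i++)
 *		handle(&evs[i]);	(handle() is hypothetical)
 */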
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, void *data,
	struct ucred *active_cred, struct thread *td)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int s = splnet();

	kq = fp->f_data;
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;

	kq = fp->f_data;
	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp = kq->kq_fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	GIANT_REQUIRED;

	FILEDESC_LOCK(fdp);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				*knp = kn0;
				FILE_LOCK(kn->kn_fp);
				FILEDESC_UNLOCK(fdp);
				fdrop_locked(kn->kn_fp, td);
				knote_free(kn);
				FILEDESC_LOCK(fdp);
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					*knp = kn0;
					/* XXX non-fd release of kn->kn_ptr */
					FILEDESC_UNLOCK(fdp);
					knote_free(kn);
					FILEDESC_LOCK(fdp);
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	FILEDESC_UNLOCK(fdp);
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeuppri(&kq->kq_sel, PSOCK);
	}
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeuppri(&kq->kq_sel, PSOCK);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}
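/*
 * Illustrative only: event sources call knote() below through the KNOTE()
 * macro from <sys/event.h> whenever their state changes, just as
 * kqueue_wakeup() does above.  A hypothetical driver keeping a
 * struct selinfo sc_rsel in its softc would post readability like so:
 *
 *	KNOTE(&sc->sc_rsel.si_note, 0);
 */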
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct thread *td, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list;

	FILEDESC_LOCK(fdp);
	list = &fdp->fd_knlist[fd];
	FILEDESC_UNLOCK(fdp);
	knote_remove(td, list);
}

static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list, *tmp_knhash;
	u_long tmp_knhashmask;
	int size;

	FILEDESC_LOCK(fdp);

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0) {
			FILEDESC_UNLOCK(fdp);
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			FILEDESC_LOCK(fdp);
			if (fdp->fd_knhashmask == 0) {
				fdp->fd_knhash = tmp_knhash;
				fdp->fd_knhashmask = tmp_knhashmask;
			} else {
				free(tmp_knhash, M_KQUEUE);
			}
		}
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		FILEDESC_UNLOCK(fdp);
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		FILEDESC_LOCK(fdp);
		if (fdp->fd_knlistsize > kn->kn_id) {
			FREE(list, M_KQUEUE);
			goto bigenough;
		}
		if (fdp->fd_knlist != NULL) {
			bcopy(fdp->fd_knlist, list,
			    fdp->fd_knlistsize * sizeof(struct klist *));
			FREE(fdp->fd_knlist, M_KQUEUE);
		}
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
bigenough:
	list = &fdp->fd_knlist[kn->kn_id];
done:
	FILEDESC_UNLOCK(fdp);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}
/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list;

	FILEDESC_LOCK(fdp);
	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
	if (kn->kn_fop->f_isfd)
		FILE_LOCK(kn->kn_fp);
	FILEDESC_UNLOCK(fdp);

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop_locked(kn->kn_fp, td);
	knote_free(kn);
}


static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)uma_zalloc(knote_zone, M_WAITOK));
}

static void
knote_free(struct knote *kn)
{
	uma_zfree(knote_zone, kn);
}