/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/uma.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);

static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
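/*
 * Note: in this era struct filterops is laid out as
 * { f_isfd, f_attach, f_detach, f_event }.  A nonzero f_isfd means the
 * kevent identifier is a file descriptor, so attachment is delegated to
 * the descriptor's fo_kqfilter routine; otherwise the knote is hashed
 * by identifier into the per-filedesc fd_knhash table.
 */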
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static uma_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops =
	{ 0, filt_nullattach, NULL, NULL };

extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&null_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_NETDEV */
};

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
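/*
 * Attach an EVFILT_PROC knote to a process.  Both pfind() and zpfind()
 * return the target process locked, so the klist insertion below runs
 * under the process lock and every exit path past the lookup must
 * PROC_UNLOCK().
 */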
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
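/*
 * Timer filter machinery: each EVFILT_TIMER knote owns a callout,
 * stashed in kn_hook.  The expire handler bumps kn_data, activates the
 * knote, and (unless EV_ONESHOT is set) rearms the callout for another
 * period, so the timer fires repeatedly.
 */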
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp, 0);
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
	kn->kn_hook = calloutp;

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

/*
 * MPSAFE
 */
int
kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd);
	if (error)
		goto done2;
	kq = malloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	FILE_LOCK(fp);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	FILE_UNLOCK(fp);
	FILEDESC_LOCK(fdp);
	td->td_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	FILEDESC_UNLOCK(fdp);
	kq->kq_fdp = fdp;
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
/*
 * MPSAFE
 */
int
kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp, td);
		return (EBADF);
	}
	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done_nogiant;
		uap->timeout = &ts;
	}
	mtx_lock(&Giant);

	kq = fp->f_data;
	nerrors = 0;
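	/*
	 * Process the changelist in KQ_NEVENTS-sized batches, staging
	 * each batch in the kqueue's embedded kq_kev array to bound
	 * kernel copyin buffering.  Registration errors are reported
	 * back through the eventlist as EV_ERROR kevents when there is
	 * room for them.
	 */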
	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout(kevp,
					    uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, td);
done:
	mtx_unlock(&Giant);
done_nogiant:
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] != &null_filtops)
		panic("sysfilt_ops[~filt(%d)] != &null_filtops", filt);
	sysfilt_ops[~filt] = filtops;
	return (0);
}

int
kqueue_del_filteropts(int filt)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] == &null_filtops)
		panic("sysfilt_ops[~filt(%d)] == &null_filtops", filt);
	sysfilt_ops[~filt] = &null_filtops;
	return (0);
}
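/*
 * Register a new knote with a kqueue or modify an existing one.  This
 * is also invoked from within the kernel with a NULL thread pointer
 * (see the NOTE_TRACK handling in filt_proc() above).
 */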
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	FILEDESC_LOCK(fdp);
	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL) {
			FILEDESC_UNLOCK(fdp);
			return (EBADF);
		}
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}
	FILEDESC_UNLOCK(fdp);

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter which has already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
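/*
 * Drain pending events into the user's eventlist.  A marker knote is
 * appended to the active queue so the scan can tell when it has made
 * one full pass: dequeuing the marker means every knote that was
 * queued at the start has been examined, even though knotes without
 * EV_ONESHOT or EV_CLEAR are requeued behind it.
 */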
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct thread *td)
{
	struct kqueue *kq;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	FILE_LOCK_ASSERT(fp, MA_NOTOWNED);

	kq = fp->f_data;
	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout(&kq->kq_kev, ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout(&kq->kq_kev, ulistp,
		    sizeof(struct kevent) * nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}
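/*
 * The fileops below implement the descriptor-level interface to a
 * kqueue: read and write are unsupported, ioctl is rejected, poll
 * reports readability while events are queued, stat exposes the queue
 * depth, and close tears down every knote this kqueue registered.
 */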
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, void *data,
	struct ucred *active_cred, struct thread *td)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int s = splnet();

	kq = fp->f_data;
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;

	kq = fp->f_data;
	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
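/*
 * Closing a kqueue requires detaching every knote it owns.  They are
 * found by sweeping both the owning filedesc's fd_knlist array
 * (fd-based filters) and its fd_knhash table (identifier-based
 * filters) for entries whose kn_kq points back at this kqueue.
 */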
/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp = kq->kq_fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	FILEDESC_LOCK(fdp);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				*knp = kn0;
				FILE_LOCK(kn->kn_fp);
				FILEDESC_UNLOCK(fdp);
				fdrop_locked(kn->kn_fp, td);
				knote_free(kn);
				FILEDESC_LOCK(fdp);
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					*knp = kn0;
					/* XXX non-fd release of kn->kn_ptr */
					FILEDESC_UNLOCK(fdp);
					knote_free(kn);
					FILEDESC_LOCK(fdp);
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	FILEDESC_UNLOCK(fdp);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct thread *td, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list;

	FILEDESC_LOCK(fdp);
	list = &fdp->fd_knlist[fd];
	FILEDESC_UNLOCK(fdp);
	knote_remove(td, list);
}
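/*
 * Link a knote into the owning filedesc.  fd-based knotes index the
 * fd_knlist array, which is grown in KQEXTENT increments as needed;
 * other knotes hash into fd_knhash, allocated lazily on first use.
 * Both allocations drop the filedesc lock around the sleeping
 * allocator and recheck for a lost race afterward.
 */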
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list, *tmp_knhash;
	u_long tmp_knhashmask;
	int size;

	FILEDESC_LOCK(fdp);

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0) {
			FILEDESC_UNLOCK(fdp);
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			FILEDESC_LOCK(fdp);
			if (fdp->fd_knhashmask == 0) {
				fdp->fd_knhash = tmp_knhash;
				fdp->fd_knhashmask = tmp_knhashmask;
			} else {
				free(tmp_knhash, M_KQUEUE);
			}
		}
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		FILEDESC_UNLOCK(fdp);
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		FILEDESC_LOCK(fdp);
		if (fdp->fd_knlistsize > kn->kn_id) {
			FREE(list, M_KQUEUE);
			goto bigenough;
		}
		if (fdp->fd_knlist != NULL) {
			bcopy(fdp->fd_knlist, list,
			    fdp->fd_knlistsize * sizeof(struct klist *));
			FREE(fdp->fd_knlist, M_KQUEUE);
		}
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
bigenough:
	list = &fdp->fd_knlist[kn->kn_id];
done:
	FILEDESC_UNLOCK(fdp);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list;

	FILEDESC_LOCK(fdp);
	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
	if (kn->kn_fop->f_isfd)
		FILE_LOCK(kn->kn_fp);
	FILEDESC_UNLOCK(fdp);

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop_locked(kn->kn_fp, td);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)uma_zalloc(knote_zone, M_WAITOK));
}

static void
knote_free(struct knote *kn)
{
	uma_zfree(knote_zone, kn);
}