/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct thread *td);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct thread *td);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct thread *td);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct thread *td);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct thread *td);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct thread *td);
static int	kqueue_close(struct file *fp, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_kqfilter,
	kqueue_stat,
	kqueue_close
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while (0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
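
/*
 * Attach routine for filter types that cannot be attached to; used to
 * fill slots in sysfilt_ops that have no backing filter.
 */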
static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops =
	{ 0, filt_nullattach, NULL, NULL };

extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&null_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
};

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curproc, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
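
/*
 * Callout handler for EVFILT_TIMER: count the expiry, activate the
 * knote, and re-arm the callout unless the event is one-shot.
 */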
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp, 0);
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
	kn->kn_hook = (caddr_t)calloutp;

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
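
/*
 * Example usage (userland sketch, not part of this file): create a
 * kqueue, register a 500 millisecond EVFILT_TIMER, and block until it
 * fires.  EV_SET() and struct kevent are declared in <sys/event.h>;
 * the timer period is kev.data in milliseconds, as filt_timerattach
 * above documents.
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 */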
/*
 * MPSAFE
 */
int
kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd);
	if (error)
		goto done2;
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	td->td_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
/*
 * MPSAFE
 */
int
kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	mtx_lock(&Giant);
	if ((error = fget(td, uap->fd, &fp)) != 0)
		goto done;
	if (fp->f_type != DTYPE_KQUEUE) {
		error = EBADF;
		goto done;
	}
	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, td);
done:
	if (fp != NULL)
		fdrop(fp, td);
	mtx_unlock(&Giant);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] != &null_filtops)
		panic("sysfilt_ops[~filt(%d)] != &null_filtops", filt);
	sysfilt_ops[~filt] = filtops;
	return (0);
}

int
kqueue_del_filteropts(int filt)
{

	if (filt > 0)
		panic("filt(%d) > 0", filt);
	if (filt + EVFILT_SYSCOUNT < 0)
		panic("filt(%d) + EVFILT_SYSCOUNT(%d) == %d < 0",
		    filt, EVFILT_SYSCOUNT, filt + EVFILT_SYSCOUNT);
	if (sysfilt_ops[~filt] == &null_filtops)
		panic("sysfilt_ops[~filt(%d)] == &null_filtops", filt);
	sysfilt_ops[~filt] = &null_filtops;
	return (0);
}
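
/*
 * Apply one change from a kevent to this kqueue: locate an existing
 * knote matching (identifier, filter, kq), or create one for EV_ADD,
 * then process the EV_DELETE/EV_ENABLE/EV_DISABLE flags against it.
 */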
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
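
/*
 * Harvest pending events from the kqueue into the user's buffer,
 * sleeping for at most the supplied timeout if the queue is empty.
 * A marker knote on the queue bounds the sweep, so knotes that are
 * requeued behind it are not examined twice in one call.
 */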
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct thread *td)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct thread *td)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
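
/*
 * Detach and free every knote belonging to this kqueue, scanning both
 * the per-descriptor knlist array and the identifier hash, then free
 * the kqueue itself.
 */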
/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, td);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}
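
/*
 * Event sources deliver their hints through the KNOTE() macro from
 * <sys/event.h>, which resolves to knote() below; the socket layer,
 * for example, posts readability by calling KNOTE() on the klist in
 * its sockbuf's selinfo.
 */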
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct thread *td, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(td, list);
}

static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_KQUEUE);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, td);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}