/*-
 * Copyright (c) 1999,2000 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

static int	filt_nullattach(struct knote *kn);
static int	filt_rwtypattach(struct knote *kn);
static int	filt_kqattach(struct knote *kn);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, struct timespec *timeout,
		    struct proc *p);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct proc *p);
static int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static vm_zone_t	knote_zone;
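
/*
 * Mark a knote active; unless it is already queued or has been
 * disabled, put it on its kqueue's pending queue so a scan will
 * find it.
 */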
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_stat,
	kqueue_close
};

extern struct filterops so_rwfiltops[];
extern struct filterops fifo_rwfiltops[];
extern struct filterops pipe_rwfiltops[];
extern struct filterops vn_rwfiltops[];

static struct filterops kq_rwfiltops[] = {
	{ 1, filt_kqattach, filt_kqdetach, filt_kqueue },
	{ 1, filt_nullattach, NULL, NULL },
};

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
extern struct filterops vn_filtops;

static struct filterops rwtype_filtops =
	{ 1, filt_rwtypattach, NULL, NULL };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };

/*
 * XXX
 * These must match the order of defines in <sys/file.h>
 */
static struct filterops *rwtypfilt_sw[] = {
	NULL,				/* 0 */
	vn_rwfiltops,			/* DTYPE_VNODE */
	so_rwfiltops,			/* DTYPE_SOCKET */
	pipe_rwfiltops,			/* DTYPE_PIPE */
	fifo_rwfiltops,			/* DTYPE_FIFO */
	kq_rwfiltops,			/* DTYPE_KQUEUE */
};

/*
 * table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&rwtype_filtops,		/* EVFILT_READ */
	&rwtype_filtops,		/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&vn_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};

static int
filt_nullattach(struct knote *kn)
{
	return (ENXIO);
}

/*
 * file-type specific attach routine for read/write filters
 */
static int
filt_rwtypattach(struct knote *kn)
{
	struct filterops *fops;

	fops = rwtypfilt_sw[kn->kn_fp->f_type];
	if (fops == NULL)
		return (EINVAL);
	kn->kn_fop = &fops[~kn->kn_filter];	/* convert to 0-base index */
	return (kn->kn_fop->f_attach(kn));
}

static int
filt_kqattach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
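
/*
 * Attach an EVFILT_PROC knote to the target process.  EV_FLAG1 is an
 * internal flag marking a registration done from inside the kernel
 * (by filt_proc() below, for NOTE_TRACK); in that case the new knote
 * immediately reports NOTE_CHILD with the parent's pid in kn_data.
 */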
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (! PRISON_CHECK(curproc, p))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

int
kqueue(struct proc *p, struct kqueue_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK);
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	p->p_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	return (error);
}
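
/*
 * Userland usage sketch of the interface implemented below (illustrative
 * only; "fd" is some hypothetical descriptor, error checks omitted):
 *
 *	struct kevent ev;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the filter)
 *	n = kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for an event)
 */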
#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	struct	kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	struct	timespec *timeout;
};
#endif
int
kevent(struct proc *p, struct kevent_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			return (error);
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					return (error);
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		p->p_retval[0] = nerrors;
		return (0);
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
	return (error);
}
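
/*
 * Process a single kevent: look up an existing knote matching this
 * (kqueue, ident, filter) triple, then apply the requested EV_ADD,
 * EV_DELETE, EV_ENABLE or EV_DISABLE action to it.  EV_ADD creates
 * and attaches a knote if none exists, and always rechecks the filter
 * so an already-triggered condition is reported immediately.
 */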
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor; ignore invalid descriptors */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (0);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0))
		goto done;

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL)
				return (ENOMEM);
			if (fp != NULL)
				fhold(fp);
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	return (error);
}
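
/*
 * Harvest pending events.  A marker knote is queued at the tail so the
 * loop can tell when it has seen everything that was pending when the
 * scan began; knotes that remain active are requeued behind the marker
 * and so are not rescanned in this pass.
 */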
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
    struct timespec *tsp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		timeout = atv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	p->p_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
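
/*
 * On close, sweep both the per-descriptor knlist array and the knhash
 * table, detaching and freeing every knote that belongs to this kqueue
 * before the kqueue itself is freed.
 */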
/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_TEMP);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}
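
/*
 * Hook a knote into its lookup list: fd-based knotes go into the
 * fd_knlist array, indexed by descriptor (the array grows in KQEXTENT
 * increments as needed); identity-based knotes hash into fd_knhash
 * via KN_HASH.
 */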
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}