/*-
 * Copyright (c) 1999,2000 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

static int	filt_nullattach(struct knote *kn);
static int	filt_rwtypattach(struct knote *kn);
static int	filt_kqattach(struct knote *kn);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, struct timespec *timeout,
		    struct proc *p);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct proc *p);
static int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static vm_zone_t	knote_zone;

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)
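
/*
 * Overview of the knote life cycle, as implemented below: a filter's
 * f_attach routine links a knote onto the object it watches; when the
 * object changes state, KNOTE_ACTIVATE() marks the knote KN_ACTIVE and,
 * unless it is already KN_QUEUED or has been KN_DISABLED, knote_enqueue()
 * places it on its kqueue's pending list, where kqueue_scan() later
 * harvests it and either requeues, clears, or drops it.
 */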

#define KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_stat,
	kqueue_close
};

extern struct filterops so_rwfiltops[];
extern struct filterops fifo_rwfiltops[];
extern struct filterops pipe_rwfiltops[];
extern struct filterops vn_rwfiltops[];

static struct filterops kq_rwfiltops[] = {
	{ 1, filt_kqattach, filt_kqdetach, filt_kqueue },
	{ 1, filt_nullattach, NULL, NULL },
};

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
extern struct filterops vn_filtops;

static struct filterops rwtype_filtops =
	{ 1, filt_rwtypattach, NULL, NULL };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };

/*
 * XXX
 * These must match the order of defines in <sys/file.h>
 */
static struct filterops *rwtypfilt_sw[] = {
	NULL,				/* 0 */
	vn_rwfiltops,			/* DTYPE_VNODE */
	so_rwfiltops,			/* DTYPE_SOCKET */
	pipe_rwfiltops,			/* DTYPE_PIPE */
	fifo_rwfiltops,			/* DTYPE_FIFO */
	kq_rwfiltops,			/* DTYPE_KQUEUE */
};

/*
 * table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&rwtype_filtops,		/* EVFILT_READ */
	&rwtype_filtops,		/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&vn_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};

static int
filt_nullattach(struct knote *kn)
{
	return (ENXIO);
}

/*
 * file-type specific attach routine for read/write filters
 */
static int
filt_rwtypattach(struct knote *kn)
{
	struct filterops *fops;

	fops = rwtypfilt_sw[kn->kn_fp->f_type];
	if (fops == NULL)
		return (EINVAL);
	kn->kn_fop = &fops[~kn->kn_filter];	/* convert to 0-base index */
	return (kn->kn_fop->f_attach(kn));
}

static int
filt_kqattach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (! PRISON_CHECK(curproc, p))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}
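
/*
 * Illustrative userland usage of the process filter (a sketch only, not
 * part of this file; `pid' is a hypothetical target, and EV_SET() is the
 * convenience macro from <sys/event.h>): watch a process for exit, and
 * follow any children it forks.
 *
 *	struct kevent kev, *kevp = &kev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
 *	kevent(kq, &kevp, 1, NULL, 0, NULL);
 *
 * With NOTE_TRACK set, filt_proc() below attaches a new knote to each
 * child the target forks (using the kernel-internal EV_FLAG1), and the
 * child's first event reports NOTE_CHILD with the parent's pid in data.
 */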

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

int
kqueue(struct proc *p, struct kqueue_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK);
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	p->p_retval[0] = fd;
	fdp->fd_knlistsize = 0;		/* mark this fdesc as having a kq */
	kq->kq_fdp = fdp;
	return (error);
}
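
/*
 * Illustrative userland usage of the two syscalls (a sketch only, not
 * part of this file; `fd' is a hypothetical descriptor).  Note that in
 * this version of the interface the changelist is an array of pointers
 * to kevents, matching kevent_args below, while results come back as a
 * flat array of kevents.
 *
 *	struct kevent change, event, *changep = &change;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	n = kevent(kq, &changep, 1, &event, 1, NULL);
 *
 * On return, event.ident names the ready descriptor and event.data
 * reports how much may be read, as filled in by the EVFILT_READ filter.
 */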

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	int	nchanges;
	struct	kevent **changelist;
	int	nevents;
	struct	kevent *eventlist;
	struct	timespec *timeout;
};
#endif
int
kevent(struct proc *p, struct kevent_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kevent kev;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	if (uap->timeout != NULL) {
		error = copyin((caddr_t)uap->timeout, (caddr_t)&ts,
		    sizeof(ts));
		if (error)
			return (error);
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin((caddr_t)uap->changelist, (caddr_t)kq->kq_kevp,
		    n * sizeof(struct kevent *));
		if (error)
			return (error);
		for (i = 0; i < n; i++) {
			error = copyin((caddr_t)kq->kq_kevp[i],
			    (caddr_t)&kev, sizeof(kev));
			if (error)
				return (error);
			kev.flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, &kev, p);
			if (error) {
				if (uap->nevents != 0) {
					kev.flags = EV_ERROR;
					kev.data = error;
					(void) copyout((caddr_t)&kev,
					    (caddr_t)uap->eventlist,
					    sizeof(kev));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					return (error);
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		p->p_retval[0] = nerrors;
		return (0);
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
	return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor; ignore invalid descriptors */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (0);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0))
		goto done;

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		int attach = 0;

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL)
				return (ENOMEM);
			if (fp != NULL)
				fhold(fp);
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			attach = 1;
		}
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
		kev->fflags = 0;
		kev->data = 0;
		kn->kn_kevent = *kev;

		if (attach) {
			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		}
		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	return (error);
}
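
/*
 * A note on the filter-number decoding above: system filters are small
 * negative integers, so one's complement maps them to 0-based table
 * indices.  EVFILT_READ (-1), for example, selects sysfilt_ops[0], whose
 * filt_rwtypattach() then indexes rwtypfilt_sw[] by the descriptor's
 * f_type to find the file-type specific read/write filterops.
 */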

static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	struct timespec *tsp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		timeout = atv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	p->p_retval[0] = maxevents - count;
	return (error);
}
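
/*
 * The stack-allocated `marker' knote above bounds a single pass over
 * kq_head: knotes that remain active and are neither EV_ONESHOT nor
 * EV_CLEAR are reinserted at the tail, behind the marker, so the scan
 * stops when the marker is dequeued instead of revisiting them.  If the
 * marker comes off with nothing harvested (count == maxevents), the
 * scan goes back to sleep via the retry path.
 */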

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	return (0);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_TEMP);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}
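
/*
 * Since a kqueue is itself a file (see kqueueops above), it can be
 * polled, selected on, or registered with another kqueue.  kqueue_wakeup()
 * serves all three: it rouses tsleep()ers in kqueue_scan(), notifies
 * select/poll via kq_sel, and runs KNOTE() over the si_note list that
 * filt_kqattach() and filt_kqueue() maintain, so a parent kqueue sees
 * EVFILT_READ with kn_data == kq_count when events are pending here.
 */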

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}
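
/*
 * Storage summary: knotes for file descriptors live in the per-process
 * fd_knlist array, grown in KQEXTENT steps by knote_attach(); knotes on
 * non-descriptor identifiers (processes, signals) hash into fd_knhash
 * via KN_HASH().  knote_drop() undoes whichever attachment was made, and
 * the knotes themselves come from the KNOTE vm_zone created at boot by
 * knote_init().
 */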