xref: /freebsd/sys/kern/kern_event.c (revision 0fddbf874719b9bd50cf66ac26d1140bb3f2be69)
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct proc *p);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_kqfilter,
	kqueue_stat,
	kqueue_close
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;

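/*
 * Mark a knote active and, unless it is already queued or has been
 * disabled, put it on its kqueue's pending queue.
 */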
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while (0)

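/*
 * Hash table for knotes whose identifier is not a file descriptor;
 * KN_HASH() XOR-folds the ident down into the table mask.
 */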
#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
};
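
/*
 * System filters are identified by negative numbers (EVFILT_READ is
 * -1, EVFILT_WRITE -2, and so on), so kqueue_register() indexes this
 * table with ~kev->filter to obtain a 0-based slot.
 */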

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curproc, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
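
/*
 * Usage sketch (userland, hedged; the pid is illustrative): to follow
 * fork()s of pid 100 one might register
 *
 *	EV_SET(&kev, 100, EVFILT_PROC, EV_ADD, NOTE_TRACK | NOTE_EXIT,
 *	    0, NULL);
 *
 * Each child then appears as a NOTE_CHILD event whose data field
 * carries the parent pid, as constructed by the EV_FLAG1 path above.
 */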

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout_handle ch;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		ch = timeout(filt_timerexpire, kn, tticks);
		kn->kn_hook = (caddr_t)ch.callout;
	}
}

/*
 * data contains the amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout_handle ch;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	ch = timeout(filt_timerexpire, kn, tticks);
	kn->kn_hook = (caddr_t)ch.callout;

	return (0);
}
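
/*
 * Example (hedged; the ident value is illustrative): a periodic 500ms
 * timer could be registered from userland as
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *
 * EV_CLEAR is set automatically above, so kn_data counts expirations
 * since the event was last retrieved.
 */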

static void
filt_timerdetach(struct knote *kn)
{
	struct callout_handle ch;

	ch.callout = (struct callout *)kn->kn_hook;
	untimeout(filt_timerexpire, kn, ch);
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

/*
 * MPSAFE
 */
int
kqueue(struct proc *p, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	mtx_lock(&Giant);
	fdp = p->p_fd;
	error = falloc(p, &fp, &fd);
	if (error)
		goto done2;
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	p->p_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
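
/*
 * Userland usage sketch (hedged; "fd" is whatever descriptor is being
 * watched):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *
 * registers the change, and a later kevent(kq, NULL, 0, &ev, 1, NULL)
 * blocks until the descriptor becomes readable.
 */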
/*
 * MPSAFE
 */
int
kevent(struct proc *p, struct kevent_args *uap)
{
	struct filedesc *fdp;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, n, nerrors, error;

	mtx_lock(&Giant);
	fdp = p->p_fd;
	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto done;
	}
	fhold(fp);

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		p->p_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
done:
	if (fp != NULL)
		fdrop(fp, p);
	mtx_unlock(&Giant);
	return (error);
}

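/*
 * Apply a single change to a kqueue: look up the knote matching
 * (ident, filter), then add, modify, delete, enable or disable it
 * according to kev->flags.
 */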
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter that has already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

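/*
 * Dequeue pending events into the user's eventlist, sleeping for at
 * most the supplied timeout.  A marker knote placed at the tail bounds
 * the scan, so knotes requeued behind it are not examined twice in
 * one pass.
 */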
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
			24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	p->p_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_TEMP);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
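
/*
 * Event sources call this through the KNOTE() macro on their klist
 * when state changes; e.g. (hedged sketch, "sc_sel" is a hypothetical
 * driver selinfo) a driver might do
 *
 *	KNOTE(&sc->sc_sel.si_note, 0);
 *
 * after new data arrives.
 */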

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

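/*
 * Hook the knote into the owning process's descriptor table: fd-based
 * knotes hang off fd_knlist (indexed by descriptor and grown in
 * KQEXTENT-sized steps), all others off the fd_knhash hash table.
 */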
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}
979