xref: /freebsd/sys/kern/kern_event.c (revision c68159a6d8eede11766cf13896d0f7670dbd51aa)
/*-
 * Copyright (c) 1999,2000 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

static int	filt_nullattach(struct knote *kn);
static int	filt_rwtypattach(struct knote *kn);
static int	filt_kqattach(struct knote *kn);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct proc *p);
static int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static vm_zone_t	knote_zone;

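/*
 * Mark a knote as active and, unless it is already queued or has been
 * disabled, place it on its kqueue's pending queue.
 */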
#define KNOTE_ACTIVATE(kn) do {					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_stat,
	kqueue_close
};

extern struct filterops so_rwfiltops[];
extern struct filterops fifo_rwfiltops[];
extern struct filterops pipe_rwfiltops[];
extern struct filterops vn_rwfiltops[];

static struct filterops kq_rwfiltops[] = {
    { 1, filt_kqattach, filt_kqdetach, filt_kqueue },
    { 1, filt_nullattach, NULL, NULL },
};

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
extern struct filterops vn_filtops;

static struct filterops rwtype_filtops =
	{ 1, filt_rwtypattach, NULL, NULL };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };

/*
 * XXX
 * These must match the order of defines in <sys/file.h>
 */
static struct filterops *rwtypfilt_sw[] = {
	NULL,				/* 0 */
	vn_rwfiltops,			/* DTYPE_VNODE */
	so_rwfiltops,			/* DTYPE_SOCKET */
	pipe_rwfiltops,			/* DTYPE_PIPE */
	fifo_rwfiltops,			/* DTYPE_FIFO */
	kq_rwfiltops,			/* DTYPE_KQUEUE */
};

/*
 * Table of all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&rwtype_filtops,		/* EVFILT_READ */
	&rwtype_filtops,		/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&vn_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};

static int
filt_nullattach(struct knote *kn)
{
	return (ENXIO);
}

/*
 * file-type specific attach routine for read/write filters
 */
static int
filt_rwtypattach(struct knote *kn)
{
	struct filterops *fops;

	fops = rwtypfilt_sw[kn->kn_fp->f_type];
	if (fops == NULL)
		return (EINVAL);
	kn->kn_fop = &fops[~kn->kn_filter];	/* convert to 0-base index */
	return (kn->kn_fop->f_attach(kn));
}

static int
filt_kqattach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (p_can(curproc, p, P_CAN_SEE, NULL))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

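/*
 * Filter event routine for EVFILT_PROC: record any requested NOTE_* events,
 * mark the knote finished when the process exits, and, if NOTE_TRACK was
 * requested, attach a new knote to a forked child so it is tracked as well.
 */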
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

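/*
 * kqueue(2) system call: allocate a new kqueue, wrap it in a file of type
 * DTYPE_KQUEUE and return the new descriptor to the caller.
 */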
int
kqueue(struct proc *p, struct kqueue_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	p->p_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
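/*
 * kevent(2) system call: apply the changelist to the kqueue in chunks of
 * KQ_NEVENTS, reporting registration failures through the eventlist when
 * possible, then scan for (and copy out) up to nevents triggered events.
 */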
int
kevent(struct proc *p, struct kevent_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, n, nerrors, error;

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	fhold(fp);

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		p->p_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

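/*
 * Apply a single change to the kqueue: look up an existing knote matching
 * the (identifier, filter) pair, create one for EV_ADD if none exists,
 * delete it for EV_DELETE, and handle EV_ENABLE/EV_DISABLE.
 */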
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * The file reference is now held by the knote, so
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

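/*
 * Scan the kqueue for triggered events: sleep (bounded by the caller's
 * timeout) until something is pending, then walk the pending queue,
 * re-checking each filter and copying out at most maxevents kevents.
 */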
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
			24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	p->p_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

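/*
 * Tear down the kqueue: detach and free every knote registered on it,
 * searching both the per-descriptor knote lists and the identifier hash,
 * then release the kqueue itself.
 */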
/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_TEMP);
	fp->f_data = NULL;

	return (0);
}

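/*
 * Wake up anything waiting on the kqueue: sleepers in kqueue_scan(),
 * selectors recorded via kqueue_poll(), and any knotes attached to the
 * kqueue itself (a kqueue may be monitored by another kqueue).
 */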
static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

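/*
 * Hook a freshly allocated knote into the owning filedesc: descriptor-based
 * knotes go on the per-fd knlist (grown in KQEXTENT increments as needed),
 * all others go into the identifier hash table.
 */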
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}


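/*
 * Add the knote to its kqueue's pending queue and wake up any waiters;
 * knote_dequeue() below performs the inverse operation.
 */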
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}