xref: /freebsd/sys/kern/kern_event.c (revision c37420b0d5b3b6ef875fbf0b84a13f6f09be56d6)
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/malloc.h>
38 #include <sys/unistd.h>
39 #include <sys/file.h>
40 #include <sys/filedesc.h>
41 #include <sys/filio.h>
42 #include <sys/fcntl.h>
43 #include <sys/kthread.h>
44 #include <sys/selinfo.h>
45 #include <sys/queue.h>
46 #include <sys/event.h>
47 #include <sys/eventvar.h>
48 #include <sys/poll.h>
49 #include <sys/protosw.h>
50 #include <sys/sigio.h>
51 #include <sys/signalvar.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/stat.h>
55 #include <sys/sysctl.h>
56 #include <sys/sysproto.h>
57 #include <sys/taskqueue.h>
58 #include <sys/uio.h>
59 
60 #include <vm/uma.h>
61 
62 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
63 /*
64  * This lock is used if multiple kq locks are required.  This possibly
65  * should be made into a per proc lock.
66  */
67 static struct mtx	kq_global;
68 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
69 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
70 	if (!haslck)				\
71 		mtx_lock(lck);			\
72 	haslck = 1;				\
73 } while (0)
74 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
75 	if (haslck)				\
76 		mtx_unlock(lck);			\
77 	haslck = 0;				\
78 } while (0)
79 
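/*
 * Private taskqueue (with its own thread) used by kqueue_schedtask() to
 * defer notification of knotes attached to a kqueue (e.g. another kqueue
 * watching this one) to a safe context.
 */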
80 TASKQUEUE_DEFINE_THREAD(kqueue);
81 
82 static int	kqueue_aquire(struct file *fp, struct kqueue **kqp);
83 static void	kqueue_release(struct kqueue *kq, int locked);
84 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
85 		    uintptr_t ident, int waitok);
86 static void	kqueue_task(void *arg, int pending);
87 static int	kqueue_scan(struct kqueue *kq, int maxevents,
88 		    struct kevent *ulistp, const struct timespec *timeout,
89 		    struct kevent *keva, struct thread *td);
90 static void 	kqueue_wakeup(struct kqueue *kq);
91 static struct filterops *kqueue_fo_find(int filt);
92 static void	kqueue_fo_release(int filt);
93 
94 static fo_rdwr_t	kqueue_read;
95 static fo_rdwr_t	kqueue_write;
96 static fo_ioctl_t	kqueue_ioctl;
97 static fo_poll_t	kqueue_poll;
98 static fo_kqfilter_t	kqueue_kqfilter;
99 static fo_stat_t	kqueue_stat;
100 static fo_close_t	kqueue_close;
101 
102 static struct fileops kqueueops = {
103 	.fo_read = kqueue_read,
104 	.fo_write = kqueue_write,
105 	.fo_ioctl = kqueue_ioctl,
106 	.fo_poll = kqueue_poll,
107 	.fo_kqfilter = kqueue_kqfilter,
108 	.fo_stat = kqueue_stat,
109 	.fo_close = kqueue_close,
110 };
111 
112 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
113 static void 	knote_drop(struct knote *kn, struct thread *td);
114 static void 	knote_enqueue(struct knote *kn);
115 static void 	knote_dequeue(struct knote *kn);
116 static void 	knote_init(void);
117 static struct 	knote *knote_alloc(int waitok);
118 static void 	knote_free(struct knote *kn);
119 
120 static void	filt_kqdetach(struct knote *kn);
121 static int	filt_kqueue(struct knote *kn, long hint);
122 static int	filt_procattach(struct knote *kn);
123 static void	filt_procdetach(struct knote *kn);
124 static int	filt_proc(struct knote *kn, long hint);
125 static int	filt_fileattach(struct knote *kn);
126 static void	filt_timerexpire(void *knx);
127 static int	filt_timerattach(struct knote *kn);
128 static void	filt_timerdetach(struct knote *kn);
129 static int	filt_timer(struct knote *kn, long hint);
130 
131 static struct filterops file_filtops =
132 	{ 1, filt_fileattach, NULL, NULL };
133 static struct filterops kqread_filtops =
134 	{ 1, NULL, filt_kqdetach, filt_kqueue };
135 /* XXX - move to kern_proc.c?  */
136 static struct filterops proc_filtops =
137 	{ 0, filt_procattach, filt_procdetach, filt_proc };
138 static struct filterops timer_filtops =
139 	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
140 
141 static uma_zone_t	knote_zone;
142 static int 		kq_ncallouts = 0;
143 static int 		kq_calloutmax = (4 * 1024);
144 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
145     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
146 
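/*
 * Mark a knote active and, unless it is disabled or already queued, place it
 * on its kqueue's pending queue.  "islock" indicates whether the caller
 * already holds the kq lock.
 */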
147 /* XXX - ensure not KN_INFLUX?? */
148 #define KNOTE_ACTIVATE(kn, islock) do { 				\
149 	if ((islock))							\
150 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
151 	else								\
152 		KQ_LOCK((kn)->kn_kq);					\
153 	(kn)->kn_status |= KN_ACTIVE;					\
154 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
155 		knote_enqueue((kn));					\
156 	if (!(islock))							\
157 		KQ_UNLOCK((kn)->kn_kq);					\
158 } while(0)
159 #define KQ_LOCK(kq) do {						\
160 	mtx_lock(&(kq)->kq_lock);					\
161 } while (0)
162 #define KQ_FLUX_WAKEUP(kq) do {						\
163 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
164 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
165 		wakeup((kq));						\
166 	}								\
167 } while (0)
168 #define KQ_UNLOCK_FLUX(kq) do {						\
169 	KQ_FLUX_WAKEUP(kq);						\
170 	mtx_unlock(&(kq)->kq_lock);					\
171 } while (0)
172 #define KQ_UNLOCK(kq) do {						\
173 	mtx_unlock(&(kq)->kq_lock);					\
174 } while (0)
175 #define KQ_OWNED(kq) do {						\
176 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
177 } while (0)
178 #define KQ_NOTOWNED(kq) do {						\
179 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
180 } while (0)
181 #define KN_LIST_LOCK(kn) do {						\
182 	if (kn->kn_knlist != NULL)					\
183 		mtx_lock(kn->kn_knlist->kl_lock);			\
184 } while (0)
185 #define KN_LIST_UNLOCK(kn) do {						\
186 	if (kn->kn_knlist != NULL)					\
187 		mtx_unlock(kn->kn_knlist->kl_lock);			\
188 } while (0)
189 
190 #define	KN_HASHSIZE		64		/* XXX should be tunable */
191 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
192 
193 static int
194 filt_nullattach(struct knote *kn)
195 {
196 
197 	return (ENXIO);
198 };
199 
200 struct filterops null_filtops =
201 	{ 0, filt_nullattach, NULL, NULL };
202 
203 /* XXX - make SYSINIT to add these, and move into respective modules. */
204 extern struct filterops sig_filtops;
205 extern struct filterops fs_filtops;
206 
207 /*
208  * Table for all system-defined filters.
209  */
210 static struct mtx	filterops_lock;
211 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
212 	MTX_DEF);
213 static struct {
214 	struct filterops *for_fop;
215 	int for_refcnt;
216 } sysfilt_ops[EVFILT_SYSCOUNT] = {
217 	{ &file_filtops },			/* EVFILT_READ */
218 	{ &file_filtops },			/* EVFILT_WRITE */
219 	{ &null_filtops },			/* EVFILT_AIO */
220 	{ &file_filtops },			/* EVFILT_VNODE */
221 	{ &proc_filtops },			/* EVFILT_PROC */
222 	{ &sig_filtops },			/* EVFILT_SIGNAL */
223 	{ &timer_filtops },			/* EVFILT_TIMER */
224 	{ &file_filtops },			/* EVFILT_NETDEV */
225 	{ &fs_filtops },			/* EVFILT_FS */
226 };
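
/*
 * System filter identifiers (EVFILT_*) are small negative integers, so ~filt
 * is used to index sysfilt_ops[]: e.g. EVFILT_READ is -1 and ~(-1) == 0, the
 * first slot above.  kqueue_fo_find() and kqueue_fo_release() rely on this.
 */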
227 
228 /*
229  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
230  * method.
231  */
232 static int
233 filt_fileattach(struct knote *kn)
234 {
235 
236 	return (fo_kqfilter(kn->kn_fp, kn));
237 }
238 
239 /*ARGSUSED*/
240 static int
241 kqueue_kqfilter(struct file *fp, struct knote *kn)
242 {
243 	struct kqueue *kq = kn->kn_fp->f_data;
244 
245 	if (kn->kn_filter != EVFILT_READ)
246 		return (EINVAL);
247 
248 	kn->kn_status |= KN_KQUEUE;
249 	kn->kn_fop = &kqread_filtops;
250 	knlist_add(&kq->kq_sel.si_note, kn, 0);
251 
252 	return (0);
253 }
254 
255 static void
256 filt_kqdetach(struct knote *kn)
257 {
258 	struct kqueue *kq = kn->kn_fp->f_data;
259 
260 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
261 }
262 
263 /*ARGSUSED*/
264 static int
265 filt_kqueue(struct knote *kn, long hint)
266 {
267 	struct kqueue *kq = kn->kn_fp->f_data;
268 
269 	kn->kn_data = kq->kq_count;
270 	return (kn->kn_data > 0);
271 }
272 
273 /* XXX - move to kern_proc.c?  */
274 static int
275 filt_procattach(struct knote *kn)
276 {
277 	struct proc *p;
278 	int immediate;
279 	int error;
280 
281 	immediate = 0;
282 	p = pfind(kn->kn_id);
283 	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
284 		p = zpfind(kn->kn_id);
285 		immediate = 1;
286 	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
287 		immediate = 1;
288 	}
289 
290 	if (p == NULL)
291 		return (ESRCH);
292 	if ((error = p_cansee(curthread, p)))
293 		return (error);
294 
295 	kn->kn_ptr.p_proc = p;
296 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
297 
298 	/*
299 	 * internal flag indicating registration done by kernel
300 	 */
301 	if (kn->kn_flags & EV_FLAG1) {
302 		kn->kn_data = kn->kn_sdata;		/* ppid */
303 		kn->kn_fflags = NOTE_CHILD;
304 		kn->kn_flags &= ~EV_FLAG1;
305 	}
306 
307 	if (immediate == 0)
308 		knlist_add(&p->p_klist, kn, 1);
309 
310 	/*
311 	 * Immediately activate any exit notes if the target process is a
312 	 * zombie.  This is necessary to handle the case where the target
313 	 * process, e.g. a child, dies before the kevent is registered.
314 	 */
315 	if (immediate && filt_proc(kn, NOTE_EXIT))
316 		KNOTE_ACTIVATE(kn, 0);
317 
318 	PROC_UNLOCK(p);
319 
320 	return (0);
321 }
322 
323 /*
324  * The knote may be attached to a different process, which may exit,
325  * leaving nothing for the knote to be attached to.  So when the process
326  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
327  * it will be deleted when read out.  However, as part of the knote deletion,
328  * this routine is called, so a check is needed to avoid actually performing
329  * a detach, because the original process does not exist any more.
330  */
331 /* XXX - move to kern_proc.c?  */
332 static void
333 filt_procdetach(struct knote *kn)
334 {
335 	struct proc *p;
336 
337 	if (kn->kn_status & KN_DETACHED)
338 		return;
339 
340 	p = kn->kn_ptr.p_proc;
341 	knlist_remove(&p->p_klist, kn, 0);
342 	kn->kn_ptr.p_proc = NULL;
343 }
344 
345 /* XXX - move to kern_proc.c?  */
346 static int
347 filt_proc(struct knote *kn, long hint)
348 {
349 	struct proc *p = kn->kn_ptr.p_proc;
350 	u_int event;
351 
352 	/*
353 	 * mask off extra data
354 	 */
355 	event = (u_int)hint & NOTE_PCTRLMASK;
356 
357 	/*
358 	 * if the user is interested in this event, record it.
359 	 */
360 	if (kn->kn_sfflags & event)
361 		kn->kn_fflags |= event;
362 
363 	/*
364 	 * process is gone, so flag the event as finished.
365 	 */
366 	if (event == NOTE_EXIT) {
367 		if (!(kn->kn_status & KN_DETACHED))
368 			knlist_remove_inevent(&p->p_klist, kn);
369 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
370 		kn->kn_ptr.p_proc = NULL;
371 		return (1);
372 	}
373 
374 	/*
375 	 * process forked, and user wants to track the new process,
376 	 * so attach a new knote to it, and immediately report an
377 	 * event with the parent's pid.
378 	 */
379 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
380 		struct kevent kev;
381 		int error;
382 
383 		/*
384 		 * register knote with new process.
385 		 */
386 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
387 		kev.filter = kn->kn_filter;
388 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
389 		kev.fflags = kn->kn_sfflags;
390 		kev.data = kn->kn_id;			/* parent */
391 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
392 		error = kqueue_register(kn->kn_kq, &kev, NULL, 0);
393 		if (error)
394 			kn->kn_fflags |= NOTE_TRACKERR;
395 	}
396 
397 	return (kn->kn_fflags != 0);
398 }
399 
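/*
 * Convert a timer period given in milliseconds (the EVFILT_TIMER convention)
 * into clock ticks for callout_reset().
 */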
400 static int
401 timertoticks(intptr_t data)
402 {
403 	struct timeval tv;
404 	int tticks;
405 
406 	tv.tv_sec = data / 1000;
407 	tv.tv_usec = (data % 1000) * 1000;
408 	tticks = tvtohz(&tv);
409 
410 	return tticks;
411 }
412 
413 /* XXX - move to kern_timeout.c? */
414 static void
415 filt_timerexpire(void *knx)
416 {
417 	struct knote *kn = knx;
418 	struct callout *calloutp;
419 
420 	kn->kn_data++;
421 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
422 
423 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
424 		calloutp = (struct callout *)kn->kn_hook;
425 		callout_reset(calloutp, timertoticks(kn->kn_sdata),
426 		    filt_timerexpire, kn);
427 	}
428 }
429 
430 /*
431  * data contains amount of time to sleep, in milliseconds
432  */
433 /* XXX - move to kern_timeout.c? */
434 static int
435 filt_timerattach(struct knote *kn)
436 {
437 	struct callout *calloutp;
438 
439 	atomic_add_int(&kq_ncallouts, 1);
440 
441 	if (kq_ncallouts >= kq_calloutmax) {
442 		atomic_add_int(&kq_ncallouts, -1);
443 		return (ENOMEM);
444 	}
445 
446 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
447 	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
448 	    M_KQUEUE, M_WAITOK);
449 	callout_init(calloutp, 1);
450 	kn->kn_hook = calloutp;
451 	callout_reset(calloutp, timertoticks(kn->kn_sdata), filt_timerexpire,
452 	    kn);
453 
454 	return (0);
455 }
456 
457 /* XXX - move to kern_timeout.c? */
458 static void
459 filt_timerdetach(struct knote *kn)
460 {
461 	struct callout *calloutp;
462 
463 	calloutp = (struct callout *)kn->kn_hook;
464 	callout_drain(calloutp);
465 	FREE(calloutp, M_KQUEUE);
466 	atomic_add_int(&kq_ncallouts, -1);
467 }
468 
469 /* XXX - move to kern_timeout.c? */
470 static int
471 filt_timer(struct knote *kn, long hint)
472 {
473 
474 	return (kn->kn_data != 0);
475 }
476 
477 /*
478  * MPSAFE
479  */
480 int
481 kqueue(struct thread *td, struct kqueue_args *uap)
482 {
483 	struct filedesc *fdp;
484 	struct kqueue *kq;
485 	struct file *fp;
486 	int fd, error;
487 
488 	fdp = td->td_proc->p_fd;
489 	error = falloc(td, &fp, &fd);
490 	if (error)
491 		goto done2;
492 
493 	/* An extra reference on `fp' has been held for us by falloc(). */
494 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
495 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
496 	TAILQ_INIT(&kq->kq_head);
497 	kq->kq_fdp = fdp;
498 	knlist_init(&kq->kq_sel.si_note, &kq->kq_lock);
499 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
500 
501 	FILEDESC_LOCK(fdp);
502 	SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
503 	FILEDESC_UNLOCK(fdp);
504 
505 	FILE_LOCK(fp);
506 	fp->f_flag = FREAD | FWRITE;
507 	fp->f_type = DTYPE_KQUEUE;
508 	fp->f_ops = &kqueueops;
509 	fp->f_data = kq;
510 	FILE_UNLOCK(fp);
511 	fdrop(fp, td);
512 
513 	td->td_retval[0] = fd;
514 done2:
515 	return (error);
516 }
517 
518 #ifndef _SYS_SYSPROTO_H_
519 struct kevent_args {
520 	int	fd;
521 	const struct kevent *changelist;
522 	int	nchanges;
523 	struct	kevent *eventlist;
524 	int	nevents;
525 	const struct timespec *timeout;
526 };
527 #endif
528 /*
529  * MPSAFE
530  */
531 int
532 kevent(struct thread *td, struct kevent_args *uap)
533 {
534 	struct kevent keva[KQ_NEVENTS];
535 	struct kevent *kevp;
536 	struct kqueue *kq;
537 	struct file *fp;
538 	struct timespec ts;
539 	int i, n, nerrors, error;
540 
541 	if ((error = fget(td, uap->fd, &fp)) != 0)
542 		return (error);
543 	if ((error = kqueue_aquire(fp, &kq)) != 0)
544 		goto done_norel;
545 
546 	if (uap->timeout != NULL) {
547 		error = copyin(uap->timeout, &ts, sizeof(ts));
548 		if (error)
549 			goto done;
550 		uap->timeout = &ts;
551 	}
552 
553 	nerrors = 0;
554 
555 	while (uap->nchanges > 0) {
556 		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
557 		error = copyin(uap->changelist, keva,
558 		    n * sizeof *keva);
559 		if (error)
560 			goto done;
561 		for (i = 0; i < n; i++) {
562 			kevp = &keva[i];
563 			kevp->flags &= ~EV_SYSFLAGS;
564 			error = kqueue_register(kq, kevp, td, 1);
565 			if (error) {
566 				if (uap->nevents != 0) {
567 					kevp->flags = EV_ERROR;
568 					kevp->data = error;
569 					(void) copyout(kevp,
570 					    uap->eventlist,
571 					    sizeof(*kevp));
572 					uap->eventlist++;
573 					uap->nevents--;
574 					nerrors++;
575 				} else {
576 					goto done;
577 				}
578 			}
579 		}
580 		uap->nchanges -= n;
581 		uap->changelist += n;
582 	}
583 	if (nerrors) {
584 		td->td_retval[0] = nerrors;
585 		error = 0;
586 		goto done;
587 	}
588 
589 	error = kqueue_scan(kq, uap->nevents, uap->eventlist, uap->timeout,
590 	    keva, td);
591 done:
592 	kqueue_release(kq, 0);
593 done_norel:
594 	if (fp != NULL)
595 		fdrop(fp, td);
596 	return (error);
597 }
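
/*
 * Illustrative userland use of the two system calls above (a sketch, not
 * kernel code; assumes "fd" is some readable descriptor):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	(void)kevent(kq, &ev, 1, NULL, 0, NULL);	(register the change)
 *	(void)kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for an event)
 */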
598 
599 int
600 kqueue_add_filteropts(int filt, struct filterops *filtops)
601 {
602 	int error = 0;
603 
604 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
605 		printf(
606 "trying to add a filterop that is out of range: %d is beyond %d\n",
607 		    ~filt, EVFILT_SYSCOUNT);
608 		return EINVAL;
609 	}
610 	mtx_lock(&filterops_lock);
611 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
612 	    sysfilt_ops[~filt].for_fop != NULL)
613 		error = EEXIST;
614 	else {
615 		sysfilt_ops[~filt].for_fop = filtops;
616 		sysfilt_ops[~filt].for_refcnt = 0;
617 	}
618 	mtx_unlock(&filterops_lock);
619 
620 	return (error);
621 }
622 
623 int
624 kqueue_del_filteropts(int filt)
625 {
626 	int error;
627 
628 	error = 0;
629 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
630 		return EINVAL;
631 
632 	mtx_lock(&filterops_lock);
633 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
634 	    sysfilt_ops[~filt].for_fop == NULL)
635 		error = EINVAL;
636 	else if (sysfilt_ops[~filt].for_refcnt != 0)
637 		error = EBUSY;
638 	else {
639 		sysfilt_ops[~filt].for_fop = &null_filtops;
640 		sysfilt_ops[~filt].for_refcnt = 0;
641 	}
642 	mtx_unlock(&filterops_lock);
643 
644 	return error;
645 }
646 
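/*
 * Look up the filterops for a system filter and take a reference on the
 * table entry; every successful kqueue_fo_find() must be paired with a
 * kqueue_fo_release() of the same filter.
 */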
647 static struct filterops *
648 kqueue_fo_find(int filt)
649 {
650 
651 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
652 		return NULL;
653 
654 	mtx_lock(&filterops_lock);
655 	sysfilt_ops[~filt].for_refcnt++;
656 	if (sysfilt_ops[~filt].for_fop == NULL)
657 		sysfilt_ops[~filt].for_fop = &null_filtops;
658 	mtx_unlock(&filterops_lock);
659 
660 	return sysfilt_ops[~filt].for_fop;
661 }
662 
663 static void
664 kqueue_fo_release(int filt)
665 {
666 
667 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
668 		return;
669 
670 	mtx_lock(&filterops_lock);
671 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
672 	    ("filter object refcount not valid on release"));
673 	sysfilt_ops[~filt].for_refcnt--;
674 	mtx_unlock(&filterops_lock);
675 }
676 
677 /*
678  * A ref to kq (obtained via kqueue_aquire) should be held.  waitok controls
679  * whether memory allocation may sleep.  Make sure it is 0 if you
680  * hold any mutexes.
681  */
682 int
683 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
684 {
685 	struct filedesc *fdp;
686 	struct filterops *fops;
687 	struct file *fp;
688 	struct knote *kn, *tkn;
689 	int error, filt, event;
690 	int haskqglobal;
691 	int fd;
692 
693 	fdp = NULL;
694 	fp = NULL;
695 	kn = NULL;
696 	error = 0;
697 	haskqglobal = 0;
698 
699 	filt = kev->filter;
700 	fops = kqueue_fo_find(filt);
701 	if (fops == NULL)
702 		return EINVAL;
703 
704 	tkn = knote_alloc(waitok);		/* prevent waiting with locks */
705 
706 findkn:
707 	if (fops->f_isfd) {
708 		KASSERT(td != NULL, ("td is NULL"));
709 		fdp = td->td_proc->p_fd;
710 		FILEDESC_LOCK(fdp);
711 		/* validate descriptor */
712 		fd = kev->ident;
713 		if (fd < 0 || fd >= fdp->fd_nfiles ||
714 		    (fp = fdp->fd_ofiles[fd]) == NULL) {
715 			FILEDESC_UNLOCK(fdp);
716 			error = EBADF;
717 			goto done;
718 		}
719 		fhold(fp);
720 
721 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
722 		    kev->ident, 0) != 0) {
723 			/* unlock and try again */
724 			FILEDESC_UNLOCK(fdp);
725 			fdrop(fp, td);
726 			fp = NULL;
727 			error = kqueue_expand(kq, fops, kev->ident, waitok);
728 			if (error)
729 				goto done;
730 			goto findkn;
731 		}
732 
733 		if (fp->f_type == DTYPE_KQUEUE) {
734 			/*
735 			 * if we add some intelligence about what we are doing,
736 			 * we should be able to support events on ourselves.
737 			 * We need to know when we are doing this to prevent
738 			 * getting both the knlist lock and the kq lock since
739 			 * they are the same thing.
740 			 */
741 			if (fp->f_data == kq) {
742 				FILEDESC_UNLOCK(fdp);
743 				error = EINVAL;
744 				goto done_noglobal;
745 			}
746 
747 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
748 		}
749 
750 		KQ_LOCK(kq);
751 		if (kev->ident < kq->kq_knlistsize) {
752 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
753 				if (kev->filter == kn->kn_filter)
754 					break;
755 		}
756 		FILEDESC_UNLOCK(fdp);
757 	} else {
758 		if ((kev->flags & EV_ADD) == EV_ADD)
759 			kqueue_expand(kq, fops, kev->ident, waitok);
760 
761 		KQ_LOCK(kq);
762 		if (kq->kq_knhashmask != 0) {
763 			struct klist *list;
764 
765 			list = &kq->kq_knhash[
766 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
767 			SLIST_FOREACH(kn, list, kn_link)
768 				if (kev->ident == kn->kn_id &&
769 				    kev->filter == kn->kn_filter)
770 					break;
771 		}
772 	}
773 
774 	/* knote is in the process of changing, wait for it to stabilize. */
775 	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
776 		if (fp != NULL) {
777 			fdrop(fp, td);
778 			fp = NULL;
779 		}
780 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
781 		kq->kq_state |= KQ_FLUXWAIT;
782 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
783 		goto findkn;
784 	}
785 
786 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
787 		KQ_UNLOCK(kq);
788 		error = ENOENT;
789 		goto done;
790 	}
791 
792 	/*
793 	 * kn now contains the matching knote, or NULL if no match
794 	 */
795 	if (kev->flags & EV_ADD) {
796 		if (kn == NULL) {
797 			kn = tkn;
798 			tkn = NULL;
799 			if (kn == NULL) {
800 				error = ENOMEM;
801 				goto done;
802 			}
803 			kn->kn_fp = fp;
804 			kn->kn_kq = kq;
805 			kn->kn_fop = fops;
806 			/*
807 			 * apply reference counts to knote structure, and
808 			 * do not release it at the end of this routine.
809 			 */
810 			fops = NULL;
811 			fp = NULL;
812 
813 			kn->kn_sfflags = kev->fflags;
814 			kn->kn_sdata = kev->data;
815 			kev->fflags = 0;
816 			kev->data = 0;
817 			kn->kn_kevent = *kev;
818 			kn->kn_status = KN_INFLUX|KN_DETACHED;
819 
820 			error = knote_attach(kn, kq);
821 			KQ_UNLOCK(kq);
822 			if (error != 0) {
823 				tkn = kn;
824 				goto done;
825 			}
826 
827 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
828 				knote_drop(kn, td);
829 				goto done;
830 			}
831 			KN_LIST_LOCK(kn);
832 		} else {
833 			/*
834 			 * The user may change some filter values after the
835 			 * initial EV_ADD, but doing so will not reset any
836 			 * filter which has already been triggered.
837 			 */
838 			kn->kn_status |= KN_INFLUX;
839 			KQ_UNLOCK(kq);
840 			KN_LIST_LOCK(kn);
841 			kn->kn_sfflags = kev->fflags;
842 			kn->kn_sdata = kev->data;
843 			kn->kn_kevent.udata = kev->udata;
844 		}
845 
846 		/*
847 		 * We can get here with kn->kn_knlist == NULL.
848 		 * This can happen when the initial attach event decides that
849 		 * the event is "completed" already.  i.e. filt_procattach
850 		 * is called on a zombie process.  It will call filt_proc
851 		 * which will remove it from the list, and NULL kn_knlist.
852 		 */
853 		event = kn->kn_fop->f_event(kn, 0);
854 		KN_LIST_UNLOCK(kn);
855 		KQ_LOCK(kq);
856 		if (event)
857 			KNOTE_ACTIVATE(kn, 1);
858 		kn->kn_status &= ~KN_INFLUX;
859 	} else if (kev->flags & EV_DELETE) {
860 		kn->kn_status |= KN_INFLUX;
861 		KQ_UNLOCK(kq);
862 		kn->kn_fop->f_detach(kn);
863 		knote_drop(kn, td);
864 		goto done;
865 	}
866 
867 	if ((kev->flags & EV_DISABLE) &&
868 	    ((kn->kn_status & KN_DISABLED) == 0)) {
869 		kn->kn_status |= KN_DISABLED;
870 	}
871 
872 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
873 		kn->kn_status &= ~KN_DISABLED;
874 		if ((kn->kn_status & KN_ACTIVE) &&
875 		    ((kn->kn_status & KN_QUEUED) == 0))
876 			knote_enqueue(kn);
877 	}
878 	KQ_UNLOCK_FLUX(kq);
879 
880 done:
881 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
882 done_noglobal:
883 	if (fp != NULL)
884 		fdrop(fp, td);
885 	if (tkn != NULL)
886 		knote_free(tkn);
887 	if (fops != NULL)
888 		kqueue_fo_release(filt);
889 	return (error);
890 }
891 
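/*
 * Take a reference on the kqueue behind a file, failing with EBADF if the
 * file is not a kqueue or is already closing.  The reference is dropped with
 * kqueue_release().
 */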
892 static int
893 kqueue_aquire(struct file *fp, struct kqueue **kqp)
894 {
895 	int error;
896 	struct kqueue *kq;
897 
898 	error = 0;
899 
900 	FILE_LOCK(fp);
901 	do {
902 		kq = fp->f_data;
903 		if (fp->f_type != DTYPE_KQUEUE || kq == NULL) {
904 			error = EBADF;
905 			break;
906 		}
907 		*kqp = kq;
908 		KQ_LOCK(kq);
909 		if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
910 			KQ_UNLOCK(kq);
911 			error = EBADF;
912 			break;
913 		}
914 		kq->kq_refcnt++;
915 		KQ_UNLOCK(kq);
916 	} while (0);
917 	FILE_UNLOCK(fp);
918 
919 	return error;
920 }
921 
922 static void
923 kqueue_release(struct kqueue *kq, int locked)
924 {
925 	if (locked)
926 		KQ_OWNED(kq);
927 	else
928 		KQ_LOCK(kq);
929 	kq->kq_refcnt--;
930 	if (kq->kq_refcnt == 1)
931 		wakeup(&kq->kq_refcnt);
932 	if (!locked)
933 		KQ_UNLOCK(kq);
934 }
935 
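/*
 * With the kq lock held, arrange for the kqueue task to run so that knotes
 * attached to this kqueue are notified outside the current lock context.
 */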
936 static void
937 kqueue_schedtask(struct kqueue *kq)
938 {
939 
940 	KQ_OWNED(kq);
941 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
942 	    ("scheduling kqueue task while draining"));
943 
944 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
945 		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
946 		kq->kq_state |= KQ_TASKSCHED;
947 	}
948 }
949 
950 /*
951  * Expand the kq to make sure we have storage for fops/ident pair.
952  *
953  * Return 0 on success (or no work necessary), return errno on failure.
954  *
955  * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
956  * If kqueue_register is called from a non-fd context, there should usually
957  * be no locks held.
958  */
959 static int
960 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
961 	int waitok)
962 {
963 	struct klist *list, *tmp_knhash;
964 	u_long tmp_knhashmask;
965 	int size;
966 	int fd;
967 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
968 
969 	KQ_NOTOWNED(kq);
970 
971 	if (fops->f_isfd) {
972 		fd = ident;
973 		if (kq->kq_knlistsize <= fd) {
974 			size = kq->kq_knlistsize;
975 			while (size <= fd)
976 				size += KQEXTENT;
977 			MALLOC(list, struct klist *,
978 			    size * sizeof list, M_KQUEUE, mflag);
979 			if (list == NULL)
980 				return ENOMEM;
981 			KQ_LOCK(kq);
982 			if (kq->kq_knlistsize > fd) {
983 				FREE(list, M_KQUEUE);
984 				list = NULL;
985 			} else {
986 				if (kq->kq_knlist != NULL) {
987 					bcopy(kq->kq_knlist, list,
988 					    kq->kq_knlistsize * sizeof list);
989 					FREE(kq->kq_knlist, M_KQUEUE);
990 					kq->kq_knlist = NULL;
991 				}
992 				bzero((caddr_t)list +
993 				    kq->kq_knlistsize * sizeof list,
994 				    (size - kq->kq_knlistsize) * sizeof list);
995 				kq->kq_knlistsize = size;
996 				kq->kq_knlist = list;
997 			}
998 			KQ_UNLOCK(kq);
999 		}
1000 	} else {
1001 		if (kq->kq_knhashmask == 0) {
1002 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1003 			    &tmp_knhashmask);
1004 			if (tmp_knhash == NULL)
1005 				return ENOMEM;
1006 			KQ_LOCK(kq);
1007 			if (kq->kq_knhashmask == 0) {
1008 				kq->kq_knhash = tmp_knhash;
1009 				kq->kq_knhashmask = tmp_knhashmask;
1010 			} else {
1011 				free(tmp_knhash, M_KQUEUE);
1012 			}
1013 			KQ_UNLOCK(kq);
1014 		}
1015 	}
1016 
1017 	KQ_NOTOWNED(kq);
1018 	return 0;
1019 }
1020 
1021 static void
1022 kqueue_task(void *arg, int pending)
1023 {
1024 	struct kqueue *kq;
1025 	int haskqglobal;
1026 
1027 	haskqglobal = 0;
1028 	kq = arg;
1029 
1030 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1031 	KQ_LOCK(kq);
1032 
1033 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1034 
1035 	kq->kq_state &= ~KQ_TASKSCHED;
1036 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1037 		wakeup(&kq->kq_state);
1038 	}
1039 	KQ_UNLOCK(kq);
1040 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1041 }
1042 
1043 /*
1044  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1045  * We treat KN_MARKER knotes as if they are INFLUX.  The private marker knote
 * bounds the scan to the events that were pending when it began.
1046  */
1047 static int
1048 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
1049 	const struct timespec *tsp, struct kevent *keva, struct thread *td)
1050 {
1051 	struct kevent *kevp;
1052 	struct timeval atv, rtv, ttv;
1053 	struct knote *kn, *marker;
1054 	int count, timeout, nkev, error;
1055 	int haskqglobal;
1056 
1057 	count = maxevents;
1058 	nkev = 0;
1059 	error = 0;
1060 	haskqglobal = 0;
1061 
1062 	if (maxevents == 0)
1063 		goto done_nl;
1064 
1065 	if (tsp != NULL) {
1066 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
1067 		if (itimerfix(&atv)) {
1068 			error = EINVAL;
1069 			goto done_nl;
1070 		}
1071 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1072 			timeout = -1;
1073 		else
1074 			timeout = atv.tv_sec > 24 * 60 * 60 ?
1075 			    24 * 60 * 60 * hz : tvtohz(&atv);
1076 		getmicrouptime(&rtv);
1077 		timevaladd(&atv, &rtv);
1078 	} else {
1079 		atv.tv_sec = 0;
1080 		atv.tv_usec = 0;
1081 		timeout = 0;
1082 	}
1083 	marker = knote_alloc(1);
1084 	if (marker == NULL) {
1085 		error = ENOMEM;
1086 		goto done_nl;
1087 	}
1088 	marker->kn_status = KN_MARKER;
1089 	KQ_LOCK(kq);
1090 	goto start;
1091 
1092 retry:
1093 	if (atv.tv_sec || atv.tv_usec) {
1094 		getmicrouptime(&rtv);
1095 		if (timevalcmp(&rtv, &atv, >=))
1096 			goto done;
1097 		ttv = atv;
1098 		timevalsub(&ttv, &rtv);
1099 		timeout = ttv.tv_sec > 24 * 60 * 60 ?
1100 			24 * 60 * 60 * hz : tvtohz(&ttv);
1101 	}
1102 
1103 start:
1104 	kevp = keva;
1105 	if (kq->kq_count == 0) {
1106 		if (timeout < 0) {
1107 			error = EWOULDBLOCK;
1108 		} else {
1109 			kq->kq_state |= KQ_SLEEP;
1110 			error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
1111 			    "kqread", timeout);
1112 		}
1113 		if (error == 0)
1114 			goto retry;
1115 		/* don't restart after signals... */
1116 		if (error == ERESTART)
1117 			error = EINTR;
1118 		else if (error == EWOULDBLOCK)
1119 			error = 0;
1120 		goto done;
1121 	}
1122 
1123 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1124 	while (count) {
1125 		KQ_OWNED(kq);
1126 		kn = TAILQ_FIRST(&kq->kq_head);
1127 
1128 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1129 		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1130 			kq->kq_state |= KQ_FLUXWAIT;
1131 			error = msleep(kq, &kq->kq_lock, PSOCK,
1132 			    "kqflxwt", 0);
1133 			continue;
1134 		}
1135 
1136 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1137 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1138 			kn->kn_status &= ~KN_QUEUED;
1139 			kq->kq_count--;
1140 			continue;
1141 		}
1142 		if (kn == marker) {
1143 			KQ_FLUX_WAKEUP(kq);
1144 			if (count == maxevents)
1145 				goto retry;
1146 			goto done;
1147 		}
1148 		KASSERT((kn->kn_status & KN_INFLUX) == 0,
1149 		    ("KN_INFLUX set when not supposed to be"));
1150 
1151 		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1152 			kn->kn_status &= ~KN_QUEUED;
1153 			kn->kn_status |= KN_INFLUX;
1154 			kq->kq_count--;
1155 			KQ_UNLOCK(kq);
1156 			/*
1157 			 * We don't need to lock the list since we've marked
1158 			 * it _INFLUX.
1159 			 */
1160 			*kevp = kn->kn_kevent;
1161 			kn->kn_fop->f_detach(kn);
1162 			knote_drop(kn, td);
1163 			KQ_LOCK(kq);
1164 			kn = NULL;
1165 		} else {
1166 			kn->kn_status |= KN_INFLUX;
1167 			KQ_UNLOCK(kq);
1168 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1169 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1170 			KN_LIST_LOCK(kn);
1171 			if (kn->kn_fop->f_event(kn, 0) == 0) {
1172 				KN_LIST_UNLOCK(kn);
1173 				KQ_LOCK(kq);
1174 				kn->kn_status &=
1175 				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
1176 				kq->kq_count--;
1177 				continue;
1178 			}
1179 			*kevp = kn->kn_kevent;
1180 			KQ_LOCK(kq);
1181 			if (kn->kn_flags & EV_CLEAR) {
1182 				kn->kn_data = 0;
1183 				kn->kn_fflags = 0;
1184 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1185 				kq->kq_count--;
1186 			} else
1187 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1188 			KN_LIST_UNLOCK(kn);
1189 			kn->kn_status &= ~(KN_INFLUX);
1190 		}
1191 
1192 		/* we are returning a copy to the user */
1193 		kevp++;
1194 		nkev++;
1195 		count--;
1196 
1197 		if (nkev == KQ_NEVENTS) {
1198 			KQ_UNLOCK_FLUX(kq);
1199 			error = copyout(keva, ulistp, sizeof *keva * nkev);
1200 			ulistp += nkev;
1201 			nkev = 0;
1202 			kevp = keva;
1203 			KQ_LOCK(kq);
1204 			if (error)
1205 				break;
1206 		}
1207 	}
1208 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1209 done:
1210 	KQ_OWNED(kq);
1211 	KQ_UNLOCK_FLUX(kq);
1212 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1213 	knote_free(marker);
1214 done_nl:
1215 	KQ_NOTOWNED(kq);
1216 	if (nkev != 0)
1217 		error = copyout(keva, ulistp, sizeof *keva * nkev);
1218 	td->td_retval[0] = maxevents - count;
1219 	return (error);
1220 }
1221 
1222 /*
1223  * XXX
1224  * This could be expanded to call kqueue_scan, if desired.
1225  */
1226 /*ARGSUSED*/
1227 static int
1228 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
1229 	int flags, struct thread *td)
1230 {
1231 	return (ENXIO);
1232 }
1233 
1234 /*ARGSUSED*/
1235 static int
1236 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1237 	 int flags, struct thread *td)
1238 {
1239 	return (ENXIO);
1240 }
1241 
1242 /*ARGSUSED*/
1243 static int
1244 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1245 	struct ucred *active_cred, struct thread *td)
1246 {
1247 	/*
1248 	 * Enabling sigio causes two major problems:
1249 	 * 1) infinite recursion:
1250 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
1251 	 * set.  On receipt of a signal this will cause a kqueue to recurse
1252 	 * into itself over and over.  Sending the sigio causes the kqueue
1253 	 * to become ready, which in turn posts sigio again, forever.
1254 	 * Solution: this can be solved by setting a flag in the kqueue that
1255 	 * we have a SIGIO in progress.
1256 	 * 2) locking problems:
1257 	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
1258 	 * us above the proc and pgrp locks.
1259 	 * Solution: Post a signal using an async mechanism, being sure to
1260 	 * record a generation count in the delivery so that we do not deliver
1261 	 * a signal to the wrong process.
1262 	 *
1263 	 * Note, these two mechanisms are somewhat mutually exclusive!
1264 	 */
1265 #if 0
1266 	struct kqueue *kq;
1267 
1268 	kq = fp->f_data;
1269 	switch (cmd) {
1270 	case FIOASYNC:
1271 		if (*(int *)data) {
1272 			kq->kq_state |= KQ_ASYNC;
1273 		} else {
1274 			kq->kq_state &= ~KQ_ASYNC;
1275 		}
1276 		return (0);
1277 
1278 	case FIOSETOWN:
1279 		return (fsetown(*(int *)data, &kq->kq_sigio));
1280 
1281 	case FIOGETOWN:
1282 		*(int *)data = fgetown(&kq->kq_sigio);
1283 		return (0);
1284 	}
1285 #endif
1286 
1287 	return (ENOTTY);
1288 }
1289 
1290 /*ARGSUSED*/
1291 static int
1292 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1293 	struct thread *td)
1294 {
1295 	struct kqueue *kq;
1296 	int revents = 0;
1297 	int error;
1298 
1299 	if ((error = kqueue_aquire(fp, &kq)))
1300 		return POLLERR;
1301 
1302 	KQ_LOCK(kq);
1303 	if (events & (POLLIN | POLLRDNORM)) {
1304 		if (kq->kq_count) {
1305 			revents |= events & (POLLIN | POLLRDNORM);
1306 		} else {
1307 			selrecord(td, &kq->kq_sel);
1308 			kq->kq_state |= KQ_SEL;
1309 		}
1310 	}
1311 	kqueue_release(kq, 1);
1312 	KQ_UNLOCK(kq);
1313 	return (revents);
1314 }
1315 
1316 /*ARGSUSED*/
1317 static int
1318 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1319 	struct thread *td)
1320 {
1321 
1322 	return (ENXIO);
1323 }
1324 
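/*
 * Tear down a kqueue on last close: wait for other references to drain,
 * detach and drop every remaining knote, drain the notification task, and
 * free the descriptor storage.
 */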
1325 /*ARGSUSED*/
1326 static int
1327 kqueue_close(struct file *fp, struct thread *td)
1328 {
1329 	struct kqueue *kq = fp->f_data;
1330 	struct filedesc *fdp;
1331 	struct knote *kn;
1332 	int i;
1333 	int error;
1334 
1335 	GIANT_REQUIRED;
1336 
1337 	if ((error = kqueue_aquire(fp, &kq)))
1338 		return error;
1339 
1340 	KQ_LOCK(kq);
1341 
1342 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1343 	    ("kqueue already closing"));
1344 	kq->kq_state |= KQ_CLOSING;
1345 	if (kq->kq_refcnt > 1)
1346 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1347 
1348 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1349 	fdp = kq->kq_fdp;
1350 
1351 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
1352 	    ("kqueue's knlist not empty"));
1353 
1354 	for (i = 0; i < kq->kq_knlistsize; i++) {
1355 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1356 			KASSERT((kn->kn_status & KN_INFLUX) == 0,
1357 			    ("KN_INFLUX set when not supposed to be"));
1358 			kn->kn_status |= KN_INFLUX;
1359 			KQ_UNLOCK(kq);
1360 			kn->kn_fop->f_detach(kn);
1361 			knote_drop(kn, td);
1362 			KQ_LOCK(kq);
1363 		}
1364 	}
1365 	if (kq->kq_knhashmask != 0) {
1366 		for (i = 0; i <= kq->kq_knhashmask; i++) {
1367 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1368 				KASSERT((kn->kn_status & KN_INFLUX) == 0,
1369 				    ("KN_INFLUX set when not supposed to be"));
1370 				kn->kn_status |= KN_INFLUX;
1371 				KQ_UNLOCK(kq);
1372 				kn->kn_fop->f_detach(kn);
1373 				knote_drop(kn, td);
1374 				KQ_LOCK(kq);
1375 			}
1376 		}
1377 	}
1378 
1379 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1380 		kq->kq_state |= KQ_TASKDRAIN;
1381 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1382 	}
1383 
1384 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1385 		kq->kq_state &= ~KQ_SEL;
1386 		selwakeuppri(&kq->kq_sel, PSOCK);
1387 	}
1388 
1389 	KQ_UNLOCK(kq);
1390 
1391 	FILEDESC_LOCK(fdp);
1392 	SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
1393 	FILEDESC_UNLOCK(fdp);
1394 
1395 	knlist_destroy(&kq->kq_sel.si_note);
1396 	mtx_destroy(&kq->kq_lock);
1397 	kq->kq_fdp = NULL;
1398 
1399 	if (kq->kq_knhash != NULL)
1400 		free(kq->kq_knhash, M_KQUEUE);
1401 	if (kq->kq_knlist != NULL)
1402 		free(kq->kq_knlist, M_KQUEUE);
1403 
1404 	funsetown(&kq->kq_sigio);
1405 	free(kq, M_KQUEUE);
1406 	fp->f_data = NULL;
1407 
1408 	return (0);
1409 }
1410 
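/*
 * With the kq lock held, wake up everything that may be waiting for events
 * on this kqueue: sleeping kevent() callers, select/poll waiters, knotes
 * attached to the kqueue itself, and SIGIO listeners.
 */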
1411 static void
1412 kqueue_wakeup(struct kqueue *kq)
1413 {
1414 	KQ_OWNED(kq);
1415 
1416 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1417 		kq->kq_state &= ~KQ_SLEEP;
1418 		wakeup(kq);
1419 	}
1420 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1421 		kq->kq_state &= ~KQ_SEL;
1422 		selwakeuppri(&kq->kq_sel, PSOCK);
1423 	}
1424 	if (!knlist_empty(&kq->kq_sel.si_note))
1425 		kqueue_schedtask(kq);
1426 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1427 		pgsigio(&kq->kq_sigio, SIGIO, 0);
1428 	}
1429 }
1430 
1431 /*
1432  * Walk down a list of knotes, activating them if their event has triggered.
1433  *
1434  * There is a possibility to optimize in the case of one kq watching another.
1435  * Instead of scheduling a task to wake it up, you could pass enough state
1436  * down the chain to wake up the parent kqueue.  Make this code functional
1437  * first.
1438  */
1439 void
1440 knote(struct knlist *list, long hint, int islocked)
1441 {
1442 	struct kqueue *kq;
1443 	struct knote *kn;
1444 
1445 	if (list == NULL)
1446 		return;
1447 
1448 	mtx_assert(list->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
1449 	if (!islocked)
1450 		mtx_lock(list->kl_lock);
1451 	/*
1452 	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
1453 	 * the kqueue scheduling, but this will introduce four
1454 	 * lock/unlock's for each knote to test.  If we do, continue to use
1455 	 * SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, since it
1456 	 * is only safe if you want to remove the current item, which we are
1457 	 * not doing.
1458 	 */
1459 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
1460 		kq = kn->kn_kq;
1461 		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1462 			KQ_LOCK(kq);
1463 			if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1464 				kn->kn_status |= KN_HASKQLOCK;
1465 				if (kn->kn_fop->f_event(kn, hint))
1466 					KNOTE_ACTIVATE(kn, 1);
1467 				kn->kn_status &= ~KN_HASKQLOCK;
1468 			}
1469 			KQ_UNLOCK(kq);
1470 		}
1471 		kq = NULL;
1472 	}
1473 	if (!islocked)
1474 		mtx_unlock(list->kl_lock);
1475 }
1476 
1477 /*
1478  * add a knote to a knlist
1479  */
1480 void
1481 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
1482 {
1483 	mtx_assert(knl->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
1484 	KQ_NOTOWNED(kn->kn_kq);
1485 	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
1486 	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
1487 	if (!islocked)
1488 		mtx_lock(knl->kl_lock);
1489 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
1490 	if (!islocked)
1491 		mtx_unlock(knl->kl_lock);
1492 	KQ_LOCK(kn->kn_kq);
1493 	kn->kn_knlist = knl;
1494 	kn->kn_status &= ~KN_DETACHED;
1495 	KQ_UNLOCK(kn->kn_kq);
1496 }
1497 
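/*
 * Common code to remove a knote from a knlist and mark it detached; the
 * knlislocked and kqislocked arguments tell us which locks the caller
 * already holds.
 */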
1498 static void
1499 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
1500 {
1501 	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
1502 	mtx_assert(knl->kl_lock, knlislocked ? MA_OWNED : MA_NOTOWNED);
1503 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
1504 	if (!kqislocked)
1505 		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
1506     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
1507 	if (!knlislocked)
1508 		mtx_lock(knl->kl_lock);
1509 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
1510 	kn->kn_knlist = NULL;
1511 	if (!knlislocked)
1512 		mtx_unlock(knl->kl_lock);
1513 	if (!kqislocked)
1514 		KQ_LOCK(kn->kn_kq);
1515 	kn->kn_status |= KN_DETACHED;
1516 	if (!kqislocked)
1517 		KQ_UNLOCK(kn->kn_kq);
1518 }
1519 
1520 /*
1521  * remove a knote from a specified knlist
1522  */
1523 void
1524 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
1525 {
1526 
1527 	knlist_remove_kq(knl, kn, islocked, 0);
1528 }
1529 
1530 /*
1531  * remove a knote from a specified knlist while in an f_event handler.
1532  */
1533 void
1534 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
1535 {
1536 
1537 	knlist_remove_kq(knl, kn, 1,
1538 	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
1539 }
1540 
1541 int
1542 knlist_empty(struct knlist *knl)
1543 {
1544 
1545 	mtx_assert(knl->kl_lock, MA_OWNED);
1546 	return SLIST_EMPTY(&knl->kl_list);
1547 }
1548 
1549 static struct mtx	knlist_lock;
1550 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
1551 	MTX_DEF);
1552 
1553 void
1554 knlist_init(struct knlist *knl, struct mtx *mtx)
1555 {
1556 
1557 	if (mtx == NULL)
1558 		knl->kl_lock = &knlist_lock;
1559 	else
1560 		knl->kl_lock = mtx;
1561 
1562 	SLIST_INIT(&knl->kl_list);
1563 }
1564 
1565 void
1566 knlist_destroy(struct knlist *knl)
1567 {
1568 
1569 #ifdef INVARIANTS
1570 	/*
1571 	 * if we run across this error, we need to find the offending
1572 	 * driver and have it call knlist_clear.
1573 	 */
1574 	if (!SLIST_EMPTY(&knl->kl_list))
1575 		printf("WARNING: destroying knlist w/ knotes on it!\n");
1576 #endif
1577 
1578 	knl->kl_lock = NULL;
1579 	SLIST_INIT(&knl->kl_list);
1580 }
1581 
1582 /*
1583  * Even if we are locked, we may need to drop the lock to allow any influx
1584  * knotes time to "settle".
1585  */
1586 void
1587 knlist_clear(struct knlist *knl, int islocked)
1588 {
1589 	struct knote *kn;
1590 	struct kqueue *kq;
1591 
1592 	if (islocked)
1593 		mtx_assert(knl->kl_lock, MA_OWNED);
1594 	else {
1595 		mtx_assert(knl->kl_lock, MA_NOTOWNED);
1596 again:		/* need to reacquire lock since we have dropped it */
1597 		mtx_lock(knl->kl_lock);
1598 	}
1599 
1600 	SLIST_FOREACH(kn, &knl->kl_list, kn_selnext) {
1601 		kq = kn->kn_kq;
1602 		KQ_LOCK(kq);
1603 		if ((kn->kn_status & KN_INFLUX) &&
1604 		    (kn->kn_status & KN_DETACHED) != KN_DETACHED) {
1605 			KQ_UNLOCK(kq);
1606 			continue;
1607 		}
1608 		/* Make sure cleared knotes disappear soon */
1609 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1610 		knlist_remove_kq(knl, kn, 1, 1);
1611 		KQ_UNLOCK(kq);
1612 		kq = NULL;
1613 	}
1614 
1615 	if (!SLIST_EMPTY(&knl->kl_list)) {
1616 		/* there are still KN_INFLUX remaining */
1617 		kn = SLIST_FIRST(&knl->kl_list);
1618 		kq = kn->kn_kq;
1619 		KQ_LOCK(kq);
1620 		KASSERT(kn->kn_status & KN_INFLUX,
1621 		    ("knote removed w/o list lock"));
1622 		mtx_unlock(knl->kl_lock);
1623 		kq->kq_state |= KQ_FLUXWAIT;
1624 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
1625 		kq = NULL;
1626 		goto again;
1627 	}
1628 
1629 	SLIST_INIT(&knl->kl_list);
1630 
1631 	if (islocked)
1632 		mtx_assert(knl->kl_lock, MA_OWNED);
1633 	else {
1634 		mtx_unlock(knl->kl_lock);
1635 		mtx_assert(knl->kl_lock, MA_NOTOWNED);
1636 	}
1637 }
1638 
1639 /*
1640  * remove all knotes referencing a specified fd
1641  * must be called with FILEDESC lock.  This prevents a race where a new fd
1642  * comes along and occupies the entry and we attach a knote to the fd.
1643  */
1644 void
1645 knote_fdclose(struct thread *td, int fd)
1646 {
1647 	struct filedesc *fdp = td->td_proc->p_fd;
1648 	struct kqueue *kq;
1649 	struct knote *kn;
1650 	int influx;
1651 
1652 	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
1653 
1654 	/*
1655 	 * We shouldn't have to worry about new kevents appearing on fd
1656 	 * since filedesc is locked.
1657 	 */
1658 	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
1659 		KQ_LOCK(kq);
1660 
1661 again:
1662 		influx = 0;
1663 		while (kq->kq_knlistsize > fd &&
1664 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
1665 			if (kn->kn_status & KN_INFLUX) {
1666 				/* someone else might be waiting on our knote */
1667 				if (influx)
1668 					wakeup(kq);
1669 				kq->kq_state |= KQ_FLUXWAIT;
1670 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
1671 				goto again;
1672 			}
1673 			kn->kn_status |= KN_INFLUX;
1674 			KQ_UNLOCK(kq);
1675 			kn->kn_fop->f_detach(kn);
1676 			knote_drop(kn, td);
1677 			influx = 1;
1678 			KQ_LOCK(kq);
1679 		}
1680 		KQ_UNLOCK_FLUX(kq);
1681 	}
1682 }
1683 
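/*
 * Link a new knote into the kqueue's per-fd list or hash table.  The kq lock
 * must be held and the knote must be marked KN_INFLUX; ENOMEM means the
 * backing storage has not been expanded to cover this identifier.
 */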
1684 static int
1685 knote_attach(struct knote *kn, struct kqueue *kq)
1686 {
1687 	struct klist *list;
1688 
1689 	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
1690 	KQ_OWNED(kq);
1691 
1692 	if (kn->kn_fop->f_isfd) {
1693 		if (kn->kn_id >= kq->kq_knlistsize)
1694 			return ENOMEM;
1695 		list = &kq->kq_knlist[kn->kn_id];
1696 	} else {
1697 		if (kq->kq_knhash == NULL)
1698 			return ENOMEM;
1699 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1700 	}
1701 
1702 	SLIST_INSERT_HEAD(list, kn, kn_link);
1703 
1704 	return 0;
1705 }
1706 
1707 /*
1708  * knote must already have been detached using the f_detach method.
1709  * no lock needs to be held, it is assumed that the KN_INFLUX flag is set
1710  * to prevent other removal.
1711  */
1712 static void
1713 knote_drop(struct knote *kn, struct thread *td)
1714 {
1715 	struct kqueue *kq;
1716 	struct klist *list;
1717 
1718 	kq = kn->kn_kq;
1719 
1720 	KQ_NOTOWNED(kq);
1721 	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
1722 	    ("knote_drop called without KN_INFLUX set in kn_status"));
1723 
1724 	KQ_LOCK(kq);
1725 	if (kn->kn_fop->f_isfd)
1726 		list = &kq->kq_knlist[kn->kn_id];
1727 	else
1728 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1729 
1730 	SLIST_REMOVE(list, kn, knote, kn_link);
1731 	if (kn->kn_status & KN_QUEUED)
1732 		knote_dequeue(kn);
1733 	KQ_UNLOCK_FLUX(kq);
1734 
1735 	if (kn->kn_fop->f_isfd) {
1736 		fdrop(kn->kn_fp, td);
1737 		kn->kn_fp = NULL;
1738 	}
1739 	kqueue_fo_release(kn->kn_kevent.filter);
1740 	kn->kn_fop = NULL;
1741 	knote_free(kn);
1742 }
1743 
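/*
 * Place an activated knote on the kqueue's pending queue and wake up anyone
 * waiting for events; knote_dequeue() below is the inverse.
 */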
1744 static void
1745 knote_enqueue(struct knote *kn)
1746 {
1747 	struct kqueue *kq = kn->kn_kq;
1748 
1749 	KQ_OWNED(kn->kn_kq);
1750 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1751 
1752 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1753 	kn->kn_status |= KN_QUEUED;
1754 	kq->kq_count++;
1755 	kqueue_wakeup(kq);
1756 }
1757 
1758 static void
1759 knote_dequeue(struct knote *kn)
1760 {
1761 	struct kqueue *kq = kn->kn_kq;
1762 
1763 	KQ_OWNED(kn->kn_kq);
1764 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
1765 
1766 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1767 	kn->kn_status &= ~KN_QUEUED;
1768 	kq->kq_count--;
1769 }
1770 
1771 static void
1772 knote_init(void)
1773 {
1774 
1775 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
1776 	    NULL, NULL, UMA_ALIGN_PTR, 0);
1777 }
1778 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
1779 
1780 static struct knote *
1781 knote_alloc(int waitok)
1782 {
1783 	return ((struct knote *)uma_zalloc(knote_zone,
1784 	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
1785 }
1786 
1787 static void
1788 knote_free(struct knote *kn)
1789 {
1790 	if (kn != NULL)
1791 		uma_zfree(knote_zone, kn);
1792 }
1793