/* xref: /freebsd/sys/kern/kern_event.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5) */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ktrace.h"
#include "opt_kqueue.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);
struct g_kevent_args;
static int	kern_kevent_generic(struct thread *td,
		    struct g_kevent_args *uap,
		    struct kevent_copyops *k_ops);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c?  */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static unsigned int	kq_ncallouts = 0;
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
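
/*
 * The kq_lock mutex protects the kqueue state (kq_state, kq_count), the
 * active-event list kq_head, and the per-knote status bits.
 * KNOTE_ACTIVATE() may be called with or without that lock already held
 * (islock says which) and queues the knote unless it is already queued
 * or disabled; KQ_FLUX_WAKEUP() wakes any thread that set KQ_FLUXWAIT
 * while waiting for an in-flux knote to settle.
 */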

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}
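
/*
 * A knote with kn_influx > 0 is being modified or torn down by another
 * thread and must be left alone.  Paths that encounter such a knote
 * (kqueue_register(), kqueue_scan(), kqueue_drain(), knote_fork())
 * either skip it or set KQ_FLUXWAIT and sleep on the kqueue until the
 * owner drops the count and issues KQ_FLUX_WAKEUP().
 */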

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE		64		/* XXX should be tunable */
#endif

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
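
/*
 * Example: with the default KN_HASHSIZE of 64 the mask is 63, so an
 * ident of 0x1234 hashes to (0x1234 ^ 0x12) & 63 = 0x1226 & 0x3f = 38.
 * Folding the second byte in spreads idents that differ only in the
 * higher bits.
 */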

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
	{ &fs_filtops, 1 },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
};
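
/*
 * Filter numbers are negative constants (EVFILT_READ is -1, EVFILT_WRITE
 * is -2, and so on), so lookups index this table with the bitwise
 * complement: ~EVFILT_READ == 0 selects the first slot.  Entries with
 * for_nolock set are statically registered and may be returned by
 * kqueue_fo_find() without taking filterops_lock or touching for_refcnt.
 */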

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c?  */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	if (kn->kn_sfflags & NOTE_EXIT)
		p = pfind_any(kn->kn_id);
	else
		p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT)
		exiting = true;

	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.  The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c?  */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c?  */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  This mostly does the same as knote(),
 * activating all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether
 * the user wants to track the new process.  If so, attach a new knote
 * to the child and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice.  This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}
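
/*
 * A minimal userspace sketch of the NOTE_TRACK behavior implemented
 * above; the descriptor "kqfd" is an illustrative assumption:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, getpid(), EVFILT_PROC, EV_ADD,
 *	    NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	(void)kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 * After a fork, the kqueue reports NOTE_FORK on the parent and a
 * NOTE_CHILD event for the child whose data field carries the parent's
 * pid, delivered through the EV_FLAG2 and EV_FLAG1 registrations made
 * here.
 */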

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define NOTE_TIMER_PRECMASK						\
    (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)

static sbintime_t
timer2sbintime(intptr_t data, int flags)
{
	int64_t secs;

	/*
	 * Macros for converting to the fractional second portion of an
	 * sbintime_t using 64bit multiplication to improve precision.
	 */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}
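
/*
 * Worked example: data = 1500 with NOTE_MSECONDS (or flags == 0) takes
 * the data >= 1000 branch, so secs = 1 and the result is
 * ((sbintime_t)1 << 32) | MS_TO_SBT(500): one whole second plus half a
 * second expressed in 2^-32 fractional units.
 */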

struct kq_timer_cb_data {
	struct callout c;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period, 0 for abs */
};

static void
filt_timerexpire(void *knx)
{
	struct knote *kn;
	struct kq_timer_cb_data *kc;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != 0)
		return;
	kc = kn->kn_ptr.p_v;
	if (kc->to == 0)
		return;
	kc->next += kc->to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	struct bintime bt;
	sbintime_t to, sbt;
	unsigned int ncallouts;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far. */
	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		getboottimebin(&bt);
		sbt = bttosbt(bt);
		to -= sbt;
	}
	if (to < 0)
		return (EINVAL);

	do {
		ncallouts = kq_ncallouts;
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));

	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
		kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	callout_init(&kc->c, 1);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		kc->next = to;
		kc->to = 0;
	} else {
		kc->next = to + sbinuptime();
		kc->to = to;
	}
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}
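
/*
 * A minimal userspace sketch of arming the timer filter set up above;
 * the identifier 1 and the descriptor "kqfd" are illustrative:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);
 *	(void)kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 * This arms a periodic 500ms timer.  Because EV_CLEAR is set
 * automatically for non-NOTE_ABSTIME timers and filt_timerexpire()
 * increments kn_data on every expiration, the returned data field
 * counts expirations since the previous retrieval.
 */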

static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old;

	kc = kn->kn_ptr.p_v;
	callout_drain(&kc->c);
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
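
/*
 * A minimal userspace sketch of EVFILT_USER; the identifier 1 and the
 * descriptor "kqfd" are illustrative:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 * The NOTE_TRIGGER registration sets kn_hookid, which filt_user()
 * reports as an active event; EV_CLEAR rearms the note when it is
 * retrieved.
 */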

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, NULL));
}

static void
kqueue_init(struct kqueue *kq)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}

int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct ucred *cred;
	int fd, error;

	fdp = td->td_proc->p_fd;
	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = falloc_caps(td, &fp, &fd, flags, fcaps);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

#ifdef KTRACE
static size_t
kev_iovlen(int n, u_int kgio, size_t kevent_size)
{

	if (n < 0 || n >= kgio / kevent_size)
		return (kgio);
	return (n * kevent_size);
}
#endif

struct g_kevent_args {
	int	fd;
	void	*changelist;
	int	nchanges;
	void	*eventlist;
	int	nevents;
	const struct timespec *timeout;
};

int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent_copyout,
		.k_copyin = kevent_copyin,
		.kevent_size = sizeof(struct kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops));
}
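
/*
 * A minimal userspace sketch of the kevent(2) interface this syscall
 * implements; "sock" and handle_read() are illustrative assumptions:
 *
 *	#include <sys/event.h>
 *	#include <err.h>
 *
 *	int kqfd = kqueue();
 *	struct kevent change, event;
 *
 *	EV_SET(&change, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kqfd, &change, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent register");
 *	for (;;) {
 *		int n = kevent(kqfd, NULL, 0, &event, 1, NULL);
 *		if (n > 0 && event.filter == EVFILT_READ)
 *			handle_read(event.ident, event.data);
 *	}
 *
 * A single call both registers changes and retrieves events: nchanges
 * entries are consumed through k_copyin() and up to nevents results are
 * returned through k_copyout().
 */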

static int
kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
    struct kevent_copyops *k_ops)
{
	struct timespec ts, *tsp;
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
	u_int kgio;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		kgio = ktr_geniosize;
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = kev_iovlen(uap->nchanges, kgio,
		    k_ops->kevent_size);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = kev_iovlen(uap->nevents, kgio,
		    k_ops->kevent_size);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = kev_iovlen(uap->nchanges, kgio,
		    k_ops->kevent_size);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = kev_iovlen(td->td_retval[0], kgio,
		    k_ops->kevent_size);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

#ifdef COMPAT_FREEBSD11
struct kevent_freebsd11 {
	__uintptr_t	ident;		/* identifier for this event */
	short		filter;		/* filter for event */
	unsigned short	flags;
	unsigned int	fflags;
	__intptr_t	data;
	void		*udata;		/* opaque user data identifier */
};

static int
kevent11_copyout(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct kevent_freebsd11 kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	error = 0;
	for (i = 0; i < count; i++) {
		kev11.ident = kevp->ident;
		kev11.filter = kevp->filter;
		kev11.flags = kevp->flags;
		kev11.fflags = kevp->fflags;
		kev11.data = kevp->data;
		kev11.udata = kevp->udata;
		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
		if (error != 0)
			break;
		uap->eventlist++;
		kevp++;
	}
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent11_copyin(void *arg, struct kevent *kevp, int count)
{
	struct freebsd11_kevent_args *uap;
	struct kevent_freebsd11 kev11;
	int error, i;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct freebsd11_kevent_args *)arg;

	error = 0;
	for (i = 0; i < count; i++) {
		error = copyin(uap->changelist, &kev11, sizeof(kev11));
		if (error != 0)
			break;
		kevp->ident = kev11.ident;
		kevp->filter = kev11.filter;
		kevp->flags = kev11.flags;
		kevp->fflags = kev11.fflags;
		kevp->data = (uintptr_t)kev11.data;
		kevp->udata = kev11.udata;
		bzero(&kevp->ext, sizeof(kevp->ext));
		uap->changelist++;
		kevp++;
	}
	return (error);
}

int
freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent11_copyout,
		.k_copyin = kevent11_copyin,
		.kevent_size = sizeof(struct kevent_freebsd11),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops));
}
#endif

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);

	return (error);
}

static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue. This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}
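
/*
 * The anonymous kqueue lives on the stack and is drained and destroyed
 * before returning, so no knote may outlive the call; callers typically
 * supply kevent_copyops whose k_copyin/k_copyout operate on kernel
 * memory rather than userspace.
 */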

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return (EINVAL);
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return (EINVAL);

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return (NULL);

	if (sysfilt_ops[~filt].for_nolock)
		return (sysfilt_ops[~filt].for_fop);

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return (sysfilt_ops[~filt].for_fop);
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence if memory allocation should wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
    int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	struct knlist *knl;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
		return (EINVAL);

	fp = NULL;
	kn = NULL;
	knl = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return (EINVAL);

	if (kev->flags & EV_ADD) {
		/*
		 * Prevent waiting with locks.  Non-sleepable
		 * allocation failures are handled in the loop, only
		 * if the spare knote appears to be actually required.
		 */
		tkn = knote_alloc(waitok);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		if (kev->ident > INT_MAX)
			error = EBADF;
		else
			error = fget(td, kev->ident,
			    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);

		/*
		 * If possible, find an existing knote to use for this kevent.
		 */
		if (kev->filter == EVFILT_PROC &&
		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
			/*
			 * This is an internal creation of a process tracking
			 * note.  Don't attempt to coalesce this with an
			 * existing note.
			 */
			;
		} else if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && kn_in_flux(kn)) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
			kn->kn_status = KN_DETACHED;
			kn_enter_flux(kn);

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop_detached(kn, td);
				goto done;
			}
			knl = kn_list_lock(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		knote_drop(kn, td);
		goto done;
	}

	if (kev->flags & EV_FORCEONESHOT) {
		kn->kn_flags |= EV_ONESHOT;
		KNOTE_ACTIVATE(kn, 1);
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_SCAN;
	kn_enter_flux(kn);
	KQ_UNLOCK(kq);
	knl = kn_list_lock(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already.  i.e. filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	if ((kev->flags & EV_ENABLE) != 0)
		kn->kn_status &= ~KN_DISABLED;
	else if ((kev->flags & EV_DISABLE) != 0)
		kn->kn_status |= KN_DISABLED;

	if ((kn->kn_status & KN_DISABLED) == 0)
		event = kn->kn_fop->f_event(kn, 0);
	else
		event = 0;

	KQ_LOCK(kq);
	if (event)
		kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
	    KN_ACTIVE)
		knote_enqueue(kn);
	kn->kn_status &= ~KN_SCAN;
	kn_leave_flux(kn);
	kn_list_unlock(knl);
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return (error);
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
    int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return (0);
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes who weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && (kn->kn_flags & EV_CLEAR)) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and have FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return (POLLERR);

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}
2014 
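/*
 * A minimal userland sketch of the resulting fstat(2) behaviour, assuming
 * kq is a kqueue descriptor:
 *
 *	struct stat sb;
 *
 *	fstat(kq, &sb);			(expected to succeed)
 *	assert(S_ISFIFO(sb.st_mode));	(st_mode is the only field set)
 */
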
2015 static void
2016 kqueue_drain(struct kqueue *kq, struct thread *td)
2017 {
2018 	struct knote *kn;
2019 	int i;
2020 
2021 	KQ_LOCK(kq);
2022 
2023 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2024 	    ("kqueue already closing"));
2025 	kq->kq_state |= KQ_CLOSING;
2026 	if (kq->kq_refcnt > 1)
2027 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2028 
2029 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2030 
2031 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
2032 	    ("kqueue's knlist not empty"));
2033 
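	/*
	 * Drop every knote attached to this kqueue, both from the
	 * per-fd lists and from the hash table.  A knote that is in
	 * flux belongs to another thread, so wait for it to settle
	 * and retry instead of dropping it out from under that thread.
	 */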
2034 	for (i = 0; i < kq->kq_knlistsize; i++) {
2035 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2036 			if (kn_in_flux(kn)) {
2037 				kq->kq_state |= KQ_FLUXWAIT;
2038 				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2039 				continue;
2040 			}
2041 			kn_enter_flux(kn);
2042 			KQ_UNLOCK(kq);
2043 			knote_drop(kn, td);
2044 			KQ_LOCK(kq);
2045 		}
2046 	}
2047 	if (kq->kq_knhashmask != 0) {
2048 		for (i = 0; i <= kq->kq_knhashmask; i++) {
2049 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2050 				if (kn_in_flux(kn)) {
2051 					kq->kq_state |= KQ_FLUXWAIT;
2052 					msleep(kq, &kq->kq_lock, PSOCK,
2053 					       "kqclo2", 0);
2054 					continue;
2055 				}
2056 				kn_enter_flux(kn);
2057 				KQ_UNLOCK(kq);
2058 				knote_drop(kn, td);
2059 				KQ_LOCK(kq);
2060 			}
2061 		}
2062 	}
2063 
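	/*
	 * If the wakeup task is still scheduled or running, ask it to
	 * drain and sleep until it has finished before tearing down.
	 */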
2064 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2065 		kq->kq_state |= KQ_TASKDRAIN;
2066 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2067 	}
2068 
2069 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2070 		selwakeuppri(&kq->kq_sel, PSOCK);
2071 		if (!SEL_WAITING(&kq->kq_sel))
2072 			kq->kq_state &= ~KQ_SEL;
2073 	}
2074 
2075 	KQ_UNLOCK(kq);
2076 }
2077 
2078 static void
2079 kqueue_destroy(struct kqueue *kq)
2080 {
2081 
2082 	KASSERT(kq->kq_fdp == NULL,
2083 	    ("kqueue still attached to a file descriptor"));
2084 	seldrain(&kq->kq_sel);
2085 	knlist_destroy(&kq->kq_sel.si_note);
2086 	mtx_destroy(&kq->kq_lock);
2087 
2088 	if (kq->kq_knhash != NULL)
2089 		free(kq->kq_knhash, M_KQUEUE);
2090 	if (kq->kq_knlist != NULL)
2091 		free(kq->kq_knlist, M_KQUEUE);
2092 
2093 	funsetown(&kq->kq_sigio);
2094 }
2095 
2096 /*ARGSUSED*/
2097 static int
2098 kqueue_close(struct file *fp, struct thread *td)
2099 {
2100 	struct kqueue *kq = fp->f_data;
2101 	struct filedesc *fdp;
2102 	int error;
2103 	int filedesc_unlock;
2104 
2105 	if ((error = kqueue_acquire(fp, &kq)))
2106 		return (error);
2107 	kqueue_drain(kq, td);
2108 
2109 	/*
2110 	 * We could be called due to the knote_drop() doing fdrop(),
2111 	 * called from kqueue_register().  In this case the global
2112 	 * lock is owned, and the filedesc sx is locked beforehand, so
2113 	 * that the sleepable lock is not taken after the non-sleepable one.
2114 	 */
2115 	fdp = kq->kq_fdp;
2116 	kq->kq_fdp = NULL;
2117 	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2118 		FILEDESC_XLOCK(fdp);
2119 		filedesc_unlock = 1;
2120 	} else
2121 		filedesc_unlock = 0;
2122 	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2123 	if (filedesc_unlock)
2124 		FILEDESC_XUNLOCK(fdp);
2125 
2126 	kqueue_destroy(kq);
2127 	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2128 	crfree(kq->kq_cred);
2129 	free(kq, M_KQUEUE);
2130 	fp->f_data = NULL;
2131 
2132 	return (0);
2133 }
2134 
2135 static int
2136 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2137 {
2138 
2139 	kif->kf_type = KF_TYPE_KQUEUE;
2140 	return (0);
2141 }
2142 
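/*
 * Notify everything that may be waiting on this kqueue: threads
 * sleeping in kqueue_scan(), select/poll waiters, any other kqueue
 * watching this one (via the taskqueue), and SIGIO listeners.
 */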
2143 static void
2144 kqueue_wakeup(struct kqueue *kq)
2145 {
2146 	KQ_OWNED(kq);
2147 
2148 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2149 		kq->kq_state &= ~KQ_SLEEP;
2150 		wakeup(kq);
2151 	}
2152 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2153 		selwakeuppri(&kq->kq_sel, PSOCK);
2154 		if (!SEL_WAITING(&kq->kq_sel))
2155 			kq->kq_state &= ~KQ_SEL;
2156 	}
2157 	if (!knlist_empty(&kq->kq_sel.si_note))
2158 		kqueue_schedtask(kq);
2159 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2160 		pgsigio(&kq->kq_sigio, SIGIO, 0);
2161 	}
2162 }
2163 
2164 /*
2165  * Walk down a list of knotes, activating them if their event has triggered.
2166  *
2167  * There is a possibility to optimize in the case of one kq watching another.
2168  * Instead of scheduling a task to wake it up, you could pass enough state
2169  * down the chain to wake up the parent kqueue directly.  Make this code
2170  * functional first.
2171  */
2172 void
2173 knote(struct knlist *list, long hint, int lockflags)
2174 {
2175 	struct kqueue *kq;
2176 	struct knote *kn, *tkn;
2177 	int error;
2178 
2179 	if (list == NULL)
2180 		return;
2181 
2182 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2183 
2184 	if ((lockflags & KNF_LISTLOCKED) == 0)
2185 		list->kl_lock(list->kl_lockarg);
2186 
2187 	/*
2188 	 * If we unlock the list lock (and set the knote in flux), we can
2189 	 * eliminate the kqueue scheduling, but this will introduce
2190 	 * four lock/unlock operations for each knote to test.  Also, a
2191 	 * marker would be needed to keep the iteration position, since
2192 	 * filters or other threads could remove events.
2193 	 */
2194 	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2195 		kq = kn->kn_kq;
2196 		KQ_LOCK(kq);
2197 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2198 			/*
2199 			 * Do not process the in-flux knotes, except for
2200 			 * the in-flux state coming from the kq unlock in
2201 			 * kqueue_scan().  In the latter case, we do
2202 			 * not interfere with the scan, since the code
2203 			 * fragment in kqueue_scan() locks the knlist,
2204 			 * and cannot proceed until we finish.
2205 			 */
2206 			KQ_UNLOCK(kq);
2207 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
2208 			kn_enter_flux(kn);
2209 			KQ_UNLOCK(kq);
2210 			error = kn->kn_fop->f_event(kn, hint);
2211 			KQ_LOCK(kq);
2212 			kn_leave_flux(kn);
2213 			if (error)
2214 				KNOTE_ACTIVATE(kn, 1);
2215 			KQ_UNLOCK_FLUX(kq);
2216 		} else {
2217 			kn->kn_status |= KN_HASKQLOCK;
2218 			if (kn->kn_fop->f_event(kn, hint))
2219 				KNOTE_ACTIVATE(kn, 1);
2220 			kn->kn_status &= ~KN_HASKQLOCK;
2221 			KQ_UNLOCK(kq);
2222 		}
2223 	}
2224 	if ((lockflags & KNF_LISTLOCKED) == 0)
2225 		list->kl_unlock(list->kl_lockarg);
2226 }
2227 
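/*
 * A minimal sketch of the producer side, assuming a hypothetical driver
 * softc (foo_softc, sc_mtx and sc_note are illustrative names) whose
 * knlist is backed by its own mutex:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct knlist	sc_note;
 *	};
 *
 *	(attach)	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);
 *
 *	(interrupt handler, sc_mtx held)
 *			knote(&sc->sc_note, 0, KNF_LISTLOCKED);
 *
 *	(any context, sc_mtx not held)
 *			knote(&sc->sc_note, 0, 0);
 */
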
2228 /*
2229  * add a knote to a knlist
2230  */
2231 void
2232 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2233 {
2234 
2235 	KNL_ASSERT_LOCK(knl, islocked);
2236 	KQ_NOTOWNED(kn->kn_kq);
2237 	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2238 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2239 	    ("knote %p was not detached", kn));
2240 	if (!islocked)
2241 		knl->kl_lock(knl->kl_lockarg);
2242 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2243 	if (!islocked)
2244 		knl->kl_unlock(knl->kl_lockarg);
2245 	KQ_LOCK(kn->kn_kq);
2246 	kn->kn_knlist = knl;
2247 	kn->kn_status &= ~KN_DETACHED;
2248 	KQ_UNLOCK(kn->kn_kq);
2249 }
2250 
2251 static void
2252 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2253     int kqislocked)
2254 {
2255 
2256 	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2257 	KNL_ASSERT_LOCK(knl, knlislocked);
2258 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2259 	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2260 	KASSERT((kn->kn_status & KN_DETACHED) == 0,
2261 	    ("knote %p was already detached", kn));
2262 	if (!knlislocked)
2263 		knl->kl_lock(knl->kl_lockarg);
2264 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2265 	kn->kn_knlist = NULL;
2266 	if (!knlislocked)
2267 		kn_list_unlock(knl);
2268 	if (!kqislocked)
2269 		KQ_LOCK(kn->kn_kq);
2270 	kn->kn_status |= KN_DETACHED;
2271 	if (!kqislocked)
2272 		KQ_UNLOCK(kn->kn_kq);
2273 }
2274 
2275 /*
2276  * remove knote from the specified knlist
2277  */
2278 void
2279 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2280 {
2281 
2282 	knlist_remove_kq(knl, kn, islocked, 0);
2283 }
2284 
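/*
 * A filter's f_attach and f_detach methods typically pair knlist_add()
 * with knlist_remove().  A minimal sketch, reusing the hypothetical
 * foo_softc above and assuming kn_hook was pointed at the softc:
 *
 *	static int
 *	filt_fooattach(struct knote *kn)
 *	{
 *		struct foo_softc *sc = kn->kn_hook;
 *
 *		knlist_add(&sc->sc_note, kn, 0);
 *		return (0);
 *	}
 *
 *	static void
 *	filt_foodetach(struct knote *kn)
 *	{
 *		struct foo_softc *sc = kn->kn_hook;
 *
 *		knlist_remove(&sc->sc_note, kn, 0);
 *	}
 */
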
2285 int
2286 knlist_empty(struct knlist *knl)
2287 {
2288 
2289 	KNL_ASSERT_LOCKED(knl);
2290 	return (SLIST_EMPTY(&knl->kl_list));
2291 }
2292 
2293 static struct mtx knlist_lock;
2294 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2295     MTX_DEF);
2296 static void knlist_mtx_lock(void *arg);
2297 static void knlist_mtx_unlock(void *arg);
2298 
2299 static void
2300 knlist_mtx_lock(void *arg)
2301 {
2302 
2303 	mtx_lock((struct mtx *)arg);
2304 }
2305 
2306 static void
2307 knlist_mtx_unlock(void *arg)
2308 {
2309 
2310 	mtx_unlock((struct mtx *)arg);
2311 }
2312 
2313 static void
2314 knlist_mtx_assert_locked(void *arg)
2315 {
2316 
2317 	mtx_assert((struct mtx *)arg, MA_OWNED);
2318 }
2319 
2320 static void
2321 knlist_mtx_assert_unlocked(void *arg)
2322 {
2323 
2324 	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2325 }
2326 
2327 static void
2328 knlist_rw_rlock(void *arg)
2329 {
2330 
2331 	rw_rlock((struct rwlock *)arg);
2332 }
2333 
2334 static void
2335 knlist_rw_runlock(void *arg)
2336 {
2337 
2338 	rw_runlock((struct rwlock *)arg);
2339 }
2340 
2341 static void
2342 knlist_rw_assert_locked(void *arg)
2343 {
2344 
2345 	rw_assert((struct rwlock *)arg, RA_LOCKED);
2346 }
2347 
2348 static void
2349 knlist_rw_assert_unlocked(void *arg)
2350 {
2351 
2352 	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2353 }
2354 
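/*
 * Initialize a knlist.  Any callback left NULL falls back to the
 * mutex-based default above, and a NULL lock argument makes the list
 * share the global knlist_lock.
 */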
2355 void
2356 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2357     void (*kl_unlock)(void *),
2358     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
2359 {
2360 
2361 	if (lock == NULL)
2362 		knl->kl_lockarg = &knlist_lock;
2363 	else
2364 		knl->kl_lockarg = lock;
2365 
2366 	if (kl_lock == NULL)
2367 		knl->kl_lock = knlist_mtx_lock;
2368 	else
2369 		knl->kl_lock = kl_lock;
2370 	if (kl_unlock == NULL)
2371 		knl->kl_unlock = knlist_mtx_unlock;
2372 	else
2373 		knl->kl_unlock = kl_unlock;
2374 	if (kl_assert_locked == NULL)
2375 		knl->kl_assert_locked = knlist_mtx_assert_locked;
2376 	else
2377 		knl->kl_assert_locked = kl_assert_locked;
2378 	if (kl_assert_unlocked == NULL)
2379 		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
2380 	else
2381 		knl->kl_assert_unlocked = kl_assert_unlocked;
2382 
2383 	knl->kl_autodestroy = 0;
2384 	SLIST_INIT(&knl->kl_list);
2385 }
2386 
2387 void
2388 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2389 {
2390 
2391 	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
2392 }
2393 
2394 struct knlist *
2395 knlist_alloc(struct mtx *lock)
2396 {
2397 	struct knlist *knl;
2398 
2399 	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2400 	knlist_init_mtx(knl, lock);
2401 	return (knl);
2402 }
2403 
2404 void
2405 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2406 {
2407 
2408 	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2409 	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
2410 }
2411 
2412 void
2413 knlist_destroy(struct knlist *knl)
2414 {
2415 
2416 	KASSERT(KNLIST_EMPTY(knl),
2417 	    ("destroying knlist %p with knotes on it", knl));
2418 }
2419 
2420 void
2421 knlist_detach(struct knlist *knl)
2422 {
2423 
2424 	KNL_ASSERT_LOCKED(knl);
2425 	knl->kl_autodestroy = 1;
2426 	if (knlist_empty(knl)) {
2427 		knlist_destroy(knl);
2428 		free(knl, M_KQUEUE);
2429 	}
2430 }
2431 
2432 /*
2433  * Even if we are locked, we may need to drop the lock to allow any influx
2434  * knotes time to "settle".
2435  */
2436 void
2437 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2438 {
2439 	struct knote *kn, *kn2;
2440 	struct kqueue *kq;
2441 
2442 	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2443 	if (islocked)
2444 		KNL_ASSERT_LOCKED(knl);
2445 	else {
2446 		KNL_ASSERT_UNLOCKED(knl);
2447 again:		/* need to reacquire lock since we have dropped it */
2448 		knl->kl_lock(knl->kl_lockarg);
2449 	}
2450 
2451 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2452 		kq = kn->kn_kq;
2453 		KQ_LOCK(kq);
2454 		if (kn_in_flux(kn)) {
2455 			KQ_UNLOCK(kq);
2456 			continue;
2457 		}
2458 		knlist_remove_kq(knl, kn, 1, 1);
2459 		if (killkn) {
2460 			kn_enter_flux(kn);
2461 			KQ_UNLOCK(kq);
2462 			knote_drop_detached(kn, td);
2463 		} else {
2464 			/* Make sure cleared knotes disappear soon */
2465 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
2466 			KQ_UNLOCK(kq);
2467 		}
2468 		kq = NULL;
2469 	}
2470 
2471 	if (!SLIST_EMPTY(&knl->kl_list)) {
2472 		/* there are still in-flux knotes remaining */
2473 		kn = SLIST_FIRST(&knl->kl_list);
2474 		kq = kn->kn_kq;
2475 		KQ_LOCK(kq);
2476 		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2477 		knl->kl_unlock(knl->kl_lockarg);
2478 		kq->kq_state |= KQ_FLUXWAIT;
2479 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2480 		kq = NULL;
2481 		goto again;
2482 	}
2483 
2484 	if (islocked)
2485 		KNL_ASSERT_LOCKED(knl);
2486 	else {
2487 		knl->kl_unlock(knl->kl_lockarg);
2488 		KNL_ASSERT_UNLOCKED(knl);
2489 	}
2490 }
2491 
2492 /*
2493  * Remove all knotes referencing a specified fd; must be called with the
2494  * FILEDESC lock held.  This prevents a race where a new fd comes along and
2495  * occupies the entry and we attach a knote to the fd.
2496  */
2497 void
2498 knote_fdclose(struct thread *td, int fd)
2499 {
2500 	struct filedesc *fdp = td->td_proc->p_fd;
2501 	struct kqueue *kq;
2502 	struct knote *kn;
2503 	int influx;
2504 
2505 	FILEDESC_XLOCK_ASSERT(fdp);
2506 
2507 	/*
2508 	 * We shouldn't have to worry about new kevents appearing on fd
2509 	 * since filedesc is locked.
2510 	 */
2511 	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2512 		KQ_LOCK(kq);
2513 
2514 again:
2515 		influx = 0;
2516 		while (kq->kq_knlistsize > fd &&
2517 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2518 			if (kn_in_flux(kn)) {
2519 				/* someone else might be waiting on our knote */
2520 				if (influx)
2521 					wakeup(kq);
2522 				kq->kq_state |= KQ_FLUXWAIT;
2523 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2524 				goto again;
2525 			}
2526 			kn_enter_flux(kn);
2527 			KQ_UNLOCK(kq);
2528 			influx = 1;
2529 			knote_drop(kn, td);
2530 			KQ_LOCK(kq);
2531 		}
2532 		KQ_UNLOCK_FLUX(kq);
2533 	}
2534 }
2535 
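/*
 * Hook the knote onto the kqueue's per-fd list or hash bucket.  Returns
 * ENOMEM when the backing storage has not been expanded to cover kn_id.
 */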
2536 static int
2537 knote_attach(struct knote *kn, struct kqueue *kq)
2538 {
2539 	struct klist *list;
2540 
2541 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2542 	KQ_OWNED(kq);
2543 
2544 	if (kn->kn_fop->f_isfd) {
2545 		if (kn->kn_id >= kq->kq_knlistsize)
2546 			return (ENOMEM);
2547 		list = &kq->kq_knlist[kn->kn_id];
2548 	} else {
2549 		if (kq->kq_knhash == NULL)
2550 			return (ENOMEM);
2551 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2552 	}
2553 	SLIST_INSERT_HEAD(list, kn, kn_link);
2554 	return (0);
2555 }
2556 
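/*
 * Detach the knote from its object, if that has not already happened,
 * and then drop and free it.
 */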
2557 static void
2558 knote_drop(struct knote *kn, struct thread *td)
2559 {
2560 
2561 	if ((kn->kn_status & KN_DETACHED) == 0)
2562 		kn->kn_fop->f_detach(kn);
2563 	knote_drop_detached(kn, td);
2564 }
2565 
2566 static void
2567 knote_drop_detached(struct knote *kn, struct thread *td)
2568 {
2569 	struct kqueue *kq;
2570 	struct klist *list;
2571 
2572 	kq = kn->kn_kq;
2573 
2574 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2575 	    ("knote %p still attached", kn));
2576 	KQ_NOTOWNED(kq);
2577 
2578 	KQ_LOCK(kq);
2579 	KASSERT(kn->kn_influx == 1,
2580 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2581 
2582 	if (kn->kn_fop->f_isfd)
2583 		list = &kq->kq_knlist[kn->kn_id];
2584 	else
2585 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2586 
2587 	if (!SLIST_EMPTY(list))
2588 		SLIST_REMOVE(list, kn, knote, kn_link);
2589 	if (kn->kn_status & KN_QUEUED)
2590 		knote_dequeue(kn);
2591 	KQ_UNLOCK_FLUX(kq);
2592 
2593 	if (kn->kn_fop->f_isfd) {
2594 		fdrop(kn->kn_fp, td);
2595 		kn->kn_fp = NULL;
2596 	}
2597 	kqueue_fo_release(kn->kn_kevent.filter);
2598 	kn->kn_fop = NULL;
2599 	knote_free(kn);
2600 }
2601 
2602 static void
2603 knote_enqueue(struct knote *kn)
2604 {
2605 	struct kqueue *kq = kn->kn_kq;
2606 
2607 	KQ_OWNED(kn->kn_kq);
2608 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2609 
2610 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2611 	kn->kn_status |= KN_QUEUED;
2612 	kq->kq_count++;
2613 	kqueue_wakeup(kq);
2614 }
2615 
2616 static void
2617 knote_dequeue(struct knote *kn)
2618 {
2619 	struct kqueue *kq = kn->kn_kq;
2620 
2621 	KQ_OWNED(kn->kn_kq);
2622 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2623 
2624 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2625 	kn->kn_status &= ~KN_QUEUED;
2626 	kq->kq_count--;
2627 }
2628 
2629 static void
2630 knote_init(void)
2631 {
2632 
2633 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2634 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2635 }
2636 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2637 
2638 static struct knote *
2639 knote_alloc(int waitok)
2640 {
2641 
2642 	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
2643 	    M_ZERO));
2644 }
2645 
2646 static void
2647 knote_free(struct knote *kn)
2648 {
2649 
2650 	uma_zfree(knote_zone, kn);
2651 }
2652 
2653 /*
2654  * Register the kev with the kq specified by fd.
2655  */
2656 int
2657 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2658 {
2659 	struct kqueue *kq;
2660 	struct file *fp;
2661 	cap_rights_t rights;
2662 	int error;
2663 
2664 	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
2665 	if (error != 0)
2666 		return (error);
2667 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2668 		goto noacquire;
2669 
2670 	error = kqueue_register(kq, kev, td, waitok);
2671 	kqueue_release(kq, 0);
2672 
2673 noacquire:
2674 	fdrop(fp, td);
2675 	return (error);
2676 }
2677
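/*
 * A minimal sketch of a kernel-side caller, assuming an illustrative
 * one-shot EVFILT_USER registration on a user-supplied kqueue fd
 * (kqfd, ident and udata are hypothetical):
 *
 *	struct kevent kev;
 *	int error;
 *
 *	EV_SET(&kev, ident, EVFILT_USER, EV_ADD | EV_ONESHOT, 0, 0, udata);
 *	error = kqfd_register(kqfd, &kev, td, 1);
 */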