1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
5  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
6  * Copyright (c) 2009 Apple, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ktrace.h"
35 #include "opt_kqueue.h"
36 
37 #ifdef COMPAT_FREEBSD11
38 #define	_WANT_FREEBSD11_KEVENT
39 #endif
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/capsicum.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/rwlock.h>
48 #include <sys/proc.h>
49 #include <sys/malloc.h>
50 #include <sys/unistd.h>
51 #include <sys/file.h>
52 #include <sys/filedesc.h>
53 #include <sys/filio.h>
54 #include <sys/fcntl.h>
55 #include <sys/kthread.h>
56 #include <sys/selinfo.h>
57 #include <sys/queue.h>
58 #include <sys/event.h>
59 #include <sys/eventvar.h>
60 #include <sys/poll.h>
61 #include <sys/protosw.h>
62 #include <sys/resourcevar.h>
63 #include <sys/sigio.h>
64 #include <sys/signalvar.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/stat.h>
68 #include <sys/sysctl.h>
69 #include <sys/sysproto.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/taskqueue.h>
72 #include <sys/uio.h>
73 #include <sys/user.h>
74 #ifdef KTRACE
75 #include <sys/ktrace.h>
76 #endif
77 #include <machine/atomic.h>
78 
79 #include <vm/uma.h>
80 
81 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
82 
83 /*
84  * This lock is used if multiple kq locks are required.  This possibly
85  * should be made into a per-process lock.
86  */
87 static struct mtx	kq_global;
88 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
89 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
90 	if (!haslck)				\
91 		mtx_lock(lck);			\
92 	haslck = 1;				\
93 } while (0)
94 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
95 	if (haslck)				\
96 		mtx_unlock(lck);			\
97 	haslck = 0;				\
98 } while (0)
99 
100 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
101 
102 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
103 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
104 static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
105 		    struct thread *td, int waitok);
106 static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
107 static void	kqueue_release(struct kqueue *kq, int locked);
108 static void	kqueue_destroy(struct kqueue *kq);
109 static void	kqueue_drain(struct kqueue *kq, struct thread *td);
110 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
111 		    uintptr_t ident, int waitok);
112 static void	kqueue_task(void *arg, int pending);
113 static int	kqueue_scan(struct kqueue *kq, int maxevents,
114 		    struct kevent_copyops *k_ops,
115 		    const struct timespec *timeout,
116 		    struct kevent *keva, struct thread *td);
117 static void 	kqueue_wakeup(struct kqueue *kq);
118 static struct filterops *kqueue_fo_find(int filt);
119 static void	kqueue_fo_release(int filt);
120 struct g_kevent_args;
121 static int	kern_kevent_generic(struct thread *td,
122 		    struct g_kevent_args *uap,
123 		    struct kevent_copyops *k_ops, const char *struct_name);
124 
125 static fo_ioctl_t	kqueue_ioctl;
126 static fo_poll_t	kqueue_poll;
127 static fo_kqfilter_t	kqueue_kqfilter;
128 static fo_stat_t	kqueue_stat;
129 static fo_close_t	kqueue_close;
130 static fo_fill_kinfo_t	kqueue_fill_kinfo;
131 
132 static struct fileops kqueueops = {
133 	.fo_read = invfo_rdwr,
134 	.fo_write = invfo_rdwr,
135 	.fo_truncate = invfo_truncate,
136 	.fo_ioctl = kqueue_ioctl,
137 	.fo_poll = kqueue_poll,
138 	.fo_kqfilter = kqueue_kqfilter,
139 	.fo_stat = kqueue_stat,
140 	.fo_close = kqueue_close,
141 	.fo_chmod = invfo_chmod,
142 	.fo_chown = invfo_chown,
143 	.fo_sendfile = invfo_sendfile,
144 	.fo_fill_kinfo = kqueue_fill_kinfo,
145 };
146 
147 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
148 static void 	knote_drop(struct knote *kn, struct thread *td);
149 static void 	knote_drop_detached(struct knote *kn, struct thread *td);
150 static void 	knote_enqueue(struct knote *kn);
151 static void 	knote_dequeue(struct knote *kn);
152 static void 	knote_init(void);
153 static struct 	knote *knote_alloc(int waitok);
154 static void 	knote_free(struct knote *kn);
155 
156 static void	filt_kqdetach(struct knote *kn);
157 static int	filt_kqueue(struct knote *kn, long hint);
158 static int	filt_procattach(struct knote *kn);
159 static void	filt_procdetach(struct knote *kn);
160 static int	filt_proc(struct knote *kn, long hint);
161 static int	filt_fileattach(struct knote *kn);
162 static void	filt_timerexpire(void *knx);
163 static int	filt_timerattach(struct knote *kn);
164 static void	filt_timerdetach(struct knote *kn);
165 static int	filt_timer(struct knote *kn, long hint);
166 static int	filt_userattach(struct knote *kn);
167 static void	filt_userdetach(struct knote *kn);
168 static int	filt_user(struct knote *kn, long hint);
169 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
170 		    u_long type);
171 
172 static struct filterops file_filtops = {
173 	.f_isfd = 1,
174 	.f_attach = filt_fileattach,
175 };
176 static struct filterops kqread_filtops = {
177 	.f_isfd = 1,
178 	.f_detach = filt_kqdetach,
179 	.f_event = filt_kqueue,
180 };
181 /* XXX - move to kern_proc.c?  */
182 static struct filterops proc_filtops = {
183 	.f_isfd = 0,
184 	.f_attach = filt_procattach,
185 	.f_detach = filt_procdetach,
186 	.f_event = filt_proc,
187 };
188 static struct filterops timer_filtops = {
189 	.f_isfd = 0,
190 	.f_attach = filt_timerattach,
191 	.f_detach = filt_timerdetach,
192 	.f_event = filt_timer,
193 };
194 static struct filterops user_filtops = {
195 	.f_attach = filt_userattach,
196 	.f_detach = filt_userdetach,
197 	.f_event = filt_user,
198 	.f_touch = filt_usertouch,
199 };
200 
201 static uma_zone_t	knote_zone;
202 static unsigned int	kq_ncallouts = 0;
203 static unsigned int 	kq_calloutmax = 4 * 1024;
204 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
205     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
206 
207 /* XXX - ensure not influx ? */
208 #define KNOTE_ACTIVATE(kn, islock) do { 				\
209 	if ((islock))							\
210 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
211 	else								\
212 		KQ_LOCK((kn)->kn_kq);					\
213 	(kn)->kn_status |= KN_ACTIVE;					\
214 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
215 		knote_enqueue((kn));					\
216 	if (!(islock))							\
217 		KQ_UNLOCK((kn)->kn_kq);					\
218 } while (0)
219 #define KQ_LOCK(kq) do {						\
220 	mtx_lock(&(kq)->kq_lock);					\
221 } while (0)
222 #define KQ_FLUX_WAKEUP(kq) do {						\
223 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
224 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
225 		wakeup((kq));						\
226 	}								\
227 } while (0)
228 #define KQ_UNLOCK_FLUX(kq) do {						\
229 	KQ_FLUX_WAKEUP(kq);						\
230 	mtx_unlock(&(kq)->kq_lock);					\
231 } while (0)
232 #define KQ_UNLOCK(kq) do {						\
233 	mtx_unlock(&(kq)->kq_lock);					\
234 } while (0)
235 #define KQ_OWNED(kq) do {						\
236 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
237 } while (0)
238 #define KQ_NOTOWNED(kq) do {						\
239 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
240 } while (0)
241 
242 static struct knlist *
243 kn_list_lock(struct knote *kn)
244 {
245 	struct knlist *knl;
246 
247 	knl = kn->kn_knlist;
248 	if (knl != NULL)
249 		knl->kl_lock(knl->kl_lockarg);
250 	return (knl);
251 }
252 
253 static void
254 kn_list_unlock(struct knlist *knl)
255 {
256 	bool do_free;
257 
258 	if (knl == NULL)
259 		return;
260 	do_free = knl->kl_autodestroy && knlist_empty(knl);
261 	knl->kl_unlock(knl->kl_lockarg);
262 	if (do_free) {
263 		knlist_destroy(knl);
264 		free(knl, M_KQUEUE);
265 	}
266 }
267 
268 static bool
269 kn_in_flux(struct knote *kn)
270 {
271 
272 	return (kn->kn_influx > 0);
273 }
274 
275 static void
276 kn_enter_flux(struct knote *kn)
277 {
278 
279 	KQ_OWNED(kn->kn_kq);
280 	MPASS(kn->kn_influx < INT_MAX);
281 	kn->kn_influx++;
282 }
283 
284 static bool
285 kn_leave_flux(struct knote *kn)
286 {
287 
288 	KQ_OWNED(kn->kn_kq);
289 	MPASS(kn->kn_influx > 0);
290 	kn->kn_influx--;
291 	return (kn->kn_influx == 0);
292 }
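
/*
 * The influx count lets a thread stabilize a knote while it drops the
 * kq lock.  A sketch of the pattern, as used by kqueue_register(),
 * kqueue_scan() and kqueue_drain() below:
 *
 *	KQ_LOCK(kq);
 *	kn_enter_flux(kn);		(pin the knote)
 *	KQ_UNLOCK(kq);
 *	... call f_event() or knote_drop() without the kq lock ...
 *	KQ_LOCK(kq);
 *	kn_leave_flux(kn);		(true when the last ref drops)
 *	KQ_UNLOCK_FLUX(kq);		(wakes any KQ_FLUXWAIT sleeper)
 */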
293 
294 #define	KNL_ASSERT_LOCK(knl, islocked) do {				\
295 	if (islocked)							\
296 		KNL_ASSERT_LOCKED(knl);				\
297 	else								\
298 		KNL_ASSERT_UNLOCKED(knl);				\
299 } while (0)
300 #ifdef INVARIANTS
301 #define	KNL_ASSERT_LOCKED(knl) do {					\
302 	knl->kl_assert_locked((knl)->kl_lockarg);			\
303 } while (0)
304 #define	KNL_ASSERT_UNLOCKED(knl) do {					\
305 	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
306 } while (0)
307 #else /* !INVARIANTS */
308 #define	KNL_ASSERT_LOCKED(knl) do {} while (0)
309 #define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
310 #endif /* INVARIANTS */
311 
312 #ifndef	KN_HASHSIZE
313 #define	KN_HASHSIZE		64		/* XXX should be tunable */
314 #endif
315 
316 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
317 
318 static int
319 filt_nullattach(struct knote *kn)
320 {
321 
322 	return (ENXIO);
323 }
324 
325 struct filterops null_filtops = {
326 	.f_isfd = 0,
327 	.f_attach = filt_nullattach,
328 };
329 
330 /* XXX - make SYSINIT to add these, and move into respective modules. */
331 extern struct filterops sig_filtops;
332 extern struct filterops fs_filtops;
333 
334 /*
335  * Table for all system-defined filters.
336  */
337 static struct mtx	filterops_lock;
338 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
339 	MTX_DEF);
340 static struct {
341 	struct filterops *for_fop;
342 	int for_nolock;
343 	int for_refcnt;
344 } sysfilt_ops[EVFILT_SYSCOUNT] = {
345 	{ &file_filtops, 1 },			/* EVFILT_READ */
346 	{ &file_filtops, 1 },			/* EVFILT_WRITE */
347 	{ &null_filtops },			/* EVFILT_AIO */
348 	{ &file_filtops, 1 },			/* EVFILT_VNODE */
349 	{ &proc_filtops, 1 },			/* EVFILT_PROC */
350 	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
351 	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
352 	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
353 	{ &fs_filtops, 1 },			/* EVFILT_FS */
354 	{ &null_filtops },			/* EVFILT_LIO */
355 	{ &user_filtops, 1 },			/* EVFILT_USER */
356 	{ &null_filtops },			/* EVFILT_SENDFILE */
357 	{ &file_filtops, 1 },                   /* EVFILT_EMPTY */
358 };
359 
360 /*
361  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
362  * method.
363  */
364 static int
365 filt_fileattach(struct knote *kn)
366 {
367 
368 	return (fo_kqfilter(kn->kn_fp, kn));
369 }
370 
371 /*ARGSUSED*/
372 static int
373 kqueue_kqfilter(struct file *fp, struct knote *kn)
374 {
375 	struct kqueue *kq = kn->kn_fp->f_data;
376 
377 	if (kn->kn_filter != EVFILT_READ)
378 		return (EINVAL);
379 
380 	kn->kn_status |= KN_KQUEUE;
381 	kn->kn_fop = &kqread_filtops;
382 	knlist_add(&kq->kq_sel.si_note, kn, 0);
383 
384 	return (0);
385 }
386 
387 static void
388 filt_kqdetach(struct knote *kn)
389 {
390 	struct kqueue *kq = kn->kn_fp->f_data;
391 
392 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
393 }
394 
395 /*ARGSUSED*/
396 static int
397 filt_kqueue(struct knote *kn, long hint)
398 {
399 	struct kqueue *kq = kn->kn_fp->f_data;
400 
401 	kn->kn_data = kq->kq_count;
402 	return (kn->kn_data > 0);
403 }
404 
405 /* XXX - move to kern_proc.c?  */
406 static int
407 filt_procattach(struct knote *kn)
408 {
409 	struct proc *p;
410 	int error;
411 	bool exiting, immediate;
412 
413 	exiting = immediate = false;
414 	if (kn->kn_sfflags & NOTE_EXIT)
415 		p = pfind_any(kn->kn_id);
416 	else
417 		p = pfind(kn->kn_id);
418 	if (p == NULL)
419 		return (ESRCH);
420 	if (p->p_flag & P_WEXIT)
421 		exiting = true;
422 
423 	if ((error = p_cansee(curthread, p))) {
424 		PROC_UNLOCK(p);
425 		return (error);
426 	}
427 
428 	kn->kn_ptr.p_proc = p;
429 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
430 
431 	/*
432 	 * Internal flag indicating registration done by kernel for the
433 	 * purposes of getting a NOTE_CHILD notification.
434 	 */
435 	if (kn->kn_flags & EV_FLAG2) {
436 		kn->kn_flags &= ~EV_FLAG2;
437 		kn->kn_data = kn->kn_sdata;		/* ppid */
438 		kn->kn_fflags = NOTE_CHILD;
439 		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
440 		immediate = true; /* Force immediate activation of child note. */
441 	}
442 	/*
443 	 * Internal flag indicating registration done by kernel (for other than
444 	 * NOTE_CHILD).
445 	 */
446 	if (kn->kn_flags & EV_FLAG1) {
447 		kn->kn_flags &= ~EV_FLAG1;
448 	}
449 
450 	knlist_add(p->p_klist, kn, 1);
451 
452 	/*
453 	 * Immediately activate any child notes or, in the case of a zombie
454 	 * target process, exit notes.  The latter is necessary to handle the
455 	 * case where the target process, e.g. a child, dies before the kevent
456 	 * is registered.
457 	 */
458 	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
459 		KNOTE_ACTIVATE(kn, 0);
460 
461 	PROC_UNLOCK(p);
462 
463 	return (0);
464 }
465 
466 /*
467  * The knote may be attached to a different process, which may exit,
468  * leaving nothing for the knote to be attached to.  So when the process
469  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
470  * it will be deleted when read out.  However, as part of the knote deletion,
471  * this routine is called, so a check is needed to avoid actually performing
472  * a detach, because the original process does not exist any more.
473  */
474 /* XXX - move to kern_proc.c?  */
475 static void
476 filt_procdetach(struct knote *kn)
477 {
478 
479 	knlist_remove(kn->kn_knlist, kn, 0);
480 	kn->kn_ptr.p_proc = NULL;
481 }
482 
483 /* XXX - move to kern_proc.c?  */
484 static int
485 filt_proc(struct knote *kn, long hint)
486 {
487 	struct proc *p;
488 	u_int event;
489 
490 	p = kn->kn_ptr.p_proc;
491 	if (p == NULL) /* already activated, from attach filter */
492 		return (0);
493 
494 	/* Mask off extra data. */
495 	event = (u_int)hint & NOTE_PCTRLMASK;
496 
497 	/* If the user is interested in this event, record it. */
498 	if (kn->kn_sfflags & event)
499 		kn->kn_fflags |= event;
500 
501 	/* Process is gone, so flag the event as finished. */
502 	if (event == NOTE_EXIT) {
503 		kn->kn_flags |= EV_EOF | EV_ONESHOT;
504 		kn->kn_ptr.p_proc = NULL;
505 		if (kn->kn_fflags & NOTE_EXIT)
506 			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
507 		if (kn->kn_fflags == 0)
508 			kn->kn_flags |= EV_DROP;
509 		return (1);
510 	}
511 
512 	return (kn->kn_fflags != 0);
513 }
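
/*
 * A minimal userspace sketch of the filter above: watch a pid for exit
 * and read back the status that filt_proc() leaves in kn_data.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for NOTE_EXIT)
 *
 * On return ev.data holds KW_EXITCODE(exit value, signal).
 */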
514 
515 /*
516  * Called when a process forks.  It mostly does the same as knote(),
517  * activating all knotes registered to be activated when the process
518  * forks.  Additionally, for each knote attached to the parent, check
519  * whether the user wants to track the new process.  If so, attach a
520  * new knote to it, and immediately report an event with the child's
521  * pid.
522  */
523 void
524 knote_fork(struct knlist *list, int pid)
525 {
526 	struct kqueue *kq;
527 	struct knote *kn;
528 	struct kevent kev;
529 	int error;
530 
531 	if (list == NULL)
532 		return;
533 	list->kl_lock(list->kl_lockarg);
534 
535 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
536 		kq = kn->kn_kq;
537 		KQ_LOCK(kq);
538 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
539 			KQ_UNLOCK(kq);
540 			continue;
541 		}
542 
543 		/*
544 		 * The same as knote(), activate the event.
545 		 */
546 		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
547 			kn->kn_status |= KN_HASKQLOCK;
548 			if (kn->kn_fop->f_event(kn, NOTE_FORK))
549 				KNOTE_ACTIVATE(kn, 1);
550 			kn->kn_status &= ~KN_HASKQLOCK;
551 			KQ_UNLOCK(kq);
552 			continue;
553 		}
554 
555 		/*
556 		 * The NOTE_TRACK case. In addition to the activation
557 		 * of the event, we need to register new events to
558 		 * track the child. Drop the locks in preparation for
559 		 * the call to kqueue_register().
560 		 */
561 		kn_enter_flux(kn);
562 		KQ_UNLOCK(kq);
563 		list->kl_unlock(list->kl_lockarg);
564 
565 		/*
566 		 * Activate existing knote and register tracking knotes with
567 		 * new process.
568 		 *
569 		 * First register a knote to get just the child notice. This
570 		 * must be a separate note from a potential NOTE_EXIT
571 		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
572 		 * to use the data field (in conflicting ways).
573 		 */
574 		kev.ident = pid;
575 		kev.filter = kn->kn_filter;
576 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
577 		    EV_FLAG2;
578 		kev.fflags = kn->kn_sfflags;
579 		kev.data = kn->kn_id;		/* parent */
580 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
581 		error = kqueue_register(kq, &kev, NULL, 0);
582 		if (error)
583 			kn->kn_fflags |= NOTE_TRACKERR;
584 
585 		/*
586 		 * Then register another knote to track other potential events
587 		 * from the new process.
588 		 */
589 		kev.ident = pid;
590 		kev.filter = kn->kn_filter;
591 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
592 		kev.fflags = kn->kn_sfflags;
593 		kev.data = kn->kn_id;		/* parent */
594 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
595 		error = kqueue_register(kq, &kev, NULL, 0);
596 		if (error)
597 			kn->kn_fflags |= NOTE_TRACKERR;
598 		if (kn->kn_fop->f_event(kn, NOTE_FORK))
599 			KNOTE_ACTIVATE(kn, 0);
600 		KQ_LOCK(kq);
601 		kn_leave_flux(kn);
602 		KQ_UNLOCK_FLUX(kq);
603 		list->kl_lock(list->kl_lockarg);
604 	}
605 	list->kl_unlock(list->kl_lockarg);
606 }
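
/*
 * Sketch of what knote_fork() implements for a NOTE_TRACK consumer.  A
 * userspace registration such as
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_TRACK | NOTE_EXIT, 0, NULL);
 *
 * makes each fork() in the watched process deliver one NOTE_CHILD event
 * for the child (data = parent pid, via the EV_FLAG2 knote registered
 * above), while the EV_FLAG1 knote keeps tracking the child itself.  A
 * failed registration is reported as NOTE_TRACKERR in fflags.
 */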
607 
608 /*
609  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
610  * interval timer support code.
611  */
612 
613 #define NOTE_TIMER_PRECMASK						\
614     (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
615 
616 static sbintime_t
617 timer2sbintime(intptr_t data, int flags)
618 {
619 	int64_t secs;
620 
621 	/*
622 	 * Macros for converting to the fractional second portion of an
623 	 * sbintime_t using 64-bit multiplication to improve precision.
624 	 */
625 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
626 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
627 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
628 	switch (flags & NOTE_TIMER_PRECMASK) {
629 	case NOTE_SECONDS:
630 #ifdef __LP64__
631 		if (data > (SBT_MAX / SBT_1S))
632 			return (SBT_MAX);
633 #endif
634 		return ((sbintime_t)data << 32);
635 	case NOTE_MSECONDS: /* FALLTHROUGH */
636 	case 0:
637 		if (data >= 1000) {
638 			secs = data / 1000;
639 #ifdef __LP64__
640 			if (secs > (SBT_MAX / SBT_1S))
641 				return (SBT_MAX);
642 #endif
643 			return (secs << 32 | MS_TO_SBT(data % 1000));
644 		}
645 		return (MS_TO_SBT(data));
646 	case NOTE_USECONDS:
647 		if (data >= 1000000) {
648 			secs = data / 1000000;
649 #ifdef __LP64__
650 			if (secs > (SBT_MAX / SBT_1S))
651 				return (SBT_MAX);
652 #endif
653 			return (secs << 32 | US_TO_SBT(data % 1000000));
654 		}
655 		return (US_TO_SBT(data));
656 	case NOTE_NSECONDS:
657 		if (data >= 1000000000) {
658 			secs = data / 1000000000;
659 #ifdef __LP64__
660 			if (secs > (SBT_MAX / SBT_1S))
661 				return (SBT_MAX);
662 #endif
663 			return (secs << 32 | NS_TO_SBT(data % 1000000000));
664 		}
665 		return (NS_TO_SBT(data));
666 	default:
667 		break;
668 	}
669 	return (-1);
670 }
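
/*
 * Worked example of the conversion above: data = 1500 with
 * NOTE_MSECONDS splits into secs = 1 with a 500 ms remainder, yielding
 *
 *	(1 << 32) | MS_TO_SBT(500)
 *
 * i.e. one full second in the upper 32 bits plus half a second
 * (MS_TO_SBT(500) == 2^31) in the 2^-32 s fractional lower half.
 */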
671 
672 struct kq_timer_cb_data {
673 	struct callout c;
674 	sbintime_t next;	/* next timer event fires at */
675 	sbintime_t to;		/* precalculated timer period, 0 for abs */
676 };
677 
678 static void
679 filt_timerexpire(void *knx)
680 {
681 	struct knote *kn;
682 	struct kq_timer_cb_data *kc;
683 
684 	kn = knx;
685 	kn->kn_data++;
686 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
687 
688 	if ((kn->kn_flags & EV_ONESHOT) != 0)
689 		return;
690 	kc = kn->kn_ptr.p_v;
691 	if (kc->to == 0)
692 		return;
693 	kc->next += kc->to;
694 	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
695 	    PCPU_GET(cpuid), C_ABSOLUTE);
696 }
697 
698 /*
699  * data contains the amount of time to sleep.
700  */
701 static int
702 filt_timerattach(struct knote *kn)
703 {
704 	struct kq_timer_cb_data *kc;
705 	struct bintime bt;
706 	sbintime_t to, sbt;
707 	unsigned int ncallouts;
708 
709 	if (kn->kn_sdata < 0)
710 		return (EINVAL);
711 	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
712 		kn->kn_sdata = 1;
713 	/* Only precision units are supported in the flags so far. */
714 	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
715 		return (EINVAL);
716 
717 	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
718 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
719 		getboottimebin(&bt);
720 		sbt = bttosbt(bt);
721 		to -= sbt;
722 	}
723 	if (to < 0)
724 		return (EINVAL);
725 
726 	do {
727 		ncallouts = kq_ncallouts;
728 		if (ncallouts >= kq_calloutmax)
729 			return (ENOMEM);
730 	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));
731 
732 	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
733 		kn->kn_flags |= EV_CLEAR;	/* automatically set */
734 	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
735 	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
736 	callout_init(&kc->c, 1);
737 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
738 		kc->next = to;
739 		kc->to = 0;
740 	} else {
741 		kc->next = to + sbinuptime();
742 		kc->to = to;
743 	}
744 	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
745 	    PCPU_GET(cpuid), C_ABSOLUTE);
746 
747 	return (0);
748 }
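
/*
 * Userspace sketches for the attach logic above: a periodic timer in
 * milliseconds (the default unit) versus a one-shot absolute deadline
 * ('when' stands for a UNIX time in seconds):
 *
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	EV_SET(&ev, 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
 *	    NOTE_ABSTIME | NOTE_SECONDS, when, NULL);
 *
 * The periodic form has EV_CLEAR set automatically and reloads from
 * kc->to; the absolute form is rebased against boottime and keeps
 * kc->to == 0 so it never reloads.
 */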
749 
750 static void
751 filt_timerdetach(struct knote *kn)
752 {
753 	struct kq_timer_cb_data *kc;
754 	unsigned int old;
755 
756 	kc = kn->kn_ptr.p_v;
757 	callout_drain(&kc->c);
758 	free(kc, M_KQUEUE);
759 	old = atomic_fetchadd_int(&kq_ncallouts, -1);
760 	KASSERT(old > 0, ("Number of callouts cannot become negative"));
761 	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
762 }
763 
764 static int
765 filt_timer(struct knote *kn, long hint)
766 {
767 
768 	return (kn->kn_data != 0);
769 }
770 
771 static int
772 filt_userattach(struct knote *kn)
773 {
774 
775 	/*
776 	 * EVFILT_USER knotes are not attached to anything in the kernel.
777 	 */
778 	kn->kn_hook = NULL;
779 	if (kn->kn_fflags & NOTE_TRIGGER)
780 		kn->kn_hookid = 1;
781 	else
782 		kn->kn_hookid = 0;
783 	return (0);
784 }
785 
786 static void
787 filt_userdetach(__unused struct knote *kn)
788 {
789 
790 	/*
791 	 * EVFILT_USER knotes are not attached to anything in the kernel.
792 	 */
793 }
794 
795 static int
796 filt_user(struct knote *kn, __unused long hint)
797 {
798 
799 	return (kn->kn_hookid);
800 }
801 
802 static void
803 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
804 {
805 	u_int ffctrl;
806 
807 	switch (type) {
808 	case EVENT_REGISTER:
809 		if (kev->fflags & NOTE_TRIGGER)
810 			kn->kn_hookid = 1;
811 
812 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
813 		kev->fflags &= NOTE_FFLAGSMASK;
814 		switch (ffctrl) {
815 		case NOTE_FFNOP:
816 			break;
817 
818 		case NOTE_FFAND:
819 			kn->kn_sfflags &= kev->fflags;
820 			break;
821 
822 		case NOTE_FFOR:
823 			kn->kn_sfflags |= kev->fflags;
824 			break;
825 
826 		case NOTE_FFCOPY:
827 			kn->kn_sfflags = kev->fflags;
828 			break;
829 
830 		default:
831 			/* XXX Return error? */
832 			break;
833 		}
834 		kn->kn_sdata = kev->data;
835 		if (kev->flags & EV_CLEAR) {
836 			kn->kn_hookid = 0;
837 			kn->kn_data = 0;
838 			kn->kn_fflags = 0;
839 		}
840 		break;
841 
842 	case EVENT_PROCESS:
843 		*kev = kn->kn_kevent;
844 		kev->fflags = kn->kn_sfflags;
845 		kev->data = kn->kn_sdata;
846 		if (kn->kn_flags & EV_CLEAR) {
847 			kn->kn_hookid = 0;
848 			kn->kn_data = 0;
849 			kn->kn_fflags = 0;
850 		}
851 		break;
852 
853 	default:
854 		panic("filt_usertouch() - invalid type (%lu)", type);
855 		break;
856 	}
857 }
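
/*
 * A minimal EVFILT_USER round trip, sketching the touch logic above:
 * one thread arms the event, another triggers it.
 *
 *	EV_SET(&ev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register)
 *
 *	EV_SET(&ev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(sets kn_hookid = 1)
 *
 * With EV_CLEAR set, EVENT_PROCESS rearms the note (kn_hookid back to
 * 0) after each delivery.
 */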
858 
859 int
860 sys_kqueue(struct thread *td, struct kqueue_args *uap)
861 {
862 
863 	return (kern_kqueue(td, 0, NULL));
864 }
865 
866 static void
867 kqueue_init(struct kqueue *kq)
868 {
869 
870 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
871 	TAILQ_INIT(&kq->kq_head);
872 	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
873 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
874 }
875 
876 int
877 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
878 {
879 	struct filedesc *fdp;
880 	struct kqueue *kq;
881 	struct file *fp;
882 	struct ucred *cred;
883 	int fd, error;
884 
885 	fdp = td->td_proc->p_fd;
886 	cred = td->td_ucred;
887 	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
888 		return (ENOMEM);
889 
890 	error = falloc_caps(td, &fp, &fd, flags, fcaps);
891 	if (error != 0) {
892 		chgkqcnt(cred->cr_ruidinfo, -1, 0);
893 		return (error);
894 	}
895 
896 	/* An extra reference on `fp' has been held for us by falloc(). */
897 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
898 	kqueue_init(kq);
899 	kq->kq_fdp = fdp;
900 	kq->kq_cred = crhold(cred);
901 
902 	FILEDESC_XLOCK(fdp);
903 	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
904 	FILEDESC_XUNLOCK(fdp);
905 
906 	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
907 	fdrop(fp, td);
908 
909 	td->td_retval[0] = fd;
910 	return (0);
911 }
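
/*
 * The canonical userspace pairing of the two syscalls implemented in
 * this file, as a sketch: create a queue, watch an fd for readability,
 * then block for events.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *	for (;;) {
 *		int n = kevent(kq, NULL, 0, &ev, 1, NULL);
 *		... ev.data is the byte count readable on ev.ident ...
 *	}
 */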
912 
913 struct g_kevent_args {
914 	int	fd;
915 	void	*changelist;
916 	int	nchanges;
917 	void	*eventlist;
918 	int	nevents;
919 	const struct timespec *timeout;
920 };
921 
922 int
923 sys_kevent(struct thread *td, struct kevent_args *uap)
924 {
925 	struct kevent_copyops k_ops = {
926 		.arg = uap,
927 		.k_copyout = kevent_copyout,
928 		.k_copyin = kevent_copyin,
929 		.kevent_size = sizeof(struct kevent),
930 	};
931 	struct g_kevent_args gk_args = {
932 		.fd = uap->fd,
933 		.changelist = uap->changelist,
934 		.nchanges = uap->nchanges,
935 		.eventlist = uap->eventlist,
936 		.nevents = uap->nevents,
937 		.timeout = uap->timeout,
938 	};
939 
940 	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
941 }
942 
943 static int
944 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
945     struct kevent_copyops *k_ops, const char *struct_name)
946 {
947 	struct timespec ts, *tsp;
948 #ifdef KTRACE
949 	struct kevent *eventlist = uap->eventlist;
950 #endif
951 	int error;
952 
953 	if (uap->timeout != NULL) {
954 		error = copyin(uap->timeout, &ts, sizeof(ts));
955 		if (error)
956 			return (error);
957 		tsp = &ts;
958 	} else
959 		tsp = NULL;
960 
961 #ifdef KTRACE
962 	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
963 		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
964 		    uap->nchanges, k_ops->kevent_size);
965 #endif
966 
967 	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
968 	    k_ops, tsp);
969 
970 #ifdef KTRACE
971 	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
972 		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
973 		    td->td_retval[0], k_ops->kevent_size);
974 #endif
975 
976 	return (error);
977 }
978 
979 /*
980  * Copy 'count' items into the destination list pointed to by uap->eventlist.
981  */
982 static int
983 kevent_copyout(void *arg, struct kevent *kevp, int count)
984 {
985 	struct kevent_args *uap;
986 	int error;
987 
988 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
989 	uap = (struct kevent_args *)arg;
990 
991 	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
992 	if (error == 0)
993 		uap->eventlist += count;
994 	return (error);
995 }
996 
997 /*
998  * Copy 'count' items from the list pointed to by uap->changelist.
999  */
1000 static int
1001 kevent_copyin(void *arg, struct kevent *kevp, int count)
1002 {
1003 	struct kevent_args *uap;
1004 	int error;
1005 
1006 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1007 	uap = (struct kevent_args *)arg;
1008 
1009 	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
1010 	if (error == 0)
1011 		uap->changelist += count;
1012 	return (error);
1013 }
1014 
1015 #ifdef COMPAT_FREEBSD11
1016 static int
1017 kevent11_copyout(void *arg, struct kevent *kevp, int count)
1018 {
1019 	struct freebsd11_kevent_args *uap;
1020 	struct kevent_freebsd11 kev11;
1021 	int error, i;
1022 
1023 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1024 	uap = (struct freebsd11_kevent_args *)arg;
1025 
1026 	for (i = 0; i < count; i++) {
1027 		kev11.ident = kevp->ident;
1028 		kev11.filter = kevp->filter;
1029 		kev11.flags = kevp->flags;
1030 		kev11.fflags = kevp->fflags;
1031 		kev11.data = kevp->data;
1032 		kev11.udata = kevp->udata;
1033 		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
1034 		if (error != 0)
1035 			break;
1036 		uap->eventlist++;
1037 		kevp++;
1038 	}
1039 	return (error);
1040 }
1041 
1042 /*
1043  * Copy 'count' items from the list pointed to by uap->changelist.
1044  */
1045 static int
1046 kevent11_copyin(void *arg, struct kevent *kevp, int count)
1047 {
1048 	struct freebsd11_kevent_args *uap;
1049 	struct kevent_freebsd11 kev11;
1050 	int error, i;
1051 
1052 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1053 	uap = (struct freebsd11_kevent_args *)arg;
1054 
1055 	for (i = 0; i < count; i++) {
1056 		error = copyin(uap->changelist, &kev11, sizeof(kev11));
1057 		if (error != 0)
1058 			break;
1059 		kevp->ident = kev11.ident;
1060 		kevp->filter = kev11.filter;
1061 		kevp->flags = kev11.flags;
1062 		kevp->fflags = kev11.fflags;
1063 		kevp->data = (uintptr_t)kev11.data;
1064 		kevp->udata = kev11.udata;
1065 		bzero(&kevp->ext, sizeof(kevp->ext));
1066 		uap->changelist++;
1067 		kevp++;
1068 	}
1069 	return (error);
1070 }
1071 
1072 int
1073 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
1074 {
1075 	struct kevent_copyops k_ops = {
1076 		.arg = uap,
1077 		.k_copyout = kevent11_copyout,
1078 		.k_copyin = kevent11_copyin,
1079 		.kevent_size = sizeof(struct kevent_freebsd11),
1080 	};
1081 	struct g_kevent_args gk_args = {
1082 		.fd = uap->fd,
1083 		.changelist = uap->changelist,
1084 		.nchanges = uap->nchanges,
1085 		.eventlist = uap->eventlist,
1086 		.nevents = uap->nevents,
1087 		.timeout = uap->timeout,
1088 	};
1089 
1090 	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
1091 }
1092 #endif
1093 
1094 int
1095 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
1096     struct kevent_copyops *k_ops, const struct timespec *timeout)
1097 {
1098 	cap_rights_t rights;
1099 	struct file *fp;
1100 	int error;
1101 
1102 	cap_rights_init(&rights);
1103 	if (nchanges > 0)
1104 		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
1105 	if (nevents > 0)
1106 		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
1107 	error = fget(td, fd, &rights, &fp);
1108 	if (error != 0)
1109 		return (error);
1110 
1111 	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
1112 	fdrop(fp, td);
1113 
1114 	return (error);
1115 }
1116 
1117 static int
1118 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1119     struct kevent_copyops *k_ops, const struct timespec *timeout)
1120 {
1121 	struct kevent keva[KQ_NEVENTS];
1122 	struct kevent *kevp, *changes;
1123 	int i, n, nerrors, error;
1124 
1125 	nerrors = 0;
1126 	while (nchanges > 0) {
1127 		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
1128 		error = k_ops->k_copyin(k_ops->arg, keva, n);
1129 		if (error)
1130 			return (error);
1131 		changes = keva;
1132 		for (i = 0; i < n; i++) {
1133 			kevp = &changes[i];
1134 			if (!kevp->filter)
1135 				continue;
1136 			kevp->flags &= ~EV_SYSFLAGS;
1137 			error = kqueue_register(kq, kevp, td, 1);
1138 			if (error || (kevp->flags & EV_RECEIPT)) {
1139 				if (nevents == 0)
1140 					return (error);
1141 				kevp->flags = EV_ERROR;
1142 				kevp->data = error;
1143 				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
1144 				nevents--;
1145 				nerrors++;
1146 			}
1147 		}
1148 		nchanges -= n;
1149 	}
1150 	if (nerrors) {
1151 		td->td_retval[0] = nerrors;
1152 		return (0);
1153 	}
1154 
1155 	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1156 }
1157 
1158 int
1159 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
1160     struct kevent_copyops *k_ops, const struct timespec *timeout)
1161 {
1162 	struct kqueue *kq;
1163 	int error;
1164 
1165 	error = kqueue_acquire(fp, &kq);
1166 	if (error != 0)
1167 		return (error);
1168 	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1169 	kqueue_release(kq, 0);
1170 	return (error);
1171 }
1172 
1173 /*
1174  * Performs a kevent() call on a temporarily created kqueue. This can be
1175  * used to perform one-shot polling, similar to poll() and select().
1176  */
1177 int
1178 kern_kevent_anonymous(struct thread *td, int nevents,
1179     struct kevent_copyops *k_ops)
1180 {
1181 	struct kqueue kq = {};
1182 	int error;
1183 
1184 	kqueue_init(&kq);
1185 	kq.kq_refcnt = 1;
1186 	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1187 	kqueue_drain(&kq, td);
1188 	kqueue_destroy(&kq);
1189 	return (error);
1190 }
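
/*
 * A sketch (with hypothetical helpers) of driving the function above
 * from kernel code: the kevent_copyops just bcopy() kernel kevents
 * instead of crossing the user boundary.
 *
 *	struct kevent kev;			(pre-filled change)
 *	struct kevent_copyops k_ops = {
 *		.arg = &kev,
 *		.k_copyout = my_copyout,	(hypothetical helper)
 *		.k_copyin = my_copyin,		(hypothetical helper)
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *
 *	error = kern_kevent_anonymous(td, 1, &k_ops);
 */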
1191 
1192 int
1193 kqueue_add_filteropts(int filt, struct filterops *filtops)
1194 {
1195 	int error;
1196 
1197 	error = 0;
1198 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
1199 		printf(
1200 "trying to add a filterop that is out of range: %d is beyond %d\n",
1201 		    ~filt, EVFILT_SYSCOUNT);
1202 		return (EINVAL);
1203 	}
1204 	mtx_lock(&filterops_lock);
1205 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
1206 	    sysfilt_ops[~filt].for_fop != NULL)
1207 		error = EEXIST;
1208 	else {
1209 		sysfilt_ops[~filt].for_fop = filtops;
1210 		sysfilt_ops[~filt].for_refcnt = 0;
1211 	}
1212 	mtx_unlock(&filterops_lock);
1213 
1214 	return (error);
1215 }
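
/*
 * Sketch of how a subsystem could claim one of the EVFILT_* slots above
 * (filter numbers are negative, so ~filt indexes sysfilt_ops):
 *
 *	static struct filterops my_filtops = {	(hypothetical filter)
 *		.f_isfd = 0,
 *		.f_attach = my_attach,
 *		.f_detach = my_detach,
 *		.f_event = my_event,
 *	};
 *
 *	error = kqueue_add_filteropts(EVFILT_LIO, &my_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_LIO);
 *
 * kqueue_del_filteropts() fails with EBUSY while references exist.
 */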
1216 
1217 int
1218 kqueue_del_filteropts(int filt)
1219 {
1220 	int error;
1221 
1222 	error = 0;
1223 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1224 		return (EINVAL);
1225 
1226 	mtx_lock(&filterops_lock);
1227 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
1228 	    sysfilt_ops[~filt].for_fop == NULL)
1229 		error = EINVAL;
1230 	else if (sysfilt_ops[~filt].for_refcnt != 0)
1231 		error = EBUSY;
1232 	else {
1233 		sysfilt_ops[~filt].for_fop = &null_filtops;
1234 		sysfilt_ops[~filt].for_refcnt = 0;
1235 	}
1236 	mtx_unlock(&filterops_lock);
1237 
1238 	return (error);
1239 }
1240 
1241 static struct filterops *
1242 kqueue_fo_find(int filt)
1243 {
1244 
1245 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1246 		return (NULL);
1247 
1248 	if (sysfilt_ops[~filt].for_nolock)
1249 		return (sysfilt_ops[~filt].for_fop);
1250 
1251 	mtx_lock(&filterops_lock);
1252 	sysfilt_ops[~filt].for_refcnt++;
1253 	if (sysfilt_ops[~filt].for_fop == NULL)
1254 		sysfilt_ops[~filt].for_fop = &null_filtops;
1255 	mtx_unlock(&filterops_lock);
1256 
1257 	return (sysfilt_ops[~filt].for_fop);
1258 }
1259 
1260 static void
1261 kqueue_fo_release(int filt)
1262 {
1263 
1264 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1265 		return;
1266 
1267 	if (sysfilt_ops[~filt].for_nolock)
1268 		return;
1269 
1270 	mtx_lock(&filterops_lock);
1271 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
1272 	    ("filter object refcount not valid on release"));
1273 	sysfilt_ops[~filt].for_refcnt--;
1274 	mtx_unlock(&filterops_lock);
1275 }
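
/*
 * kqueue_fo_find() and kqueue_fo_release() bracket any use of a
 * dynamically added filter; kqueue_register() below follows this
 * pattern:
 *
 *	fops = kqueue_fo_find(filt);	(takes a ref unless for_nolock)
 *	if (fops == NULL)
 *		return (EINVAL);
 *	...
 *	kqueue_fo_release(filt);	(drops the ref)
 *
 * Statically registered filters set for_nolock and skip the refcount.
 */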
1276 
1277 /*
1278  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
1279  * controls whether memory allocation may sleep.  Make sure it is 0 if
1280  * you hold any mutexes.
1281  */
1282 static int
1283 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
1284 {
1285 	struct filterops *fops;
1286 	struct file *fp;
1287 	struct knote *kn, *tkn;
1288 	struct knlist *knl;
1289 	int error, filt, event;
1290 	int haskqglobal, filedesc_unlock;
1291 
1292 	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
1293 		return (EINVAL);
1294 
1295 	fp = NULL;
1296 	kn = NULL;
1297 	knl = NULL;
1298 	error = 0;
1299 	haskqglobal = 0;
1300 	filedesc_unlock = 0;
1301 
1302 	filt = kev->filter;
1303 	fops = kqueue_fo_find(filt);
1304 	if (fops == NULL)
1305 		return (EINVAL);
1306 
1307 	if (kev->flags & EV_ADD) {
1308 		/*
1309 		 * Prevent waiting with locks.  Non-sleepable
1310 		 * allocation failures are handled in the loop, only
1311 		 * if the spare knote appears to be actually required.
1312 		 */
1313 		tkn = knote_alloc(waitok);
1314 	} else {
1315 		tkn = NULL;
1316 	}
1317 
1318 findkn:
1319 	if (fops->f_isfd) {
1320 		KASSERT(td != NULL, ("td is NULL"));
1321 		if (kev->ident > INT_MAX)
1322 			error = EBADF;
1323 		else
1324 			error = fget(td, kev->ident, &cap_event_rights, &fp);
1325 		if (error)
1326 			goto done;
1327 
1328 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1329 		    kev->ident, 0) != 0) {
1330 			/* try again */
1331 			fdrop(fp, td);
1332 			fp = NULL;
1333 			error = kqueue_expand(kq, fops, kev->ident, waitok);
1334 			if (error)
1335 				goto done;
1336 			goto findkn;
1337 		}
1338 
1339 		if (fp->f_type == DTYPE_KQUEUE) {
1340 			/*
1341 			 * If we add some intelligence about what we are doing,
1342 			 * we should be able to support events on ourselves.
1343 			 * We need to know when we are doing this to prevent
1344 			 * getting both the knlist lock and the kq lock since
1345 			 * they are the same thing.
1346 			 */
1347 			if (fp->f_data == kq) {
1348 				error = EINVAL;
1349 				goto done;
1350 			}
1351 
1352 			/*
1353 			 * Pre-lock the filedesc before the global
1354 			 * lock mutex, see the comment in
1355 			 * kqueue_close().
1356 			 */
1357 			FILEDESC_XLOCK(td->td_proc->p_fd);
1358 			filedesc_unlock = 1;
1359 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1360 		}
1361 
1362 		KQ_LOCK(kq);
1363 		if (kev->ident < kq->kq_knlistsize) {
1364 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1365 				if (kev->filter == kn->kn_filter)
1366 					break;
1367 		}
1368 	} else {
1369 		if ((kev->flags & EV_ADD) == EV_ADD)
1370 			kqueue_expand(kq, fops, kev->ident, waitok);
1371 
1372 		KQ_LOCK(kq);
1373 
1374 		/*
1375 		 * If possible, find an existing knote to use for this kevent.
1376 		 */
1377 		if (kev->filter == EVFILT_PROC &&
1378 		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
1379 			/* This is an internal creation of a process tracking
1380 			 * note. Don't attempt to coalesce this with an
1381 			 * existing note.
1382 			 */
1383 			;
1384 		} else if (kq->kq_knhashmask != 0) {
1385 			struct klist *list;
1386 
1387 			list = &kq->kq_knhash[
1388 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1389 			SLIST_FOREACH(kn, list, kn_link)
1390 				if (kev->ident == kn->kn_id &&
1391 				    kev->filter == kn->kn_filter)
1392 					break;
1393 		}
1394 	}
1395 
1396 	/* knote is in the process of changing, wait for it to stabilize. */
1397 	if (kn != NULL && kn_in_flux(kn)) {
1398 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1399 		if (filedesc_unlock) {
1400 			FILEDESC_XUNLOCK(td->td_proc->p_fd);
1401 			filedesc_unlock = 0;
1402 		}
1403 		kq->kq_state |= KQ_FLUXWAIT;
1404 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1405 		if (fp != NULL) {
1406 			fdrop(fp, td);
1407 			fp = NULL;
1408 		}
1409 		goto findkn;
1410 	}
1411 
1412 	/*
1413 	 * kn now contains the matching knote, or NULL if no match
1414 	 */
1415 	if (kn == NULL) {
1416 		if (kev->flags & EV_ADD) {
1417 			kn = tkn;
1418 			tkn = NULL;
1419 			if (kn == NULL) {
1420 				KQ_UNLOCK(kq);
1421 				error = ENOMEM;
1422 				goto done;
1423 			}
1424 			kn->kn_fp = fp;
1425 			kn->kn_kq = kq;
1426 			kn->kn_fop = fops;
1427 			/*
1428 			 * apply reference counts to knote structure, and
1429 			 * do not release it at the end of this routine.
1430 			 */
1431 			fops = NULL;
1432 			fp = NULL;
1433 
1434 			kn->kn_sfflags = kev->fflags;
1435 			kn->kn_sdata = kev->data;
1436 			kev->fflags = 0;
1437 			kev->data = 0;
1438 			kn->kn_kevent = *kev;
1439 			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1440 			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
1441 			kn->kn_status = KN_DETACHED;
1442 			kn_enter_flux(kn);
1443 
1444 			error = knote_attach(kn, kq);
1445 			KQ_UNLOCK(kq);
1446 			if (error != 0) {
1447 				tkn = kn;
1448 				goto done;
1449 			}
1450 
1451 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1452 				knote_drop_detached(kn, td);
1453 				goto done;
1454 			}
1455 			knl = kn_list_lock(kn);
1456 			goto done_ev_add;
1457 		} else {
1458 			/* No matching knote and the EV_ADD flag is not set. */
1459 			KQ_UNLOCK(kq);
1460 			error = ENOENT;
1461 			goto done;
1462 		}
1463 	}
1464 
1465 	if (kev->flags & EV_DELETE) {
1466 		kn_enter_flux(kn);
1467 		KQ_UNLOCK(kq);
1468 		knote_drop(kn, td);
1469 		goto done;
1470 	}
1471 
1472 	if (kev->flags & EV_FORCEONESHOT) {
1473 		kn->kn_flags |= EV_ONESHOT;
1474 		KNOTE_ACTIVATE(kn, 1);
1475 	}
1476 
1477 	/*
1478 	 * The user may change some filter values after the initial EV_ADD,
1479 	 * but doing so will not reset any filter which has already been
1480 	 * triggered.
1481 	 */
1482 	kn->kn_status |= KN_SCAN;
1483 	kn_enter_flux(kn);
1484 	KQ_UNLOCK(kq);
1485 	knl = kn_list_lock(kn);
1486 	kn->kn_kevent.udata = kev->udata;
1487 	if (!fops->f_isfd && fops->f_touch != NULL) {
1488 		fops->f_touch(kn, kev, EVENT_REGISTER);
1489 	} else {
1490 		kn->kn_sfflags = kev->fflags;
1491 		kn->kn_sdata = kev->data;
1492 	}
1493 
1494 	/*
1495 	 * We can get here with kn->kn_knlist == NULL.  This can happen when
1496 	 * the initial attach event decides that the event is "completed"
1497 	 * already.  i.e. filt_procattach is called on a zombie process.  It
1498 	 * will call filt_proc which will remove it from the list, and NULL
1499 	 * kn_knlist.
1500 	 */
1501 done_ev_add:
1502 	if ((kev->flags & EV_ENABLE) != 0)
1503 		kn->kn_status &= ~KN_DISABLED;
1504 	else if ((kev->flags & EV_DISABLE) != 0)
1505 		kn->kn_status |= KN_DISABLED;
1506 
1507 	if ((kn->kn_status & KN_DISABLED) == 0)
1508 		event = kn->kn_fop->f_event(kn, 0);
1509 	else
1510 		event = 0;
1511 
1512 	KQ_LOCK(kq);
1513 	if (event)
1514 		kn->kn_status |= KN_ACTIVE;
1515 	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
1516 	    KN_ACTIVE)
1517 		knote_enqueue(kn);
1518 	kn->kn_status &= ~KN_SCAN;
1519 	kn_leave_flux(kn);
1520 	kn_list_unlock(knl);
1521 	KQ_UNLOCK_FLUX(kq);
1522 
1523 done:
1524 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1525 	if (filedesc_unlock)
1526 		FILEDESC_XUNLOCK(td->td_proc->p_fd);
1527 	if (fp != NULL)
1528 		fdrop(fp, td);
1529 	knote_free(tkn);
1530 	if (fops != NULL)
1531 		kqueue_fo_release(filt);
1532 	return (error);
1533 }
1534 
1535 static int
1536 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1537 {
1538 	int error;
1539 	struct kqueue *kq;
1540 
1541 	error = 0;
1542 
1543 	kq = fp->f_data;
1544 	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1545 		return (EBADF);
1546 	*kqp = kq;
1547 	KQ_LOCK(kq);
1548 	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1549 		KQ_UNLOCK(kq);
1550 		return (EBADF);
1551 	}
1552 	kq->kq_refcnt++;
1553 	KQ_UNLOCK(kq);
1554 
1555 	return (error);
1556 }
1557 
1558 static void
1559 kqueue_release(struct kqueue *kq, int locked)
1560 {
1561 	if (locked)
1562 		KQ_OWNED(kq);
1563 	else
1564 		KQ_LOCK(kq);
1565 	kq->kq_refcnt--;
1566 	if (kq->kq_refcnt == 1)
1567 		wakeup(&kq->kq_refcnt);
1568 	if (!locked)
1569 		KQ_UNLOCK(kq);
1570 }
1571 
1572 static void
1573 kqueue_schedtask(struct kqueue *kq)
1574 {
1575 
1576 	KQ_OWNED(kq);
1577 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1578 	    ("scheduling kqueue task while draining"));
1579 
1580 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1581 		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1582 		kq->kq_state |= KQ_TASKSCHED;
1583 	}
1584 }
1585 
1586 /*
1587  * Expand the kq to make sure we have storage for the fops/ident pair.
1588  *
1589  * Return 0 on success (or no work necessary), return errno on failure.
1590  *
1591  * Not calling hashinit w/ waitok (i.e. with the proper malloc flag)
1592  * should be safe.  If kqueue_register is called from a non-fd context,
1593  * there usually are, and should be, no locks held.
1594  */
1595 static int
1596 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1597 	int waitok)
1598 {
1599 	struct klist *list, *tmp_knhash, *to_free;
1600 	u_long tmp_knhashmask;
1601 	int size;
1602 	int fd;
1603 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
1604 
1605 	KQ_NOTOWNED(kq);
1606 
1607 	to_free = NULL;
1608 	if (fops->f_isfd) {
1609 		fd = ident;
1610 		if (kq->kq_knlistsize <= fd) {
1611 			size = kq->kq_knlistsize;
1612 			while (size <= fd)
1613 				size += KQEXTENT;
1614 			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1615 			if (list == NULL)
1616 				return (ENOMEM);
1617 			KQ_LOCK(kq);
1618 			if (kq->kq_knlistsize > fd) {
1619 				to_free = list;
1620 				list = NULL;
1621 			} else {
1622 				if (kq->kq_knlist != NULL) {
1623 					bcopy(kq->kq_knlist, list,
1624 					    kq->kq_knlistsize * sizeof(*list));
1625 					to_free = kq->kq_knlist;
1626 					kq->kq_knlist = NULL;
1627 				}
1628 				bzero((caddr_t)list +
1629 				    kq->kq_knlistsize * sizeof(*list),
1630 				    (size - kq->kq_knlistsize) * sizeof(*list));
1631 				kq->kq_knlistsize = size;
1632 				kq->kq_knlist = list;
1633 			}
1634 			KQ_UNLOCK(kq);
1635 		}
1636 	} else {
1637 		if (kq->kq_knhashmask == 0) {
1638 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1639 			    &tmp_knhashmask);
1640 			if (tmp_knhash == NULL)
1641 				return (ENOMEM);
1642 			KQ_LOCK(kq);
1643 			if (kq->kq_knhashmask == 0) {
1644 				kq->kq_knhash = tmp_knhash;
1645 				kq->kq_knhashmask = tmp_knhashmask;
1646 			} else {
1647 				to_free = tmp_knhash;
1648 			}
1649 			KQ_UNLOCK(kq);
1650 		}
1651 	}
1652 	free(to_free, M_KQUEUE);
1653 
1654 	KQ_NOTOWNED(kq);
1655 	return (0);
1656 }
1657 
1658 static void
1659 kqueue_task(void *arg, int pending)
1660 {
1661 	struct kqueue *kq;
1662 	int haskqglobal;
1663 
1664 	haskqglobal = 0;
1665 	kq = arg;
1666 
1667 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1668 	KQ_LOCK(kq);
1669 
1670 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1671 
1672 	kq->kq_state &= ~KQ_TASKSCHED;
1673 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1674 		wakeup(&kq->kq_state);
1675 	}
1676 	KQ_UNLOCK(kq);
1677 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1678 }
1679 
1680 /*
1681  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1682  * We treat KN_MARKER knotes as if they are in flux.
1683  */
1684 static int
1685 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1686     const struct timespec *tsp, struct kevent *keva, struct thread *td)
1687 {
1688 	struct kevent *kevp;
1689 	struct knote *kn, *marker;
1690 	struct knlist *knl;
1691 	sbintime_t asbt, rsbt;
1692 	int count, error, haskqglobal, influx, nkev, touch;
1693 
1694 	count = maxevents;
1695 	nkev = 0;
1696 	error = 0;
1697 	haskqglobal = 0;
1698 
1699 	if (maxevents == 0)
1700 		goto done_nl;
1701 
1702 	rsbt = 0;
1703 	if (tsp != NULL) {
1704 		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
1705 		    tsp->tv_nsec >= 1000000000) {
1706 			error = EINVAL;
1707 			goto done_nl;
1708 		}
1709 		if (timespecisset(tsp)) {
1710 			if (tsp->tv_sec <= INT32_MAX) {
1711 				rsbt = tstosbt(*tsp);
1712 				if (TIMESEL(&asbt, rsbt))
1713 					asbt += tc_tick_sbt;
1714 				if (asbt <= SBT_MAX - rsbt)
1715 					asbt += rsbt;
1716 				else
1717 					asbt = 0;
1718 				rsbt >>= tc_precexp;
1719 			} else
1720 				asbt = 0;
1721 		} else
1722 			asbt = -1;
1723 	} else
1724 		asbt = 0;
1725 	marker = knote_alloc(1);
1726 	marker->kn_status = KN_MARKER;
1727 	KQ_LOCK(kq);
1728 
1729 retry:
1730 	kevp = keva;
1731 	if (kq->kq_count == 0) {
1732 		if (asbt == -1) {
1733 			error = EWOULDBLOCK;
1734 		} else {
1735 			kq->kq_state |= KQ_SLEEP;
1736 			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1737 			    "kqread", asbt, rsbt, C_ABSOLUTE);
1738 		}
1739 		if (error == 0)
1740 			goto retry;
1741 		/* don't restart after signals... */
1742 		if (error == ERESTART)
1743 			error = EINTR;
1744 		else if (error == EWOULDBLOCK)
1745 			error = 0;
1746 		goto done;
1747 	}
1748 
1749 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1750 	influx = 0;
1751 	while (count) {
1752 		KQ_OWNED(kq);
1753 		kn = TAILQ_FIRST(&kq->kq_head);
1754 
1755 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1756 		    kn_in_flux(kn)) {
1757 			if (influx) {
1758 				influx = 0;
1759 				KQ_FLUX_WAKEUP(kq);
1760 			}
1761 			kq->kq_state |= KQ_FLUXWAIT;
1762 			error = msleep(kq, &kq->kq_lock, PSOCK,
1763 			    "kqflxwt", 0);
1764 			continue;
1765 		}
1766 
1767 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1768 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1769 			kn->kn_status &= ~KN_QUEUED;
1770 			kq->kq_count--;
1771 			continue;
1772 		}
1773 		if (kn == marker) {
1774 			KQ_FLUX_WAKEUP(kq);
1775 			if (count == maxevents)
1776 				goto retry;
1777 			goto done;
1778 		}
1779 		KASSERT(!kn_in_flux(kn),
1780 		    ("knote %p is unexpectedly in flux", kn));
1781 
1782 		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
1783 			kn->kn_status &= ~KN_QUEUED;
1784 			kn_enter_flux(kn);
1785 			kq->kq_count--;
1786 			KQ_UNLOCK(kq);
1787 			/*
1788 			 * We don't need to lock the list since we've
1789 			 * marked it as in flux.
1790 			 */
1791 			knote_drop(kn, td);
1792 			KQ_LOCK(kq);
1793 			continue;
1794 		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1795 			kn->kn_status &= ~KN_QUEUED;
1796 			kn_enter_flux(kn);
1797 			kq->kq_count--;
1798 			KQ_UNLOCK(kq);
1799 			/*
1800 			 * We don't need to lock the list since we've
1801 			 * marked the knote as being in flux.
1802 			 */
1803 			*kevp = kn->kn_kevent;
1804 			knote_drop(kn, td);
1805 			KQ_LOCK(kq);
1806 			kn = NULL;
1807 		} else {
1808 			kn->kn_status |= KN_SCAN;
1809 			kn_enter_flux(kn);
1810 			KQ_UNLOCK(kq);
1811 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1812 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1813 			knl = kn_list_lock(kn);
1814 			if (kn->kn_fop->f_event(kn, 0) == 0) {
1815 				KQ_LOCK(kq);
1816 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1817 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
1818 				    KN_SCAN);
1819 				kn_leave_flux(kn);
1820 				kq->kq_count--;
1821 				kn_list_unlock(knl);
1822 				influx = 1;
1823 				continue;
1824 			}
1825 			touch = (!kn->kn_fop->f_isfd &&
1826 			    kn->kn_fop->f_touch != NULL);
1827 			if (touch)
1828 				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
1829 			else
1830 				*kevp = kn->kn_kevent;
1831 			KQ_LOCK(kq);
1832 			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1833 			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1834 				/*
1835 				 * Manually clear knotes that weren't
1836 				 * 'touch'ed.
1837 				 */
1838 				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
1839 					kn->kn_data = 0;
1840 					kn->kn_fflags = 0;
1841 				}
1842 				if (kn->kn_flags & EV_DISPATCH)
1843 					kn->kn_status |= KN_DISABLED;
1844 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1845 				kq->kq_count--;
1846 			} else
1847 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1848 
1849 			kn->kn_status &= ~KN_SCAN;
1850 			kn_leave_flux(kn);
1851 			kn_list_unlock(knl);
1852 			influx = 1;
1853 		}
1854 
1855 		/* we are returning a copy to the user */
1856 		kevp++;
1857 		nkev++;
1858 		count--;
1859 
1860 		if (nkev == KQ_NEVENTS) {
1861 			influx = 0;
1862 			KQ_UNLOCK_FLUX(kq);
1863 			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1864 			nkev = 0;
1865 			kevp = keva;
1866 			KQ_LOCK(kq);
1867 			if (error)
1868 				break;
1869 		}
1870 	}
1871 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1872 done:
1873 	KQ_OWNED(kq);
1874 	KQ_UNLOCK_FLUX(kq);
1875 	knote_free(marker);
1876 done_nl:
1877 	KQ_NOTOWNED(kq);
1878 	if (nkev != 0)
1879 		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1880 	td->td_retval[0] = maxevents - count;
1881 	return (error);
1882 }
1883 
1884 /*ARGSUSED*/
1885 static int
1886 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1887 	struct ucred *active_cred, struct thread *td)
1888 {
1889 	/*
1890 	 * Enabling sigio causes two major problems:
1891 	 * 1) infinite recursion:
1892 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
1893 	 * set.  On receipt of a signal this will cause a kqueue to recurse
1894 	 * into itself over and over.  Sending the sigio causes the kqueue
1895 	 * to become ready, which in turn posts sigio again, forever.
1896 	 * Solution: this can be solved by setting a flag in the kqueue that
1897 	 * we have a SIGIO in progress.
1898 	 * 2) locking problems:
1899 	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
1900 	 * us above the proc and pgrp locks.
1901 	 * Solution: Post a signal using an async mechanism, being sure to
1902 	 * record a generation count in the delivery so that we do not deliver
1903 	 * a signal to the wrong process.
1904 	 *
1905 	 * Note, these two mechanisms are somewhat mutually exclusive!
1906 	 */
1907 #if 0
1908 	struct kqueue *kq;
1909 
1910 	kq = fp->f_data;
1911 	switch (cmd) {
1912 	case FIOASYNC:
1913 		if (*(int *)data) {
1914 			kq->kq_state |= KQ_ASYNC;
1915 		} else {
1916 			kq->kq_state &= ~KQ_ASYNC;
1917 		}
1918 		return (0);
1919 
1920 	case FIOSETOWN:
1921 		return (fsetown(*(int *)data, &kq->kq_sigio));
1922 
1923 	case FIOGETOWN:
1924 		*(int *)data = fgetown(&kq->kq_sigio);
1925 		return (0);
1926 	}
1927 #endif
1928 
1929 	return (ENOTTY);
1930 }
1931 
1932 /*ARGSUSED*/
1933 static int
1934 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1935 	struct thread *td)
1936 {
1937 	struct kqueue *kq;
1938 	int revents = 0;
1939 	int error;
1940 
1941 	if ((error = kqueue_acquire(fp, &kq)))
1942 		return (POLLERR);
1943 
1944 	KQ_LOCK(kq);
1945 	if (events & (POLLIN | POLLRDNORM)) {
1946 		if (kq->kq_count) {
1947 			revents |= events & (POLLIN | POLLRDNORM);
1948 		} else {
1949 			selrecord(td, &kq->kq_sel);
1950 			if (SEL_WAITING(&kq->kq_sel))
1951 				kq->kq_state |= KQ_SEL;
1952 		}
1953 	}
1954 	kqueue_release(kq, 1);
1955 	KQ_UNLOCK(kq);
1956 	return (revents);
1957 }
1958 
1959 /*ARGSUSED*/
1960 static int
1961 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1962 	struct thread *td)
1963 {
1964 
1965 	bzero((void *)st, sizeof *st);
1966 	/*
1967 	 * We no longer return kq_count because the unlocked value is useless.
1968 	 * If you spent all this time getting the count, why not spend your
1969 	 * syscall better by calling kevent?
1970 	 *
1971 	 * XXX - This is needed for libc_r.
1972 	 */
1973 	st->st_mode = S_IFIFO;
1974 	return (0);
1975 }
1976 
1977 static void
1978 kqueue_drain(struct kqueue *kq, struct thread *td)
1979 {
1980 	struct knote *kn;
1981 	int i;
1982 
1983 	KQ_LOCK(kq);
1984 
1985 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1986 	    ("kqueue already closing"));
1987 	kq->kq_state |= KQ_CLOSING;
1988 	if (kq->kq_refcnt > 1)
1989 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1990 
1991 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1992 
1993 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
1994 	    ("kqueue's knlist not empty"));
1995 
1996 	for (i = 0; i < kq->kq_knlistsize; i++) {
1997 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1998 			if (kn_in_flux(kn)) {
1999 				kq->kq_state |= KQ_FLUXWAIT;
2000 				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2001 				continue;
2002 			}
2003 			kn_enter_flux(kn);
2004 			KQ_UNLOCK(kq);
2005 			knote_drop(kn, td);
2006 			KQ_LOCK(kq);
2007 		}
2008 	}
2009 	if (kq->kq_knhashmask != 0) {
2010 		for (i = 0; i <= kq->kq_knhashmask; i++) {
2011 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2012 				if (kn_in_flux(kn)) {
2013 					kq->kq_state |= KQ_FLUXWAIT;
2014 					msleep(kq, &kq->kq_lock, PSOCK,
2015 					       "kqclo2", 0);
2016 					continue;
2017 				}
2018 				kn_enter_flux(kn);
2019 				KQ_UNLOCK(kq);
2020 				knote_drop(kn, td);
2021 				KQ_LOCK(kq);
2022 			}
2023 		}
2024 	}
2025 
2026 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2027 		kq->kq_state |= KQ_TASKDRAIN;
2028 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2029 	}
2030 
2031 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2032 		selwakeuppri(&kq->kq_sel, PSOCK);
2033 		if (!SEL_WAITING(&kq->kq_sel))
2034 			kq->kq_state &= ~KQ_SEL;
2035 	}
2036 
2037 	KQ_UNLOCK(kq);
2038 }
2039 
2040 static void
2041 kqueue_destroy(struct kqueue *kq)
2042 {
2043 
2044 	KASSERT(kq->kq_fdp == NULL,
2045 	    ("kqueue still attached to a file descriptor"));
2046 	seldrain(&kq->kq_sel);
2047 	knlist_destroy(&kq->kq_sel.si_note);
2048 	mtx_destroy(&kq->kq_lock);
2049 
2050 	if (kq->kq_knhash != NULL)
2051 		free(kq->kq_knhash, M_KQUEUE);
2052 	if (kq->kq_knlist != NULL)
2053 		free(kq->kq_knlist, M_KQUEUE);
2054 
2055 	funsetown(&kq->kq_sigio);
2056 }
2057 
2058 /*ARGSUSED*/
2059 static int
2060 kqueue_close(struct file *fp, struct thread *td)
2061 {
2062 	struct kqueue *kq = fp->f_data;
2063 	struct filedesc *fdp;
2064 	int error;
2065 	int filedesc_unlock;
2066 
2067 	if ((error = kqueue_acquire(fp, &kq)))
2068 		return (error);
2069 	kqueue_drain(kq, td);
2070 
2071 	/*
2072 	 * We could be called due to the knote_drop() doing fdrop(),
2073 	 * called from kqueue_register().  In this case the global
2074 	 * lock is owned, and the filedesc sx is locked beforehand, so
2075 	 * as not to take the sleepable lock after a non-sleepable one.
2076 	 */
2077 	fdp = kq->kq_fdp;
2078 	kq->kq_fdp = NULL;
2079 	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2080 		FILEDESC_XLOCK(fdp);
2081 		filedesc_unlock = 1;
2082 	} else
2083 		filedesc_unlock = 0;
2084 	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2085 	if (filedesc_unlock)
2086 		FILEDESC_XUNLOCK(fdp);
2087 
2088 	kqueue_destroy(kq);
2089 	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2090 	crfree(kq->kq_cred);
2091 	free(kq, M_KQUEUE);
2092 	fp->f_data = NULL;
2093 
2094 	return (0);
2095 }
2096 
2097 static int
2098 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2099 {
2100 
2101 	kif->kf_type = KF_TYPE_KQUEUE;
2102 	return (0);
2103 }
2104 
2105 static void
2106 kqueue_wakeup(struct kqueue *kq)
2107 {
2108 	KQ_OWNED(kq);
2109 
2110 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2111 		kq->kq_state &= ~KQ_SLEEP;
2112 		wakeup(kq);
2113 	}
2114 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2115 		selwakeuppri(&kq->kq_sel, PSOCK);
2116 		if (!SEL_WAITING(&kq->kq_sel))
2117 			kq->kq_state &= ~KQ_SEL;
2118 	}
2119 	if (!knlist_empty(&kq->kq_sel.si_note))
2120 		kqueue_schedtask(kq);
2121 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2122 		pgsigio(&kq->kq_sigio, SIGIO, 0);
2123 	}
2124 }
2125 
2126 /*
2127  * Walk down a list of knotes, activating them if their event has triggered.
2128  *
2129  * There is an opportunity to optimize in the case of one kq watching another.
2130  * Instead of scheduling a task to wake it up, you could pass enough state
2131  * down the chain to wake up the parent kqueue directly.  Make this code
2132  * functional first.
2133  */
2134 void
2135 knote(struct knlist *list, long hint, int lockflags)
2136 {
2137 	struct kqueue *kq;
2138 	struct knote *kn, *tkn;
2139 	int error;
2140 
2141 	if (list == NULL)
2142 		return;
2143 
2144 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2145 
2146 	if ((lockflags & KNF_LISTLOCKED) == 0)
2147 		list->kl_lock(list->kl_lockarg);
2148 
2149 	/*
2150 	 * If we unlock the list lock (and mark the knote in flux), we
2151 	 * can eliminate the kqueue scheduling, but this introduces
2152 	 * four lock/unlock operations for each knote tested.  Also, a
2153 	 * marker would be needed to keep the iteration position, since
2154 	 * filters or other threads could remove events.
2155 	 */
2156 	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2157 		kq = kn->kn_kq;
2158 		KQ_LOCK(kq);
2159 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2160 			/*
2161 			 * Do not process the in-flux knotes, except for
2162 			 * those whose flux comes from the kq unlock in
2163 			 * kqueue_scan().  In the latter case, we do
2164 			 * not interfere with the scan, since the code
2165 			 * fragment in kqueue_scan() locks the knlist,
2166 			 * and cannot proceed until we are finished.
2167 			 */
2168 			KQ_UNLOCK(kq);
2169 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
2170 			kn_enter_flux(kn);
2171 			KQ_UNLOCK(kq);
2172 			error = kn->kn_fop->f_event(kn, hint);
2173 			KQ_LOCK(kq);
2174 			kn_leave_flux(kn);
2175 			if (error)
2176 				KNOTE_ACTIVATE(kn, 1);
2177 			KQ_UNLOCK_FLUX(kq);
2178 		} else {
2179 			kn->kn_status |= KN_HASKQLOCK;
2180 			if (kn->kn_fop->f_event(kn, hint))
2181 				KNOTE_ACTIVATE(kn, 1);
2182 			kn->kn_status &= ~KN_HASKQLOCK;
2183 			KQ_UNLOCK(kq);
2184 		}
2185 	}
2186 	if ((lockflags & KNF_LISTLOCKED) == 0)
2187 		list->kl_unlock(list->kl_lockarg);
2188 }
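
/*
 * Typical caller pattern for the above, sketched for a hypothetical
 * driver softc that embeds a knlist (sc_note and sc_avail are
 * illustrative names) protected by its own mutex; the KNOTE_LOCKED()/
 * KNOTE_UNLOCKED() wrappers from sys/event.h expand to knote() with
 * and without KNF_LISTLOCKED:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_avail += len;			// new data arrived
 *	KNOTE_LOCKED(&sc->sc_note, 0);		// list lock already held
 *	mtx_unlock(&sc->sc_mtx);
 */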
2189 
2190 /*
2191  * add a knote to a knlist
2192  */
2193 void
2194 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2195 {
2196 
2197 	KNL_ASSERT_LOCK(knl, islocked);
2198 	KQ_NOTOWNED(kn->kn_kq);
2199 	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2200 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2201 	    ("knote %p was not detached", kn));
2202 	if (!islocked)
2203 		knl->kl_lock(knl->kl_lockarg);
2204 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2205 	if (!islocked)
2206 		knl->kl_unlock(knl->kl_lockarg);
2207 	KQ_LOCK(kn->kn_kq);
2208 	kn->kn_knlist = knl;
2209 	kn->kn_status &= ~KN_DETACHED;
2210 	KQ_UNLOCK(kn->kn_kq);
2211 }
2212 
2213 static void
2214 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2215     int kqislocked)
2216 {
2217 
2218 	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2219 	KNL_ASSERT_LOCK(knl, knlislocked);
2220 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2221 	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2222 	KASSERT((kn->kn_status & KN_DETACHED) == 0,
2223 	    ("knote %p was already detached", kn));
2224 	if (!knlislocked)
2225 		knl->kl_lock(knl->kl_lockarg);
2226 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2227 	kn->kn_knlist = NULL;
2228 	if (!knlislocked)
2229 		kn_list_unlock(knl);
2230 	if (!kqislocked)
2231 		KQ_LOCK(kn->kn_kq);
2232 	kn->kn_status |= KN_DETACHED;
2233 	if (!kqislocked)
2234 		KQ_UNLOCK(kn->kn_kq);
2235 }
2236 
2237 /*
2238  * remove knote from the specified knlist
2239  */
2240 void
2241 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2242 {
2243 
2244 	knlist_remove_kq(knl, kn, islocked, 0);
2245 }
2246 
2247 int
2248 knlist_empty(struct knlist *knl)
2249 {
2250 
2251 	KNL_ASSERT_LOCKED(knl);
2252 	return (SLIST_EMPTY(&knl->kl_list));
2253 }
2254 
2255 static struct mtx knlist_lock;
2256 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2257     MTX_DEF);
2258 static void knlist_mtx_lock(void *arg);
2259 static void knlist_mtx_unlock(void *arg);
2260 
2261 static void
2262 knlist_mtx_lock(void *arg)
2263 {
2264 
2265 	mtx_lock((struct mtx *)arg);
2266 }
2267 
2268 static void
2269 knlist_mtx_unlock(void *arg)
2270 {
2271 
2272 	mtx_unlock((struct mtx *)arg);
2273 }
2274 
2275 static void
2276 knlist_mtx_assert_locked(void *arg)
2277 {
2278 
2279 	mtx_assert((struct mtx *)arg, MA_OWNED);
2280 }
2281 
2282 static void
2283 knlist_mtx_assert_unlocked(void *arg)
2284 {
2285 
2286 	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2287 }
2288 
2289 static void
2290 knlist_rw_rlock(void *arg)
2291 {
2292 
2293 	rw_rlock((struct rwlock *)arg);
2294 }
2295 
2296 static void
2297 knlist_rw_runlock(void *arg)
2298 {
2299 
2300 	rw_runlock((struct rwlock *)arg);
2301 }
2302 
2303 static void
2304 knlist_rw_assert_locked(void *arg)
2305 {
2306 
2307 	rw_assert((struct rwlock *)arg, RA_LOCKED);
2308 }
2309 
2310 static void
2311 knlist_rw_assert_unlocked(void *arg)
2312 {
2313 
2314 	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2315 }
2316 
2317 void
2318 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2319     void (*kl_unlock)(void *),
2320     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
2321 {
2322 
2323 	if (lock == NULL)
2324 		knl->kl_lockarg = &knlist_lock;
2325 	else
2326 		knl->kl_lockarg = lock;
2327 
2328 	if (kl_lock == NULL)
2329 		knl->kl_lock = knlist_mtx_lock;
2330 	else
2331 		knl->kl_lock = kl_lock;
2332 	if (kl_unlock == NULL)
2333 		knl->kl_unlock = knlist_mtx_unlock;
2334 	else
2335 		knl->kl_unlock = kl_unlock;
2336 	if (kl_assert_locked == NULL)
2337 		knl->kl_assert_locked = knlist_mtx_assert_locked;
2338 	else
2339 		knl->kl_assert_locked = kl_assert_locked;
2340 	if (kl_assert_unlocked == NULL)
2341 		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
2342 	else
2343 		knl->kl_assert_unlocked = kl_assert_unlocked;
2344 
2345 	knl->kl_autodestroy = 0;
2346 	SLIST_INIT(&knl->kl_list);
2347 }
2348 
2349 void
2350 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2351 {
2352 
2353 	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
2354 }
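
/*
 * The common consumer pattern: wrap the subsystem's own mutex so that
 * event posting and the filter code share one lock.  A sketch for a
 * hypothetical softc (sc_mtx and sc_note are illustrative names):
 *
 *	mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
 *	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);
 *
 *	// later, in the filter's f_attach routine:
 *	knlist_add(&sc->sc_note, kn, 0);
 */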
2355 
2356 struct knlist *
2357 knlist_alloc(struct mtx *lock)
2358 {
2359 	struct knlist *knl;
2360 
2361 	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2362 	knlist_init_mtx(knl, lock);
2363 	return (knl);
2364 }
2365 
2366 void
2367 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2368 {
2369 
2370 	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2371 	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
2372 }
2373 
2374 void
2375 knlist_destroy(struct knlist *knl)
2376 {
2377 
2378 	KASSERT(KNLIST_EMPTY(knl),
2379 	    ("destroying knlist %p with knotes on it", knl));
2380 }
2381 
2382 void
2383 knlist_detach(struct knlist *knl)
2384 {
2385 
2386 	KNL_ASSERT_LOCKED(knl);
2387 	knl->kl_autodestroy = 1;
2388 	if (knlist_empty(knl)) {
2389 		knlist_destroy(knl);
2390 		free(knl, M_KQUEUE);
2391 	}
2392 }
2393 
2394 /*
2395  * Even if we are locked, we may need to drop the lock to allow any in-flux
2396  * knotes time to "settle".
2397  */
2398 void
2399 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2400 {
2401 	struct knote *kn, *kn2;
2402 	struct kqueue *kq;
2403 
2404 	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2405 	if (islocked)
2406 		KNL_ASSERT_LOCKED(knl);
2407 	else {
2408 		KNL_ASSERT_UNLOCKED(knl);
2409 again:		/* need to reacquire lock since we have dropped it */
2410 		knl->kl_lock(knl->kl_lockarg);
2411 	}
2412 
2413 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2414 		kq = kn->kn_kq;
2415 		KQ_LOCK(kq);
2416 		if (kn_in_flux(kn)) {
2417 			KQ_UNLOCK(kq);
2418 			continue;
2419 		}
2420 		knlist_remove_kq(knl, kn, 1, 1);
2421 		if (killkn) {
2422 			kn_enter_flux(kn);
2423 			KQ_UNLOCK(kq);
2424 			knote_drop_detached(kn, td);
2425 		} else {
2426 			/* Make sure cleared knotes disappear soon */
2427 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
2428 			KQ_UNLOCK(kq);
2429 		}
2430 		kq = NULL;
2431 	}
2432 
2433 	if (!SLIST_EMPTY(&knl->kl_list)) {
2434 		/* some in-flux knotes still remain */
2435 		kn = SLIST_FIRST(&knl->kl_list);
2436 		kq = kn->kn_kq;
2437 		KQ_LOCK(kq);
2438 		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2439 		knl->kl_unlock(knl->kl_lockarg);
2440 		kq->kq_state |= KQ_FLUXWAIT;
2441 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2442 		kq = NULL;
2443 		goto again;
2444 	}
2445 
2446 	if (islocked)
2447 		KNL_ASSERT_LOCKED(knl);
2448 	else {
2449 		knl->kl_unlock(knl->kl_lockarg);
2450 		KNL_ASSERT_UNLOCKED(knl);
2451 	}
2452 }
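
/*
 * Consumers normally reach the above through the knlist_clear() and
 * knlist_delete() wrappers in sys/event.h.  A teardown sketch for the
 * hypothetical softc from the earlier examples:
 *
 *	knlist_clear(&sc->sc_note, 0);	// mark pending knotes EV_EOF
 *	knlist_destroy(&sc->sc_note);
 *	mtx_destroy(&sc->sc_mtx);
 */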
2453 
2454 /*
2455  * Remove all knotes referencing a specified fd; must be called with the
2456  * FILEDESC lock held.  This prevents a race where a new fd comes along,
2457  * occupies the entry, and we attach a knote to the wrong fd.
2458  */
2459 void
2460 knote_fdclose(struct thread *td, int fd)
2461 {
2462 	struct filedesc *fdp = td->td_proc->p_fd;
2463 	struct kqueue *kq;
2464 	struct knote *kn;
2465 	int influx;
2466 
2467 	FILEDESC_XLOCK_ASSERT(fdp);
2468 
2469 	/*
2470 	 * We shouldn't have to worry about new kevents appearing on the
2471 	 * fd since the filedesc is locked.
2472 	 */
2473 	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2474 		KQ_LOCK(kq);
2475 
2476 again:
2477 		influx = 0;
2478 		while (kq->kq_knlistsize > fd &&
2479 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2480 			if (kn_in_flux(kn)) {
2481 				/* someone else might be waiting on our knote */
2482 				if (influx)
2483 					wakeup(kq);
2484 				kq->kq_state |= KQ_FLUXWAIT;
2485 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2486 				goto again;
2487 			}
2488 			kn_enter_flux(kn);
2489 			KQ_UNLOCK(kq);
2490 			influx = 1;
2491 			knote_drop(kn, td);
2492 			KQ_LOCK(kq);
2493 		}
2494 		KQ_UNLOCK_FLUX(kq);
2495 	}
2496 }
2497 
2498 static int
2499 knote_attach(struct knote *kn, struct kqueue *kq)
2500 {
2501 	struct klist *list;
2502 
2503 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2504 	KQ_OWNED(kq);
2505 
2506 	if (kn->kn_fop->f_isfd) {
2507 		if (kn->kn_id >= kq->kq_knlistsize)
2508 			return (ENOMEM);
2509 		list = &kq->kq_knlist[kn->kn_id];
2510 	} else {
2511 		if (kq->kq_knhash == NULL)
2512 			return (ENOMEM);
2513 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2514 	}
2515 	SLIST_INSERT_HEAD(list, kn, kn_link);
2516 	return (0);
2517 }
2518 
2519 static void
2520 knote_drop(struct knote *kn, struct thread *td)
2521 {
2522 
2523 	if ((kn->kn_status & KN_DETACHED) == 0)
2524 		kn->kn_fop->f_detach(kn);
2525 	knote_drop_detached(kn, td);
2526 }
2527 
2528 static void
2529 knote_drop_detached(struct knote *kn, struct thread *td)
2530 {
2531 	struct kqueue *kq;
2532 	struct klist *list;
2533 
2534 	kq = kn->kn_kq;
2535 
2536 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2537 	    ("knote %p still attached", kn));
2538 	KQ_NOTOWNED(kq);
2539 
2540 	KQ_LOCK(kq);
2541 	KASSERT(kn->kn_influx == 1,
2542 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2543 
2544 	if (kn->kn_fop->f_isfd)
2545 		list = &kq->kq_knlist[kn->kn_id];
2546 	else
2547 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2548 
2549 	if (!SLIST_EMPTY(list))
2550 		SLIST_REMOVE(list, kn, knote, kn_link);
2551 	if (kn->kn_status & KN_QUEUED)
2552 		knote_dequeue(kn);
2553 	KQ_UNLOCK_FLUX(kq);
2554 
2555 	if (kn->kn_fop->f_isfd) {
2556 		fdrop(kn->kn_fp, td);
2557 		kn->kn_fp = NULL;
2558 	}
2559 	kqueue_fo_release(kn->kn_kevent.filter);
2560 	kn->kn_fop = NULL;
2561 	knote_free(kn);
2562 }
2563 
2564 static void
2565 knote_enqueue(struct knote *kn)
2566 {
2567 	struct kqueue *kq = kn->kn_kq;
2568 
2569 	KQ_OWNED(kn->kn_kq);
2570 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2571 
2572 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2573 	kn->kn_status |= KN_QUEUED;
2574 	kq->kq_count++;
2575 	kqueue_wakeup(kq);
2576 }
2577 
2578 static void
2579 knote_dequeue(struct knote *kn)
2580 {
2581 	struct kqueue *kq = kn->kn_kq;
2582 
2583 	KQ_OWNED(kn->kn_kq);
2584 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2585 
2586 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2587 	kn->kn_status &= ~KN_QUEUED;
2588 	kq->kq_count--;
2589 }
2590 
2591 static void
2592 knote_init(void)
2593 {
2594 
2595 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2596 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2597 }
2598 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2599 
2600 static struct knote *
2601 knote_alloc(int waitok)
2602 {
2603 
2604 	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
2605 	    M_ZERO));
2606 }
2607 
2608 static void
2609 knote_free(struct knote *kn)
2610 {
2611 
2612 	uma_zfree(knote_zone, kn);
2613 }
2614 
2615 /*
2616  * Register the kev with the kq specified by fd.
2617  */
2618 int
2619 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2620 {
2621 	struct kqueue *kq;
2622 	struct file *fp;
2623 	cap_rights_t rights;
2624 	int error;
2625 
2626 	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
2627 	if (error != 0)
2628 		return (error);
2629 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2630 		goto noacquire;
2631 
2632 	error = kqueue_register(kq, kev, td, waitok);
2633 	kqueue_release(kq, 0);
2634 
2635 noacquire:
2636 	fdrop(fp, td);
2637 	return (error);
2638 }
2639
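/*
 * The userland analogue of the above registration path: a complete
 * kevent(2) round trip (error handling elided; sock_fd is an arbitrary
 * descriptor of interest):
 *
 *	#include <sys/event.h>
 *
 *	struct kevent ev, out;
 *	EV_SET(&ev, sock_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register only
 *	kevent(kq, NULL, 0, &out, 1, NULL);	// block for one event
 */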