xref: /freebsd/sys/kern/kern_event.c (revision b856b51d149811d68ab9e72daa609f00e13c2ec3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
5  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
6  * Copyright (c) 2009 Apple, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ktrace.h"
35 #include "opt_kqueue.h"
36 
37 #ifdef COMPAT_FREEBSD11
38 #define	_WANT_FREEBSD11_KEVENT
39 #endif
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/capsicum.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 #include <sys/proc.h>
50 #include <sys/malloc.h>
51 #include <sys/unistd.h>
52 #include <sys/file.h>
53 #include <sys/filedesc.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/kthread.h>
57 #include <sys/selinfo.h>
58 #include <sys/queue.h>
59 #include <sys/event.h>
60 #include <sys/eventvar.h>
61 #include <sys/poll.h>
62 #include <sys/protosw.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sigio.h>
65 #include <sys/signalvar.h>
66 #include <sys/socket.h>
67 #include <sys/socketvar.h>
68 #include <sys/stat.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysproto.h>
71 #include <sys/syscallsubr.h>
72 #include <sys/taskqueue.h>
73 #include <sys/uio.h>
74 #include <sys/user.h>
75 #ifdef KTRACE
76 #include <sys/ktrace.h>
77 #endif
78 #include <machine/atomic.h>
79 
80 #include <vm/uma.h>
81 
82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
83 
84 /*
85  * This lock is used if multiple kq locks are required.  It should
86  * possibly be made into a per-process lock.
87  */
88 static struct mtx	kq_global;
89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
90 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
91 	if (!haslck)				\
92 		mtx_lock(lck);			\
93 	haslck = 1;				\
94 } while (0)
95 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
96 	if (haslck)				\
97 		mtx_unlock(lck);			\
98 	haslck = 0;				\
99 } while (0)
100 
101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
102 
103 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
104 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
105 static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
106 		    struct thread *td, int mflag);
107 static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
108 static void	kqueue_release(struct kqueue *kq, int locked);
109 static void	kqueue_destroy(struct kqueue *kq);
110 static void	kqueue_drain(struct kqueue *kq, struct thread *td);
111 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
112 		    uintptr_t ident, int mflag);
113 static void	kqueue_task(void *arg, int pending);
114 static int	kqueue_scan(struct kqueue *kq, int maxevents,
115 		    struct kevent_copyops *k_ops,
116 		    const struct timespec *timeout,
117 		    struct kevent *keva, struct thread *td);
118 static void 	kqueue_wakeup(struct kqueue *kq);
119 static struct filterops *kqueue_fo_find(int filt);
120 static void	kqueue_fo_release(int filt);
121 struct g_kevent_args;
122 static int	kern_kevent_generic(struct thread *td,
123 		    struct g_kevent_args *uap,
124 		    struct kevent_copyops *k_ops, const char *struct_name);
125 
126 static fo_ioctl_t	kqueue_ioctl;
127 static fo_poll_t	kqueue_poll;
128 static fo_kqfilter_t	kqueue_kqfilter;
129 static fo_stat_t	kqueue_stat;
130 static fo_close_t	kqueue_close;
131 static fo_fill_kinfo_t	kqueue_fill_kinfo;
132 
133 static struct fileops kqueueops = {
134 	.fo_read = invfo_rdwr,
135 	.fo_write = invfo_rdwr,
136 	.fo_truncate = invfo_truncate,
137 	.fo_ioctl = kqueue_ioctl,
138 	.fo_poll = kqueue_poll,
139 	.fo_kqfilter = kqueue_kqfilter,
140 	.fo_stat = kqueue_stat,
141 	.fo_close = kqueue_close,
142 	.fo_chmod = invfo_chmod,
143 	.fo_chown = invfo_chown,
144 	.fo_sendfile = invfo_sendfile,
145 	.fo_fill_kinfo = kqueue_fill_kinfo,
146 };
147 
148 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
149 static void 	knote_drop(struct knote *kn, struct thread *td);
150 static void 	knote_drop_detached(struct knote *kn, struct thread *td);
151 static void 	knote_enqueue(struct knote *kn);
152 static void 	knote_dequeue(struct knote *kn);
153 static void 	knote_init(void);
154 static struct 	knote *knote_alloc(int mflag);
155 static void 	knote_free(struct knote *kn);
156 
157 static void	filt_kqdetach(struct knote *kn);
158 static int	filt_kqueue(struct knote *kn, long hint);
159 static int	filt_procattach(struct knote *kn);
160 static void	filt_procdetach(struct knote *kn);
161 static int	filt_proc(struct knote *kn, long hint);
162 static int	filt_fileattach(struct knote *kn);
163 static void	filt_timerexpire(void *knx);
164 static int	filt_timerattach(struct knote *kn);
165 static void	filt_timerdetach(struct knote *kn);
166 static void	filt_timerstart(struct knote *kn, sbintime_t to);
167 static void	filt_timertouch(struct knote *kn, struct kevent *kev,
168 		    u_long type);
169 static int	filt_timervalidate(struct knote *kn, sbintime_t *to);
170 static int	filt_timer(struct knote *kn, long hint);
171 static int	filt_userattach(struct knote *kn);
172 static void	filt_userdetach(struct knote *kn);
173 static int	filt_user(struct knote *kn, long hint);
174 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
175 		    u_long type);
176 
177 static struct filterops file_filtops = {
178 	.f_isfd = 1,
179 	.f_attach = filt_fileattach,
180 };
181 static struct filterops kqread_filtops = {
182 	.f_isfd = 1,
183 	.f_detach = filt_kqdetach,
184 	.f_event = filt_kqueue,
185 };
186 /* XXX - move to kern_proc.c?  */
187 static struct filterops proc_filtops = {
188 	.f_isfd = 0,
189 	.f_attach = filt_procattach,
190 	.f_detach = filt_procdetach,
191 	.f_event = filt_proc,
192 };
193 static struct filterops timer_filtops = {
194 	.f_isfd = 0,
195 	.f_attach = filt_timerattach,
196 	.f_detach = filt_timerdetach,
197 	.f_event = filt_timer,
198 	.f_touch = filt_timertouch,
199 };
200 static struct filterops user_filtops = {
201 	.f_attach = filt_userattach,
202 	.f_detach = filt_userdetach,
203 	.f_event = filt_user,
204 	.f_touch = filt_usertouch,
205 };
206 
207 static uma_zone_t	knote_zone;
208 static unsigned int	kq_ncallouts = 0;
209 static unsigned int 	kq_calloutmax = 4 * 1024;
210 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
211     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
212 
213 /* XXX - ensure not influx ? */
214 #define KNOTE_ACTIVATE(kn, islock) do { 				\
215 	if ((islock))							\
216 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
217 	else								\
218 		KQ_LOCK((kn)->kn_kq);					\
219 	(kn)->kn_status |= KN_ACTIVE;					\
220 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
221 		knote_enqueue((kn));					\
222 	if (!(islock))							\
223 		KQ_UNLOCK((kn)->kn_kq);					\
224 } while(0)
225 #define KQ_LOCK(kq) do {						\
226 	mtx_lock(&(kq)->kq_lock);					\
227 } while (0)
228 #define KQ_FLUX_WAKEUP(kq) do {						\
229 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
230 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
231 		wakeup((kq));						\
232 	}								\
233 } while (0)
234 #define KQ_UNLOCK_FLUX(kq) do {						\
235 	KQ_FLUX_WAKEUP(kq);						\
236 	mtx_unlock(&(kq)->kq_lock);					\
237 } while (0)
238 #define KQ_UNLOCK(kq) do {						\
239 	mtx_unlock(&(kq)->kq_lock);					\
240 } while (0)
241 #define KQ_OWNED(kq) do {						\
242 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
243 } while (0)
244 #define KQ_NOTOWNED(kq) do {						\
245 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
246 } while (0)
247 
248 static struct knlist *
249 kn_list_lock(struct knote *kn)
250 {
251 	struct knlist *knl;
252 
253 	knl = kn->kn_knlist;
254 	if (knl != NULL)
255 		knl->kl_lock(knl->kl_lockarg);
256 	return (knl);
257 }
258 
259 static void
260 kn_list_unlock(struct knlist *knl)
261 {
262 	bool do_free;
263 
264 	if (knl == NULL)
265 		return;
266 	do_free = knl->kl_autodestroy && knlist_empty(knl);
267 	knl->kl_unlock(knl->kl_lockarg);
268 	if (do_free) {
269 		knlist_destroy(knl);
270 		free(knl, M_KQUEUE);
271 	}
272 }
273 
274 static bool
275 kn_in_flux(struct knote *kn)
276 {
277 
278 	return (kn->kn_influx > 0);
279 }
280 
281 static void
282 kn_enter_flux(struct knote *kn)
283 {
284 
285 	KQ_OWNED(kn->kn_kq);
286 	MPASS(kn->kn_influx < INT_MAX);
287 	kn->kn_influx++;
288 }
289 
290 static bool
291 kn_leave_flux(struct knote *kn)
292 {
293 
294 	KQ_OWNED(kn->kn_kq);
295 	MPASS(kn->kn_influx > 0);
296 	kn->kn_influx--;
297 	return (kn->kn_influx == 0);
298 }
299 
300 #define	KNL_ASSERT_LOCK(knl, islocked) do {				\
301 	if (islocked)							\
302 		KNL_ASSERT_LOCKED(knl);				\
303 	else								\
304 		KNL_ASSERT_UNLOCKED(knl);				\
305 } while (0)
306 #ifdef INVARIANTS
307 #define	KNL_ASSERT_LOCKED(knl) do {					\
308 	knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED);		\
309 } while (0)
310 #define	KNL_ASSERT_UNLOCKED(knl) do {					\
311 	knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED);		\
312 } while (0)
313 #else /* !INVARIANTS */
314 #define	KNL_ASSERT_LOCKED(knl) do {} while(0)
315 #define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
316 #endif /* INVARIANTS */
317 
318 #ifndef	KN_HASHSIZE
319 #define	KN_HASHSIZE		64		/* XXX should be tunable */
320 #endif
321 
322 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
323 
324 static int
325 filt_nullattach(struct knote *kn)
326 {
327 
328 	return (ENXIO);
329 };
330 
331 struct filterops null_filtops = {
332 	.f_isfd = 0,
333 	.f_attach = filt_nullattach,
334 };
335 
336 /* XXX - make SYSINIT to add these, and move into respective modules. */
337 extern struct filterops sig_filtops;
338 extern struct filterops fs_filtops;
339 
340 /*
341  * Table for all system-defined filters.
342  */
343 static struct mtx	filterops_lock;
344 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
345 	MTX_DEF);
346 static struct {
347 	struct filterops *for_fop;
348 	int for_nolock;
349 	int for_refcnt;
350 } sysfilt_ops[EVFILT_SYSCOUNT] = {
351 	{ &file_filtops, 1 },			/* EVFILT_READ */
352 	{ &file_filtops, 1 },			/* EVFILT_WRITE */
353 	{ &null_filtops },			/* EVFILT_AIO */
354 	{ &file_filtops, 1 },			/* EVFILT_VNODE */
355 	{ &proc_filtops, 1 },			/* EVFILT_PROC */
356 	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
357 	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
358 	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
359 	{ &fs_filtops, 1 },			/* EVFILT_FS */
360 	{ &null_filtops },			/* EVFILT_LIO */
361 	{ &user_filtops, 1 },			/* EVFILT_USER */
362 	{ &null_filtops },			/* EVFILT_SENDFILE */
363 	{ &file_filtops, 1 },                   /* EVFILT_EMPTY */
364 };
365 
366 /*
367  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
368  * method.
369  */
370 static int
371 filt_fileattach(struct knote *kn)
372 {
373 
374 	return (fo_kqfilter(kn->kn_fp, kn));
375 }
376 
377 /*ARGSUSED*/
378 static int
379 kqueue_kqfilter(struct file *fp, struct knote *kn)
380 {
381 	struct kqueue *kq = kn->kn_fp->f_data;
382 
383 	if (kn->kn_filter != EVFILT_READ)
384 		return (EINVAL);
385 
386 	kn->kn_status |= KN_KQUEUE;
387 	kn->kn_fop = &kqread_filtops;
388 	knlist_add(&kq->kq_sel.si_note, kn, 0);
389 
390 	return (0);
391 }
392 
393 static void
394 filt_kqdetach(struct knote *kn)
395 {
396 	struct kqueue *kq = kn->kn_fp->f_data;
397 
398 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
399 }
400 
401 /*ARGSUSED*/
402 static int
403 filt_kqueue(struct knote *kn, long hint)
404 {
405 	struct kqueue *kq = kn->kn_fp->f_data;
406 
407 	kn->kn_data = kq->kq_count;
408 	return (kn->kn_data > 0);
409 }
410 
411 /* XXX - move to kern_proc.c?  */
412 static int
413 filt_procattach(struct knote *kn)
414 {
415 	struct proc *p;
416 	int error;
417 	bool exiting, immediate;
418 
419 	exiting = immediate = false;
420 	if (kn->kn_sfflags & NOTE_EXIT)
421 		p = pfind_any(kn->kn_id);
422 	else
423 		p = pfind(kn->kn_id);
424 	if (p == NULL)
425 		return (ESRCH);
426 	if (p->p_flag & P_WEXIT)
427 		exiting = true;
428 
429 	if ((error = p_cansee(curthread, p))) {
430 		PROC_UNLOCK(p);
431 		return (error);
432 	}
433 
434 	kn->kn_ptr.p_proc = p;
435 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
436 
437 	/*
438 	 * Internal flag indicating registration done by kernel for the
439 	 * purposes of getting a NOTE_CHILD notification.
440 	 */
441 	if (kn->kn_flags & EV_FLAG2) {
442 		kn->kn_flags &= ~EV_FLAG2;
443 		kn->kn_data = kn->kn_sdata;		/* ppid */
444 		kn->kn_fflags = NOTE_CHILD;
445 		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
446 		immediate = true; /* Force immediate activation of child note. */
447 	}
448 	/*
449 	 * Internal flag indicating registration done by kernel (for other than
450 	 * NOTE_CHILD).
451 	 */
452 	if (kn->kn_flags & EV_FLAG1) {
453 		kn->kn_flags &= ~EV_FLAG1;
454 	}
455 
456 	knlist_add(p->p_klist, kn, 1);
457 
458 	/*
459 	 * Immediately activate any child notes or, in the case of a zombie
460 	 * target process, exit notes.  The latter is necessary to handle the
461 	 * case where the target process, e.g. a child, dies before the kevent
462 	 * is registered.
463 	 */
464 	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
465 		KNOTE_ACTIVATE(kn, 0);
466 
467 	PROC_UNLOCK(p);
468 
469 	return (0);
470 }
471 
472 /*
473  * The knote may be attached to a different process, which may exit,
474  * leaving nothing for the knote to be attached to.  So when the process
475  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
476  * it will be deleted when read out.  However, as part of the knote deletion,
477  * this routine is called, so a check is needed to avoid actually performing
478  * a detach, because the original process does not exist any more.
479  */
480 /* XXX - move to kern_proc.c?  */
481 static void
482 filt_procdetach(struct knote *kn)
483 {
484 
485 	knlist_remove(kn->kn_knlist, kn, 0);
486 	kn->kn_ptr.p_proc = NULL;
487 }
488 
489 /* XXX - move to kern_proc.c?  */
490 static int
491 filt_proc(struct knote *kn, long hint)
492 {
493 	struct proc *p;
494 	u_int event;
495 
496 	p = kn->kn_ptr.p_proc;
497 	if (p == NULL) /* already activated, from attach filter */
498 		return (0);
499 
500 	/* Mask off extra data. */
501 	event = (u_int)hint & NOTE_PCTRLMASK;
502 
503 	/* If the user is interested in this event, record it. */
504 	if (kn->kn_sfflags & event)
505 		kn->kn_fflags |= event;
506 
507 	/* Process is gone, so flag the event as finished. */
508 	if (event == NOTE_EXIT) {
509 		kn->kn_flags |= EV_EOF | EV_ONESHOT;
510 		kn->kn_ptr.p_proc = NULL;
511 		if (kn->kn_fflags & NOTE_EXIT)
512 			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
513 		if (kn->kn_fflags == 0)
514 			kn->kn_flags |= EV_DROP;
515 		return (1);
516 	}
517 
518 	return (kn->kn_fflags != 0);
519 }
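
/*
 * Illustrative userland usage (not part of this file): a monitor that wants
 * exit notifications for a child, and wants to follow its descendants, might
 * register something like
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * child_pid and kq are placeholders.  With NOTE_TRACK set, knote_fork() below
 * attaches equivalent knotes to each new descendant and delivers a NOTE_CHILD
 * event whose data field carries the parent's pid.
 */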
520 
521 /*
522  * Called when a process forks.  It mostly does the same as knote(),
523  * activating all knotes registered to be activated when the process
524  * forks.  Additionally, for each knote attached to the parent, check
525  * whether the user wants to track the new process.  If so, attach a
526  * new knote to it and immediately report an event with the child's
527  * pid.
528  */
529 void
530 knote_fork(struct knlist *list, int pid)
531 {
532 	struct kqueue *kq;
533 	struct knote *kn;
534 	struct kevent kev;
535 	int error;
536 
537 	MPASS(list != NULL);
538 	KNL_ASSERT_LOCKED(list);
539 	if (SLIST_EMPTY(&list->kl_list))
540 		return;
541 
542 	memset(&kev, 0, sizeof(kev));
543 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
544 		kq = kn->kn_kq;
545 		KQ_LOCK(kq);
546 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
547 			KQ_UNLOCK(kq);
548 			continue;
549 		}
550 
551 		/*
552 		 * The same as knote(), activate the event.
553 		 */
554 		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
555 			if (kn->kn_fop->f_event(kn, NOTE_FORK))
556 				KNOTE_ACTIVATE(kn, 1);
557 			KQ_UNLOCK(kq);
558 			continue;
559 		}
560 
561 		/*
562 		 * The NOTE_TRACK case. In addition to the activation
563 		 * of the event, we need to register new events to
564 		 * track the child. Drop the locks in preparation for
565 		 * the call to kqueue_register().
566 		 */
567 		kn_enter_flux(kn);
568 		KQ_UNLOCK(kq);
569 		list->kl_unlock(list->kl_lockarg);
570 
571 		/*
572 		 * Activate existing knote and register tracking knotes with
573 		 * new process.
574 		 *
575 		 * First register a knote to get just the child notice. This
576 		 * must be a separate note from a potential NOTE_EXIT
577 		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
578 		 * to use the data field (in conflicting ways).
579 		 */
580 		kev.ident = pid;
581 		kev.filter = kn->kn_filter;
582 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
583 		    EV_FLAG2;
584 		kev.fflags = kn->kn_sfflags;
585 		kev.data = kn->kn_id;		/* parent */
586 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
587 		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
588 		if (error)
589 			kn->kn_fflags |= NOTE_TRACKERR;
590 
591 		/*
592 		 * Then register another knote to track other potential events
593 		 * from the new process.
594 		 */
595 		kev.ident = pid;
596 		kev.filter = kn->kn_filter;
597 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
598 		kev.fflags = kn->kn_sfflags;
599 		kev.data = kn->kn_id;		/* parent */
600 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
601 		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
602 		if (error)
603 			kn->kn_fflags |= NOTE_TRACKERR;
604 		if (kn->kn_fop->f_event(kn, NOTE_FORK))
605 			KNOTE_ACTIVATE(kn, 0);
606 		list->kl_lock(list->kl_lockarg);
607 		KQ_LOCK(kq);
608 		kn_leave_flux(kn);
609 		KQ_UNLOCK_FLUX(kq);
610 	}
611 }
612 
613 /*
614  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
615  * interval timer support code.
616  */
617 
618 #define NOTE_TIMER_PRECMASK						\
619     (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
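
/*
 * Illustrative userland usage (not part of this file): the unit of the timer
 * period is selected with one of the precision flags above, e.g. a periodic
 * 500-millisecond timer and a one-shot absolute deadline might be set up as
 *
 *	struct kevent kev[2];
 *
 *	EV_SET(&kev[0], 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);
 *	EV_SET(&kev[1], 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
 *	    NOTE_ABSTIME | NOTE_SECONDS, deadline, NULL);
 *	kevent(kq, kev, 2, NULL, 0, NULL);
 *
 * kq and deadline are placeholders; with NOTE_ABSTIME the data field is an
 * absolute expiration time rather than a period.
 */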
620 
621 static sbintime_t
622 timer2sbintime(int64_t data, int flags)
623 {
624 	int64_t secs;
625 
626         /*
627          * Macros for converting to the fractional second portion of an
628          * sbintime_t using 64-bit multiplication to improve precision.
629          */
630 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
631 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
632 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
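	/*
	 * Worked example: NS_TO_SBT(ns) computes
	 * (ns * (2^63 / 500000000)) >> 32, which is ns * 2^32 / 10^9 up to
	 * rounding, i.e. the nanosecond count scaled into the 32.32
	 * fixed-point fractional part of an sbintime_t.  US_TO_SBT() and
	 * MS_TO_SBT() are the same construction for microseconds and
	 * milliseconds.
	 */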
633 	switch (flags & NOTE_TIMER_PRECMASK) {
634 	case NOTE_SECONDS:
635 #ifdef __LP64__
636 		if (data > (SBT_MAX / SBT_1S))
637 			return (SBT_MAX);
638 #endif
639 		return ((sbintime_t)data << 32);
640 	case NOTE_MSECONDS: /* FALLTHROUGH */
641 	case 0:
642 		if (data >= 1000) {
643 			secs = data / 1000;
644 #ifdef __LP64__
645 			if (secs > (SBT_MAX / SBT_1S))
646 				return (SBT_MAX);
647 #endif
648 			return (secs << 32 | MS_TO_SBT(data % 1000));
649 		}
650 		return (MS_TO_SBT(data));
651 	case NOTE_USECONDS:
652 		if (data >= 1000000) {
653 			secs = data / 1000000;
654 #ifdef __LP64__
655 			if (secs > (SBT_MAX / SBT_1S))
656 				return (SBT_MAX);
657 #endif
658 			return (secs << 32 | US_TO_SBT(data % 1000000));
659 		}
660 		return (US_TO_SBT(data));
661 	case NOTE_NSECONDS:
662 		if (data >= 1000000000) {
663 			secs = data / 1000000000;
664 #ifdef __LP64__
665 			if (secs > (SBT_MAX / SBT_1S))
666 				return (SBT_MAX);
667 #endif
668 			return (secs << 32 | NS_TO_SBT(data % 1000000000));
669 		}
670 		return (NS_TO_SBT(data));
671 	default:
672 		break;
673 	}
674 	return (-1);
675 }
676 
677 struct kq_timer_cb_data {
678 	struct callout c;
679 	struct proc *p;
680 	struct knote *kn;
681 	int cpuid;
682 	TAILQ_ENTRY(kq_timer_cb_data) link;
683 	sbintime_t next;	/* next timer event fires at */
684 	sbintime_t to;		/* precalculated timer period, 0 for abs */
685 };
686 
687 static void
688 kqtimer_sched_callout(struct kq_timer_cb_data *kc)
689 {
690 	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
691 	    kc->cpuid, C_ABSOLUTE);
692 }
693 
694 void
695 kqtimer_proc_continue(struct proc *p)
696 {
697 	struct kq_timer_cb_data *kc, *kc1;
698 	struct bintime bt;
699 	sbintime_t now;
700 
701 	PROC_LOCK_ASSERT(p, MA_OWNED);
702 
703 	getboottimebin(&bt);
704 	now = bttosbt(bt);
705 
706 	TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
707 		TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
708 		if (kc->next <= now)
709 			filt_timerexpire(kc->kn);
710 		else
711 			kqtimer_sched_callout(kc);
712 	}
713 }
714 
715 static void
716 filt_timerexpire(void *knx)
717 {
718 	struct knote *kn;
719 	struct kq_timer_cb_data *kc;
720 	struct proc *p;
721 	sbintime_t now;
722 
723 	kn = knx;
724 	kc = kn->kn_ptr.p_v;
725 
726 	if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
727 		kn->kn_data++;
728 		KNOTE_ACTIVATE(kn, 0);
729 		return;
730 	}
731 
732 	for (now = sbinuptime(); kc->next <= now; kc->next += kc->to)
733 		kn->kn_data++;
734 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
735 
736 	/*
737 	 * The initial check for a stopped kc->p is racy.  It is fine to
738 	 * miss the setting of the stop flags; at worst we would schedule
739 	 * one more callout.  On the other hand, it is not fine to fail to
740 	 * schedule when we missed the clearing of the flags, so we
741 	 * recheck them under the lock and observe a consistent state.
742 	 */
743 	p = kc->p;
744 	if (P_SHOULDSTOP(p) || P_KILLED(p)) {
745 		PROC_LOCK(p);
746 		if (P_SHOULDSTOP(p) || P_KILLED(p)) {
747 			TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
748 			PROC_UNLOCK(p);
749 			return;
750 		}
751 		PROC_UNLOCK(p);
752 	}
753 	kqtimer_sched_callout(kc);
754 }
755 
756 /*
757  * data contains the amount of time to sleep
758  */
759 static int
760 filt_timervalidate(struct knote *kn, sbintime_t *to)
761 {
762 	struct bintime bt;
763 	sbintime_t sbt;
764 
765 	if (kn->kn_sdata < 0)
766 		return (EINVAL);
767 	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
768 		kn->kn_sdata = 1;
769 	/*
770 	 * The only fflags values supported are the timer unit
771 	 * (precision) and the absolute time indicator.
772 	 */
773 	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
774 		return (EINVAL);
775 
776 	*to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
777 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
778 		getboottimebin(&bt);
779 		sbt = bttosbt(bt);
780 		*to -= sbt;
781 	}
782 	if (*to < 0)
783 		return (EINVAL);
784 	return (0);
785 }
786 
787 static int
788 filt_timerattach(struct knote *kn)
789 {
790 	struct kq_timer_cb_data *kc;
791 	sbintime_t to;
792 	unsigned int ncallouts;
793 	int error;
794 
795 	error = filt_timervalidate(kn, &to);
796 	if (error != 0)
797 		return (error);
798 
799 	do {
800 		ncallouts = kq_ncallouts;
801 		if (ncallouts >= kq_calloutmax)
802 			return (ENOMEM);
803 	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));
804 
805 	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
806 		kn->kn_flags |= EV_CLEAR;	/* automatically set */
807 	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
808 	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
809 	kc->kn = kn;
810 	kc->p = curproc;
811 	kc->cpuid = PCPU_GET(cpuid);
812 	callout_init(&kc->c, 1);
813 	filt_timerstart(kn, to);
814 
815 	return (0);
816 }
817 
818 static void
819 filt_timerstart(struct knote *kn, sbintime_t to)
820 {
821 	struct kq_timer_cb_data *kc;
822 
823 	kc = kn->kn_ptr.p_v;
824 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
825 		kc->next = to;
826 		kc->to = 0;
827 	} else {
828 		kc->next = to + sbinuptime();
829 		kc->to = to;
830 	}
831 	kqtimer_sched_callout(kc);
832 }
833 
834 static void
835 filt_timerdetach(struct knote *kn)
836 {
837 	struct kq_timer_cb_data *kc;
838 	unsigned int old __unused;
839 
840 	kc = kn->kn_ptr.p_v;
841 	callout_drain(&kc->c);
842 	free(kc, M_KQUEUE);
843 	old = atomic_fetchadd_int(&kq_ncallouts, -1);
844 	KASSERT(old > 0, ("Number of callouts cannot become negative"));
845 	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
846 }
847 
848 static void
849 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
850 {
851 	struct kq_timer_cb_data *kc;
852 	struct kqueue *kq;
853 	sbintime_t to;
854 	int error;
855 
856 	switch (type) {
857 	case EVENT_REGISTER:
858 		/* Handle re-added timers that update data/fflags */
859 		if (kev->flags & EV_ADD) {
860 			kc = kn->kn_ptr.p_v;
861 
862 			/* Drain any existing callout. */
863 			callout_drain(&kc->c);
864 
865 			/* Throw away any existing undelivered record
866 			 * of the timer expiration. This is done under
867 			 * the presumption that if a process is
868 			 * re-adding this timer with new parameters,
869 			 * it is no longer interested in what may have
870 			 * happened under the old parameters. If it is
871 			 * interested, it can wait for the expiration,
872 			 * delete the old timer definition, and then
873 			 * add the new one.
874 			 *
875 			 * This has to be done while the kq is locked:
876 			 *   - if enqueued, dequeue
877 			 *   - make it no longer active
878 			 *   - clear the count of expiration events
879 			 */
880 			kq = kn->kn_kq;
881 			KQ_LOCK(kq);
882 			if (kn->kn_status & KN_QUEUED)
883 				knote_dequeue(kn);
884 
885 			kn->kn_status &= ~KN_ACTIVE;
886 			kn->kn_data = 0;
887 			KQ_UNLOCK(kq);
888 
889 			/* Reschedule timer based on new data/fflags */
890 			kn->kn_sfflags = kev->fflags;
891 			kn->kn_sdata = kev->data;
892 			error = filt_timervalidate(kn, &to);
893 			if (error != 0) {
894 			  	kn->kn_flags |= EV_ERROR;
895 				kn->kn_data = error;
896 			} else
897 			  	filt_timerstart(kn, to);
898 		}
899 		break;
900 
901         case EVENT_PROCESS:
902 		*kev = kn->kn_kevent;
903 		if (kn->kn_flags & EV_CLEAR) {
904 			kn->kn_data = 0;
905 			kn->kn_fflags = 0;
906 		}
907 		break;
908 
909 	default:
910 		panic("filt_timertouch() - invalid type (%ld)", type);
911 		break;
912 	}
913 }
914 
915 static int
916 filt_timer(struct knote *kn, long hint)
917 {
918 
919 	return (kn->kn_data != 0);
920 }
921 
922 static int
923 filt_userattach(struct knote *kn)
924 {
925 
926 	/*
927 	 * EVFILT_USER knotes are not attached to anything in the kernel.
928 	 */
929 	kn->kn_hook = NULL;
930 	if (kn->kn_fflags & NOTE_TRIGGER)
931 		kn->kn_hookid = 1;
932 	else
933 		kn->kn_hookid = 0;
934 	return (0);
935 }
936 
937 static void
938 filt_userdetach(__unused struct knote *kn)
939 {
940 
941 	/*
942 	 * EVFILT_USER knotes are not attached to anything in the kernel.
943 	 */
944 }
945 
946 static int
947 filt_user(struct knote *kn, __unused long hint)
948 {
949 
950 	return (kn->kn_hookid);
951 }
952 
953 static void
954 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
955 {
956 	u_int ffctrl;
957 
958 	switch (type) {
959 	case EVENT_REGISTER:
960 		if (kev->fflags & NOTE_TRIGGER)
961 			kn->kn_hookid = 1;
962 
963 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
964 		kev->fflags &= NOTE_FFLAGSMASK;
965 		switch (ffctrl) {
966 		case NOTE_FFNOP:
967 			break;
968 
969 		case NOTE_FFAND:
970 			kn->kn_sfflags &= kev->fflags;
971 			break;
972 
973 		case NOTE_FFOR:
974 			kn->kn_sfflags |= kev->fflags;
975 			break;
976 
977 		case NOTE_FFCOPY:
978 			kn->kn_sfflags = kev->fflags;
979 			break;
980 
981 		default:
982 			/* XXX Return error? */
983 			break;
984 		}
985 		kn->kn_sdata = kev->data;
986 		if (kev->flags & EV_CLEAR) {
987 			kn->kn_hookid = 0;
988 			kn->kn_data = 0;
989 			kn->kn_fflags = 0;
990 		}
991 		break;
992 
993         case EVENT_PROCESS:
994 		*kev = kn->kn_kevent;
995 		kev->fflags = kn->kn_sfflags;
996 		kev->data = kn->kn_sdata;
997 		if (kn->kn_flags & EV_CLEAR) {
998 			kn->kn_hookid = 0;
999 			kn->kn_data = 0;
1000 			kn->kn_fflags = 0;
1001 		}
1002 		break;
1003 
1004 	default:
1005 		panic("filt_usertouch() - invalid type (%ld)", type);
1006 		break;
1007 	}
1008 }
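
/*
 * Illustrative userland usage (not part of this file): an EVFILT_USER event
 * is registered once and later fired, possibly from another thread, with
 * NOTE_TRIGGER, e.g.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kq is a placeholder.  The NOTE_FFAND/NOTE_FFOR/NOTE_FFCOPY operations
 * handled by filt_usertouch() above let a trigger combine fflags with the
 * saved value.
 */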
1009 
1010 int
1011 sys_kqueue(struct thread *td, struct kqueue_args *uap)
1012 {
1013 
1014 	return (kern_kqueue(td, 0, NULL));
1015 }
1016 
1017 static void
1018 kqueue_init(struct kqueue *kq)
1019 {
1020 
1021 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
1022 	TAILQ_INIT(&kq->kq_head);
1023 	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
1024 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
1025 }
1026 
1027 int
1028 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
1029 {
1030 	struct filedesc *fdp;
1031 	struct kqueue *kq;
1032 	struct file *fp;
1033 	struct ucred *cred;
1034 	int fd, error;
1035 
1036 	fdp = td->td_proc->p_fd;
1037 	cred = td->td_ucred;
1038 	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
1039 		return (ENOMEM);
1040 
1041 	error = falloc_caps(td, &fp, &fd, flags, fcaps);
1042 	if (error != 0) {
1043 		chgkqcnt(cred->cr_ruidinfo, -1, 0);
1044 		return (error);
1045 	}
1046 
1047 	/* An extra reference on `fp' has been held for us by falloc(). */
1048 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
1049 	kqueue_init(kq);
1050 	kq->kq_fdp = fdp;
1051 	kq->kq_cred = crhold(cred);
1052 
1053 	FILEDESC_XLOCK(fdp);
1054 	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
1055 	FILEDESC_XUNLOCK(fdp);
1056 
1057 	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
1058 	fdrop(fp, td);
1059 
1060 	td->td_retval[0] = fd;
1061 	return (0);
1062 }
1063 
1064 struct g_kevent_args {
1065 	int	fd;
1066 	void	*changelist;
1067 	int	nchanges;
1068 	void	*eventlist;
1069 	int	nevents;
1070 	const struct timespec *timeout;
1071 };
1072 
1073 int
1074 sys_kevent(struct thread *td, struct kevent_args *uap)
1075 {
1076 	struct kevent_copyops k_ops = {
1077 		.arg = uap,
1078 		.k_copyout = kevent_copyout,
1079 		.k_copyin = kevent_copyin,
1080 		.kevent_size = sizeof(struct kevent),
1081 	};
1082 	struct g_kevent_args gk_args = {
1083 		.fd = uap->fd,
1084 		.changelist = uap->changelist,
1085 		.nchanges = uap->nchanges,
1086 		.eventlist = uap->eventlist,
1087 		.nevents = uap->nevents,
1088 		.timeout = uap->timeout,
1089 	};
1090 
1091 	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
1092 }
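
/*
 * Illustrative userland usage (not part of this file): this syscall serves
 * the usual register-then-wait pattern, e.g.
 *
 *	int kq = kqueue();
 *	struct kevent change, ev;
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);
 *	while (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *		handle_readable((int)ev.ident, ev.data);
 *
 * fd and handle_readable() are placeholders; for EVFILT_READ, ev.data
 * reports the number of bytes available for reading.
 */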
1093 
1094 static int
1095 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
1096     struct kevent_copyops *k_ops, const char *struct_name)
1097 {
1098 	struct timespec ts, *tsp;
1099 #ifdef KTRACE
1100 	struct kevent *eventlist = uap->eventlist;
1101 #endif
1102 	int error;
1103 
1104 	if (uap->timeout != NULL) {
1105 		error = copyin(uap->timeout, &ts, sizeof(ts));
1106 		if (error)
1107 			return (error);
1108 		tsp = &ts;
1109 	} else
1110 		tsp = NULL;
1111 
1112 #ifdef KTRACE
1113 	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
1114 		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
1115 		    uap->nchanges, k_ops->kevent_size);
1116 #endif
1117 
1118 	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
1119 	    k_ops, tsp);
1120 
1121 #ifdef KTRACE
1122 	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
1123 		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
1124 		    td->td_retval[0], k_ops->kevent_size);
1125 #endif
1126 
1127 	return (error);
1128 }
1129 
1130 /*
1131  * Copy 'count' items into the destination list pointed to by uap->eventlist.
1132  */
1133 static int
1134 kevent_copyout(void *arg, struct kevent *kevp, int count)
1135 {
1136 	struct kevent_args *uap;
1137 	int error;
1138 
1139 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1140 	uap = (struct kevent_args *)arg;
1141 
1142 	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
1143 	if (error == 0)
1144 		uap->eventlist += count;
1145 	return (error);
1146 }
1147 
1148 /*
1149  * Copy 'count' items from the list pointed to by uap->changelist.
1150  */
1151 static int
1152 kevent_copyin(void *arg, struct kevent *kevp, int count)
1153 {
1154 	struct kevent_args *uap;
1155 	int error;
1156 
1157 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1158 	uap = (struct kevent_args *)arg;
1159 
1160 	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
1161 	if (error == 0)
1162 		uap->changelist += count;
1163 	return (error);
1164 }
1165 
1166 #ifdef COMPAT_FREEBSD11
1167 static int
1168 kevent11_copyout(void *arg, struct kevent *kevp, int count)
1169 {
1170 	struct freebsd11_kevent_args *uap;
1171 	struct kevent_freebsd11 kev11;
1172 	int error, i;
1173 
1174 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1175 	uap = (struct freebsd11_kevent_args *)arg;
1176 
1177 	for (i = 0; i < count; i++) {
1178 		kev11.ident = kevp->ident;
1179 		kev11.filter = kevp->filter;
1180 		kev11.flags = kevp->flags;
1181 		kev11.fflags = kevp->fflags;
1182 		kev11.data = kevp->data;
1183 		kev11.udata = kevp->udata;
1184 		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
1185 		if (error != 0)
1186 			break;
1187 		uap->eventlist++;
1188 		kevp++;
1189 	}
1190 	return (error);
1191 }
1192 
1193 /*
1194  * Copy 'count' items from the list pointed to by uap->changelist.
1195  */
1196 static int
1197 kevent11_copyin(void *arg, struct kevent *kevp, int count)
1198 {
1199 	struct freebsd11_kevent_args *uap;
1200 	struct kevent_freebsd11 kev11;
1201 	int error, i;
1202 
1203 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1204 	uap = (struct freebsd11_kevent_args *)arg;
1205 
1206 	for (i = 0; i < count; i++) {
1207 		error = copyin(uap->changelist, &kev11, sizeof(kev11));
1208 		if (error != 0)
1209 			break;
1210 		kevp->ident = kev11.ident;
1211 		kevp->filter = kev11.filter;
1212 		kevp->flags = kev11.flags;
1213 		kevp->fflags = kev11.fflags;
1214 		kevp->data = (uintptr_t)kev11.data;
1215 		kevp->udata = kev11.udata;
1216 		bzero(&kevp->ext, sizeof(kevp->ext));
1217 		uap->changelist++;
1218 		kevp++;
1219 	}
1220 	return (error);
1221 }
1222 
1223 int
1224 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
1225 {
1226 	struct kevent_copyops k_ops = {
1227 		.arg = uap,
1228 		.k_copyout = kevent11_copyout,
1229 		.k_copyin = kevent11_copyin,
1230 		.kevent_size = sizeof(struct kevent_freebsd11),
1231 	};
1232 	struct g_kevent_args gk_args = {
1233 		.fd = uap->fd,
1234 		.changelist = uap->changelist,
1235 		.nchanges = uap->nchanges,
1236 		.eventlist = uap->eventlist,
1237 		.nevents = uap->nevents,
1238 		.timeout = uap->timeout,
1239 	};
1240 
1241 	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
1242 }
1243 #endif
1244 
1245 int
1246 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
1247     struct kevent_copyops *k_ops, const struct timespec *timeout)
1248 {
1249 	cap_rights_t rights;
1250 	struct file *fp;
1251 	int error;
1252 
1253 	cap_rights_init_zero(&rights);
1254 	if (nchanges > 0)
1255 		cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
1256 	if (nevents > 0)
1257 		cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
1258 	error = fget(td, fd, &rights, &fp);
1259 	if (error != 0)
1260 		return (error);
1261 
1262 	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
1263 	fdrop(fp, td);
1264 
1265 	return (error);
1266 }
1267 
1268 static int
1269 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1270     struct kevent_copyops *k_ops, const struct timespec *timeout)
1271 {
1272 	struct kevent keva[KQ_NEVENTS];
1273 	struct kevent *kevp, *changes;
1274 	int i, n, nerrors, error;
1275 
1276 	nerrors = 0;
1277 	while (nchanges > 0) {
1278 		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
1279 		error = k_ops->k_copyin(k_ops->arg, keva, n);
1280 		if (error)
1281 			return (error);
1282 		changes = keva;
1283 		for (i = 0; i < n; i++) {
1284 			kevp = &changes[i];
1285 			if (!kevp->filter)
1286 				continue;
1287 			kevp->flags &= ~EV_SYSFLAGS;
1288 			error = kqueue_register(kq, kevp, td, M_WAITOK);
1289 			if (error || (kevp->flags & EV_RECEIPT)) {
1290 				if (nevents == 0)
1291 					return (error);
1292 				kevp->flags = EV_ERROR;
1293 				kevp->data = error;
1294 				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
1295 				nevents--;
1296 				nerrors++;
1297 			}
1298 		}
1299 		nchanges -= n;
1300 	}
1301 	if (nerrors) {
1302 		td->td_retval[0] = nerrors;
1303 		return (0);
1304 	}
1305 
1306 	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1307 }
1308 
1309 int
1310 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
1311     struct kevent_copyops *k_ops, const struct timespec *timeout)
1312 {
1313 	struct kqueue *kq;
1314 	int error;
1315 
1316 	error = kqueue_acquire(fp, &kq);
1317 	if (error != 0)
1318 		return (error);
1319 	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1320 	kqueue_release(kq, 0);
1321 	return (error);
1322 }
1323 
1324 /*
1325  * Performs a kevent() call on a temporarily created kqueue. This can be
1326  * used to perform one-shot polling, similar to poll() and select().
1327  */
1328 int
1329 kern_kevent_anonymous(struct thread *td, int nevents,
1330     struct kevent_copyops *k_ops)
1331 {
1332 	struct kqueue kq = {};
1333 	int error;
1334 
1335 	kqueue_init(&kq);
1336 	kq.kq_refcnt = 1;
1337 	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1338 	kqueue_drain(&kq, td);
1339 	kqueue_destroy(&kq);
1340 	return (error);
1341 }
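
/*
 * Illustrative in-kernel usage (sketch only, not part of this file): a caller
 * provides kevent_copyops whose callbacks move kevents to and from kernel
 * memory instead of userspace, e.g.
 *
 *	static int
 *	example_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		struct kevent **srcp = arg;
 *
 *		memcpy(kevp, *srcp, count * sizeof(*kevp));
 *		*srcp += count;
 *		return (0);
 *	}
 *
 *	struct kevent_copyops k_ops = {
 *		.arg = &changes,
 *		.k_copyin = example_copyin,
 *		.k_copyout = example_copyout,
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *	error = kern_kevent_anonymous(td, nevents, &k_ops);
 *
 * example_copyout, changes, td and nevents are placeholders following the
 * same pattern.
 */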
1342 
1343 int
1344 kqueue_add_filteropts(int filt, struct filterops *filtops)
1345 {
1346 	int error;
1347 
1348 	error = 0;
1349 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
1350 		printf(
1351 "trying to add a filterop that is out of range: %d is beyond %d\n",
1352 		    ~filt, EVFILT_SYSCOUNT);
1353 		return EINVAL;
1354 	}
1355 	mtx_lock(&filterops_lock);
1356 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
1357 	    sysfilt_ops[~filt].for_fop != NULL)
1358 		error = EEXIST;
1359 	else {
1360 		sysfilt_ops[~filt].for_fop = filtops;
1361 		sysfilt_ops[~filt].for_refcnt = 0;
1362 	}
1363 	mtx_unlock(&filterops_lock);
1364 
1365 	return (error);
1366 }
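
/*
 * Illustrative usage (not part of this file): subsystems that implement their
 * own filters register them at load time, along the lines of
 *
 *	error = kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
 *
 * and undo the registration with kqueue_del_filteropts() on unload.  The
 * negative filter value is turned into a sysfilt_ops[] index with the ~filt
 * trick used throughout this file.
 */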
1367 
1368 int
1369 kqueue_del_filteropts(int filt)
1370 {
1371 	int error;
1372 
1373 	error = 0;
1374 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1375 		return EINVAL;
1376 
1377 	mtx_lock(&filterops_lock);
1378 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
1379 	    sysfilt_ops[~filt].for_fop == NULL)
1380 		error = EINVAL;
1381 	else if (sysfilt_ops[~filt].for_refcnt != 0)
1382 		error = EBUSY;
1383 	else {
1384 		sysfilt_ops[~filt].for_fop = &null_filtops;
1385 		sysfilt_ops[~filt].for_refcnt = 0;
1386 	}
1387 	mtx_unlock(&filterops_lock);
1388 
1389 	return error;
1390 }
1391 
1392 static struct filterops *
1393 kqueue_fo_find(int filt)
1394 {
1395 
1396 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1397 		return NULL;
1398 
1399 	if (sysfilt_ops[~filt].for_nolock)
1400 		return sysfilt_ops[~filt].for_fop;
1401 
1402 	mtx_lock(&filterops_lock);
1403 	sysfilt_ops[~filt].for_refcnt++;
1404 	if (sysfilt_ops[~filt].for_fop == NULL)
1405 		sysfilt_ops[~filt].for_fop = &null_filtops;
1406 	mtx_unlock(&filterops_lock);
1407 
1408 	return sysfilt_ops[~filt].for_fop;
1409 }
1410 
1411 static void
1412 kqueue_fo_release(int filt)
1413 {
1414 
1415 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1416 		return;
1417 
1418 	if (sysfilt_ops[~filt].for_nolock)
1419 		return;
1420 
1421 	mtx_lock(&filterops_lock);
1422 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
1423 	    ("filter object refcount not valid on release"));
1424 	sysfilt_ops[~filt].for_refcnt--;
1425 	mtx_unlock(&filterops_lock);
1426 }
1427 
1428 /*
1429  * A ref to kq (obtained via kqueue_acquire) must be held.
1430  */
1431 static int
1432 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
1433     int mflag)
1434 {
1435 	struct filterops *fops;
1436 	struct file *fp;
1437 	struct knote *kn, *tkn;
1438 	struct knlist *knl;
1439 	int error, filt, event;
1440 	int haskqglobal, filedesc_unlock;
1441 
1442 	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
1443 		return (EINVAL);
1444 
1445 	fp = NULL;
1446 	kn = NULL;
1447 	knl = NULL;
1448 	error = 0;
1449 	haskqglobal = 0;
1450 	filedesc_unlock = 0;
1451 
1452 	filt = kev->filter;
1453 	fops = kqueue_fo_find(filt);
1454 	if (fops == NULL)
1455 		return EINVAL;
1456 
1457 	if (kev->flags & EV_ADD) {
1458 		/*
1459 		 * Prevent waiting with locks.  Non-sleepable
1460 		 * allocation failures are handled in the loop, but only
1461 		 * if the spare knote turns out to be actually required.
1462 		 */
1463 		tkn = knote_alloc(mflag);
1464 	} else {
1465 		tkn = NULL;
1466 	}
1467 
1468 findkn:
1469 	if (fops->f_isfd) {
1470 		KASSERT(td != NULL, ("td is NULL"));
1471 		if (kev->ident > INT_MAX)
1472 			error = EBADF;
1473 		else
1474 			error = fget(td, kev->ident, &cap_event_rights, &fp);
1475 		if (error)
1476 			goto done;
1477 
1478 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1479 		    kev->ident, M_NOWAIT) != 0) {
1480 			/* try again */
1481 			fdrop(fp, td);
1482 			fp = NULL;
1483 			error = kqueue_expand(kq, fops, kev->ident, mflag);
1484 			if (error)
1485 				goto done;
1486 			goto findkn;
1487 		}
1488 
1489 		if (fp->f_type == DTYPE_KQUEUE) {
1490 			/*
1491 			 * If we add some intelligence about what we are doing,
1492 			 * we should be able to support events on ourselves.
1493 			 * We need to know when we are doing this to prevent
1494 			 * getting both the knlist lock and the kq lock since
1495 			 * they are the same thing.
1496 			 */
1497 			if (fp->f_data == kq) {
1498 				error = EINVAL;
1499 				goto done;
1500 			}
1501 
1502 			/*
1503 			 * Pre-lock the filedesc before the global
1504 			 * lock mutex, see the comment in
1505 			 * kqueue_close().
1506 			 */
1507 			FILEDESC_XLOCK(td->td_proc->p_fd);
1508 			filedesc_unlock = 1;
1509 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1510 		}
1511 
1512 		KQ_LOCK(kq);
1513 		if (kev->ident < kq->kq_knlistsize) {
1514 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1515 				if (kev->filter == kn->kn_filter)
1516 					break;
1517 		}
1518 	} else {
1519 		if ((kev->flags & EV_ADD) == EV_ADD) {
1520 			error = kqueue_expand(kq, fops, kev->ident, mflag);
1521 			if (error != 0)
1522 				goto done;
1523 		}
1524 
1525 		KQ_LOCK(kq);
1526 
1527 		/*
1528 		 * If possible, find an existing knote to use for this kevent.
1529 		 */
1530 		if (kev->filter == EVFILT_PROC &&
1531 		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
1532 			/* This is an internal creation of a process tracking
1533 			 * note. Don't attempt to coalesce this with an
1534 			 * existing note.
1535 			 */
1536 			;
1537 		} else if (kq->kq_knhashmask != 0) {
1538 			struct klist *list;
1539 
1540 			list = &kq->kq_knhash[
1541 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1542 			SLIST_FOREACH(kn, list, kn_link)
1543 				if (kev->ident == kn->kn_id &&
1544 				    kev->filter == kn->kn_filter)
1545 					break;
1546 		}
1547 	}
1548 
1549 	/* knote is in the process of changing, wait for it to stabilize. */
1550 	if (kn != NULL && kn_in_flux(kn)) {
1551 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1552 		if (filedesc_unlock) {
1553 			FILEDESC_XUNLOCK(td->td_proc->p_fd);
1554 			filedesc_unlock = 0;
1555 		}
1556 		kq->kq_state |= KQ_FLUXWAIT;
1557 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1558 		if (fp != NULL) {
1559 			fdrop(fp, td);
1560 			fp = NULL;
1561 		}
1562 		goto findkn;
1563 	}
1564 
1565 	/*
1566 	 * kn now contains the matching knote, or NULL if no match
1567 	 */
1568 	if (kn == NULL) {
1569 		if (kev->flags & EV_ADD) {
1570 			kn = tkn;
1571 			tkn = NULL;
1572 			if (kn == NULL) {
1573 				KQ_UNLOCK(kq);
1574 				error = ENOMEM;
1575 				goto done;
1576 			}
1577 			kn->kn_fp = fp;
1578 			kn->kn_kq = kq;
1579 			kn->kn_fop = fops;
1580 			/*
1581 			 * apply reference counts to knote structure, and
1582 			 * do not release it at the end of this routine.
1583 			 */
1584 			fops = NULL;
1585 			fp = NULL;
1586 
1587 			kn->kn_sfflags = kev->fflags;
1588 			kn->kn_sdata = kev->data;
1589 			kev->fflags = 0;
1590 			kev->data = 0;
1591 			kn->kn_kevent = *kev;
1592 			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1593 			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
1594 			kn->kn_status = KN_DETACHED;
1595 			if ((kev->flags & EV_DISABLE) != 0)
1596 				kn->kn_status |= KN_DISABLED;
1597 			kn_enter_flux(kn);
1598 
1599 			error = knote_attach(kn, kq);
1600 			KQ_UNLOCK(kq);
1601 			if (error != 0) {
1602 				tkn = kn;
1603 				goto done;
1604 			}
1605 
1606 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1607 				knote_drop_detached(kn, td);
1608 				goto done;
1609 			}
1610 			knl = kn_list_lock(kn);
1611 			goto done_ev_add;
1612 		} else {
1613 			/* No matching knote and the EV_ADD flag is not set. */
1614 			KQ_UNLOCK(kq);
1615 			error = ENOENT;
1616 			goto done;
1617 		}
1618 	}
1619 
1620 	if (kev->flags & EV_DELETE) {
1621 		kn_enter_flux(kn);
1622 		KQ_UNLOCK(kq);
1623 		knote_drop(kn, td);
1624 		goto done;
1625 	}
1626 
1627 	if (kev->flags & EV_FORCEONESHOT) {
1628 		kn->kn_flags |= EV_ONESHOT;
1629 		KNOTE_ACTIVATE(kn, 1);
1630 	}
1631 
1632 	if ((kev->flags & EV_ENABLE) != 0)
1633 		kn->kn_status &= ~KN_DISABLED;
1634 	else if ((kev->flags & EV_DISABLE) != 0)
1635 		kn->kn_status |= KN_DISABLED;
1636 
1637 	/*
1638 	 * The user may change some filter values after the initial EV_ADD,
1639 	 * but doing so will not reset any filter which has already been
1640 	 * triggered.
1641 	 */
1642 	kn->kn_status |= KN_SCAN;
1643 	kn_enter_flux(kn);
1644 	KQ_UNLOCK(kq);
1645 	knl = kn_list_lock(kn);
1646 	kn->kn_kevent.udata = kev->udata;
1647 	if (!fops->f_isfd && fops->f_touch != NULL) {
1648 		fops->f_touch(kn, kev, EVENT_REGISTER);
1649 	} else {
1650 		kn->kn_sfflags = kev->fflags;
1651 		kn->kn_sdata = kev->data;
1652 	}
1653 
1654 done_ev_add:
1655 	/*
1656 	 * We can get here with kn->kn_knlist == NULL.  This can happen when
1657 	 * the initial attach event decides that the event is "completed"
1658 	 * already, e.g., filt_procattach() is called on a zombie process.  It
1659 	 * will call filt_proc() which will remove it from the list, and NULL
1660 	 * kn_knlist.
1661 	 *
1662 	 * KN_DISABLED will be stable while the knote is in flux, so the
1663 	 * unlocked read will not race with an update.
1664 	 */
1665 	if ((kn->kn_status & KN_DISABLED) == 0)
1666 		event = kn->kn_fop->f_event(kn, 0);
1667 	else
1668 		event = 0;
1669 
1670 	KQ_LOCK(kq);
1671 	if (event)
1672 		kn->kn_status |= KN_ACTIVE;
1673 	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
1674 	    KN_ACTIVE)
1675 		knote_enqueue(kn);
1676 	kn->kn_status &= ~KN_SCAN;
1677 	kn_leave_flux(kn);
1678 	kn_list_unlock(knl);
1679 	KQ_UNLOCK_FLUX(kq);
1680 
1681 done:
1682 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1683 	if (filedesc_unlock)
1684 		FILEDESC_XUNLOCK(td->td_proc->p_fd);
1685 	if (fp != NULL)
1686 		fdrop(fp, td);
1687 	knote_free(tkn);
1688 	if (fops != NULL)
1689 		kqueue_fo_release(filt);
1690 	return (error);
1691 }
1692 
1693 static int
1694 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1695 {
1696 	int error;
1697 	struct kqueue *kq;
1698 
1699 	error = 0;
1700 
1701 	kq = fp->f_data;
1702 	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1703 		return (EBADF);
1704 	*kqp = kq;
1705 	KQ_LOCK(kq);
1706 	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1707 		KQ_UNLOCK(kq);
1708 		return (EBADF);
1709 	}
1710 	kq->kq_refcnt++;
1711 	KQ_UNLOCK(kq);
1712 
1713 	return error;
1714 }
1715 
1716 static void
1717 kqueue_release(struct kqueue *kq, int locked)
1718 {
1719 	if (locked)
1720 		KQ_OWNED(kq);
1721 	else
1722 		KQ_LOCK(kq);
1723 	kq->kq_refcnt--;
1724 	if (kq->kq_refcnt == 1)
1725 		wakeup(&kq->kq_refcnt);
1726 	if (!locked)
1727 		KQ_UNLOCK(kq);
1728 }
1729 
1730 static void
1731 kqueue_schedtask(struct kqueue *kq)
1732 {
1733 
1734 	KQ_OWNED(kq);
1735 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1736 	    ("scheduling kqueue task while draining"));
1737 
1738 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1739 		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1740 		kq->kq_state |= KQ_TASKSCHED;
1741 	}
1742 }
1743 
1744 /*
1745  * Expand the kq to make sure we have storage for the fops/ident pair.
1746  *
1747  * Return 0 on success (or no work necessary), return errno on failure.
1748  */
1749 static int
1750 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1751     int mflag)
1752 {
1753 	struct klist *list, *tmp_knhash, *to_free;
1754 	u_long tmp_knhashmask;
1755 	int error, fd, size;
1756 
1757 	KQ_NOTOWNED(kq);
1758 
1759 	error = 0;
1760 	to_free = NULL;
1761 	if (fops->f_isfd) {
1762 		fd = ident;
1763 		if (kq->kq_knlistsize <= fd) {
1764 			size = kq->kq_knlistsize;
1765 			while (size <= fd)
1766 				size += KQEXTENT;
1767 			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1768 			if (list == NULL)
1769 				return ENOMEM;
1770 			KQ_LOCK(kq);
1771 			if ((kq->kq_state & KQ_CLOSING) != 0) {
1772 				to_free = list;
1773 				error = EBADF;
1774 			} else if (kq->kq_knlistsize > fd) {
1775 				to_free = list;
1776 			} else {
1777 				if (kq->kq_knlist != NULL) {
1778 					bcopy(kq->kq_knlist, list,
1779 					    kq->kq_knlistsize * sizeof(*list));
1780 					to_free = kq->kq_knlist;
1781 					kq->kq_knlist = NULL;
1782 				}
1783 				bzero((caddr_t)list +
1784 				    kq->kq_knlistsize * sizeof(*list),
1785 				    (size - kq->kq_knlistsize) * sizeof(*list));
1786 				kq->kq_knlistsize = size;
1787 				kq->kq_knlist = list;
1788 			}
1789 			KQ_UNLOCK(kq);
1790 		}
1791 	} else {
1792 		if (kq->kq_knhashmask == 0) {
1793 			tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
1794 			    &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
1795 			    HASH_WAITOK : HASH_NOWAIT);
1796 			if (tmp_knhash == NULL)
1797 				return (ENOMEM);
1798 			KQ_LOCK(kq);
1799 			if ((kq->kq_state & KQ_CLOSING) != 0) {
1800 				to_free = tmp_knhash;
1801 				error = EBADF;
1802 			} else if (kq->kq_knhashmask == 0) {
1803 				kq->kq_knhash = tmp_knhash;
1804 				kq->kq_knhashmask = tmp_knhashmask;
1805 			} else {
1806 				to_free = tmp_knhash;
1807 			}
1808 			KQ_UNLOCK(kq);
1809 		}
1810 	}
1811 	free(to_free, M_KQUEUE);
1812 
1813 	KQ_NOTOWNED(kq);
1814 	return (error);
1815 }
1816 
1817 static void
1818 kqueue_task(void *arg, int pending)
1819 {
1820 	struct kqueue *kq;
1821 	int haskqglobal;
1822 
1823 	haskqglobal = 0;
1824 	kq = arg;
1825 
1826 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1827 	KQ_LOCK(kq);
1828 
1829 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1830 
1831 	kq->kq_state &= ~KQ_TASKSCHED;
1832 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1833 		wakeup(&kq->kq_state);
1834 	}
1835 	KQ_UNLOCK(kq);
1836 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1837 }
1838 
1839 /*
1840  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1841  * We treat KN_MARKER knotes as if they are in flux.
1842  */
1843 static int
1844 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1845     const struct timespec *tsp, struct kevent *keva, struct thread *td)
1846 {
1847 	struct kevent *kevp;
1848 	struct knote *kn, *marker;
1849 	struct knlist *knl;
1850 	sbintime_t asbt, rsbt;
1851 	int count, error, haskqglobal, influx, nkev, touch;
1852 
1853 	count = maxevents;
1854 	nkev = 0;
1855 	error = 0;
1856 	haskqglobal = 0;
1857 
1858 	if (maxevents == 0)
1859 		goto done_nl;
1860 
1861 	rsbt = 0;
1862 	if (tsp != NULL) {
1863 		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
1864 		    tsp->tv_nsec >= 1000000000) {
1865 			error = EINVAL;
1866 			goto done_nl;
1867 		}
1868 		if (timespecisset(tsp)) {
1869 			if (tsp->tv_sec <= INT32_MAX) {
1870 				rsbt = tstosbt(*tsp);
1871 				if (TIMESEL(&asbt, rsbt))
1872 					asbt += tc_tick_sbt;
1873 				if (asbt <= SBT_MAX - rsbt)
1874 					asbt += rsbt;
1875 				else
1876 					asbt = 0;
1877 				rsbt >>= tc_precexp;
1878 			} else
1879 				asbt = 0;
1880 		} else
1881 			asbt = -1;
1882 	} else
1883 		asbt = 0;
1884 	marker = knote_alloc(M_WAITOK);
1885 	marker->kn_status = KN_MARKER;
1886 	KQ_LOCK(kq);
1887 
1888 retry:
1889 	kevp = keva;
1890 	if (kq->kq_count == 0) {
1891 		if (asbt == -1) {
1892 			error = EWOULDBLOCK;
1893 		} else {
1894 			kq->kq_state |= KQ_SLEEP;
1895 			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1896 			    "kqread", asbt, rsbt, C_ABSOLUTE);
1897 		}
1898 		if (error == 0)
1899 			goto retry;
1900 		/* don't restart after signals... */
1901 		if (error == ERESTART)
1902 			error = EINTR;
1903 		else if (error == EWOULDBLOCK)
1904 			error = 0;
1905 		goto done;
1906 	}
1907 
1908 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1909 	influx = 0;
1910 	while (count) {
1911 		KQ_OWNED(kq);
1912 		kn = TAILQ_FIRST(&kq->kq_head);
1913 
1914 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1915 		    kn_in_flux(kn)) {
1916 			if (influx) {
1917 				influx = 0;
1918 				KQ_FLUX_WAKEUP(kq);
1919 			}
1920 			kq->kq_state |= KQ_FLUXWAIT;
1921 			error = msleep(kq, &kq->kq_lock, PSOCK,
1922 			    "kqflxwt", 0);
1923 			continue;
1924 		}
1925 
1926 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1927 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1928 			kn->kn_status &= ~KN_QUEUED;
1929 			kq->kq_count--;
1930 			continue;
1931 		}
1932 		if (kn == marker) {
1933 			KQ_FLUX_WAKEUP(kq);
1934 			if (count == maxevents)
1935 				goto retry;
1936 			goto done;
1937 		}
1938 		KASSERT(!kn_in_flux(kn),
1939 		    ("knote %p is unexpectedly in flux", kn));
1940 
1941 		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
1942 			kn->kn_status &= ~KN_QUEUED;
1943 			kn_enter_flux(kn);
1944 			kq->kq_count--;
1945 			KQ_UNLOCK(kq);
1946 			/*
1947 			 * We don't need to lock the list since we've
1948 			 * marked it as in flux.
1949 			 */
1950 			knote_drop(kn, td);
1951 			KQ_LOCK(kq);
1952 			continue;
1953 		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1954 			kn->kn_status &= ~KN_QUEUED;
1955 			kn_enter_flux(kn);
1956 			kq->kq_count--;
1957 			KQ_UNLOCK(kq);
1958 			/*
1959 			 * We don't need to lock the list since we've
1960 			 * marked the knote as being in flux.
1961 			 */
1962 			*kevp = kn->kn_kevent;
1963 			knote_drop(kn, td);
1964 			KQ_LOCK(kq);
1965 			kn = NULL;
1966 		} else {
1967 			kn->kn_status |= KN_SCAN;
1968 			kn_enter_flux(kn);
1969 			KQ_UNLOCK(kq);
1970 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1971 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1972 			knl = kn_list_lock(kn);
1973 			if (kn->kn_fop->f_event(kn, 0) == 0) {
1974 				KQ_LOCK(kq);
1975 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1976 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
1977 				    KN_SCAN);
1978 				kn_leave_flux(kn);
1979 				kq->kq_count--;
1980 				kn_list_unlock(knl);
1981 				influx = 1;
1982 				continue;
1983 			}
1984 			touch = (!kn->kn_fop->f_isfd &&
1985 			    kn->kn_fop->f_touch != NULL);
1986 			if (touch)
1987 				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
1988 			else
1989 				*kevp = kn->kn_kevent;
1990 			KQ_LOCK(kq);
1991 			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1992 			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1993 				/*
1994 				 * Manually clear knotes that weren't
1995 				 * 'touch'ed.
1996 				 */
1997 				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
1998 					kn->kn_data = 0;
1999 					kn->kn_fflags = 0;
2000 				}
2001 				if (kn->kn_flags & EV_DISPATCH)
2002 					kn->kn_status |= KN_DISABLED;
2003 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
2004 				kq->kq_count--;
2005 			} else
2006 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2007 
2008 			kn->kn_status &= ~KN_SCAN;
2009 			kn_leave_flux(kn);
2010 			kn_list_unlock(knl);
2011 			influx = 1;
2012 		}
2013 
2014 		/* we are returning a copy to the user */
2015 		kevp++;
2016 		nkev++;
2017 		count--;
2018 
2019 		if (nkev == KQ_NEVENTS) {
2020 			influx = 0;
2021 			KQ_UNLOCK_FLUX(kq);
2022 			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2023 			nkev = 0;
2024 			kevp = keva;
2025 			KQ_LOCK(kq);
2026 			if (error)
2027 				break;
2028 		}
2029 	}
2030 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
2031 done:
2032 	KQ_OWNED(kq);
2033 	KQ_UNLOCK_FLUX(kq);
2034 	knote_free(marker);
2035 done_nl:
2036 	KQ_NOTOWNED(kq);
2037 	if (nkev != 0)
2038 		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2039 	td->td_retval[0] = maxevents - count;
2040 	return (error);
2041 }
2042 
2043 /*ARGSUSED*/
2044 static int
2045 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
2046 	struct ucred *active_cred, struct thread *td)
2047 {
2048 	/*
2049 	 * Enabling sigio causes two major problems:
2050 	 * 1) infinite recursion:
2051 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
2052 	 * set.  On receipt of a signal this will cause a kqueue to recurse
2053 	 * into itself over and over.  Sending the sigio causes the kqueue
2054 	 * to become ready, which in turn posts sigio again, forever.
2055 	 * Solution: set a flag in the kqueue to indicate that a SIGIO is
2056 	 * already in progress.
2057 	 * 2) locking problems:
2058 	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
2059 	 * us above the proc and pgrp locks.
2060 	 * Solution: Post a signal using an async mechanism, being sure to
2061 	 * record a generation count in the delivery so that we do not deliver
2062 	 * a signal to the wrong process.
2063 	 *
2064 	 * Note, these two mechanisms are somewhat mutually exclusive!
2065 	 */
2066 #if 0
2067 	struct kqueue *kq;
2068 
2069 	kq = fp->f_data;
2070 	switch (cmd) {
2071 	case FIOASYNC:
2072 		if (*(int *)data) {
2073 			kq->kq_state |= KQ_ASYNC;
2074 		} else {
2075 			kq->kq_state &= ~KQ_ASYNC;
2076 		}
2077 		return (0);
2078 
2079 	case FIOSETOWN:
2080 		return (fsetown(*(int *)data, &kq->kq_sigio));
2081 
2082 	case FIOGETOWN:
2083 		*(int *)data = fgetown(&kq->kq_sigio);
2084 		return (0);
2085 	}
2086 #endif
2087 
2088 	return (ENOTTY);
2089 }
2090 
2091 /*ARGSUSED*/
2092 static int
2093 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
2094 	struct thread *td)
2095 {
2096 	struct kqueue *kq;
2097 	int revents = 0;
2098 	int error;
2099 
2100 	if ((error = kqueue_acquire(fp, &kq)))
2101 		return (POLLERR);
2102 
2103 	KQ_LOCK(kq);
2104 	if (events & (POLLIN | POLLRDNORM)) {
2105 		if (kq->kq_count) {
2106 			revents |= events & (POLLIN | POLLRDNORM);
2107 		} else {
2108 			selrecord(td, &kq->kq_sel);
2109 			if (SEL_WAITING(&kq->kq_sel))
2110 				kq->kq_state |= KQ_SEL;
2111 		}
2112 	}
2113 	kqueue_release(kq, 1);
2114 	KQ_UNLOCK(kq);
2115 	return (revents);
2116 }
2117 
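/*
 * fo_poll method for kqueue descriptors: a kqueue selects as readable
 * when it has at least one pending event.
 */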
2118 /*ARGSUSED*/
2119 static int
2120 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
2121 	struct thread *td)
2122 {
2123 
2124 	bzero((void *)st, sizeof *st);
2125 	/*
2126 	 * We no longer return kq_count because the unlocked value is useless.
2127 	 * If you spent all this time getting the count, why not spend your
2128 	 * syscall better by calling kevent?
2129 	 *
2130 	 * XXX - This is needed for libc_r.
2131 	 */
2132 	st->st_mode = S_IFIFO;
2133 	return (0);
2134 }
2135 
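/*
 * Drop every knote attached to the kqueue and wait for outstanding
 * references, scheduled tasks, and in-flux knotes to settle, so that the
 * kqueue can be torn down safely.
 */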
2136 static void
2137 kqueue_drain(struct kqueue *kq, struct thread *td)
2138 {
2139 	struct knote *kn;
2140 	int i;
2141 
2142 	KQ_LOCK(kq);
2143 
2144 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2145 	    ("kqueue already closing"));
2146 	kq->kq_state |= KQ_CLOSING;
2147 	if (kq->kq_refcnt > 1)
2148 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2149 
2150 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2151 
2152 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
2153 	    ("kqueue's knlist not empty"));
2154 
2155 	for (i = 0; i < kq->kq_knlistsize; i++) {
2156 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2157 			if (kn_in_flux(kn)) {
2158 				kq->kq_state |= KQ_FLUXWAIT;
2159 				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2160 				continue;
2161 			}
2162 			kn_enter_flux(kn);
2163 			KQ_UNLOCK(kq);
2164 			knote_drop(kn, td);
2165 			KQ_LOCK(kq);
2166 		}
2167 	}
2168 	if (kq->kq_knhashmask != 0) {
2169 		for (i = 0; i <= kq->kq_knhashmask; i++) {
2170 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2171 				if (kn_in_flux(kn)) {
2172 					kq->kq_state |= KQ_FLUXWAIT;
2173 					msleep(kq, &kq->kq_lock, PSOCK,
2174 					       "kqclo2", 0);
2175 					continue;
2176 				}
2177 				kn_enter_flux(kn);
2178 				KQ_UNLOCK(kq);
2179 				knote_drop(kn, td);
2180 				KQ_LOCK(kq);
2181 			}
2182 		}
2183 	}
2184 
2185 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2186 		kq->kq_state |= KQ_TASKDRAIN;
2187 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2188 	}
2189 
2190 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2191 		selwakeuppri(&kq->kq_sel, PSOCK);
2192 		if (!SEL_WAITING(&kq->kq_sel))
2193 			kq->kq_state &= ~KQ_SEL;
2194 	}
2195 
2196 	KQ_UNLOCK(kq);
2197 }
2198 
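/*
 * Release the resources of a drained kqueue; the caller must already have
 * detached it from its file descriptor table.
 */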
2199 static void
2200 kqueue_destroy(struct kqueue *kq)
2201 {
2202 
2203 	KASSERT(kq->kq_fdp == NULL,
2204 	    ("kqueue still attached to a file descriptor"));
2205 	seldrain(&kq->kq_sel);
2206 	knlist_destroy(&kq->kq_sel.si_note);
2207 	mtx_destroy(&kq->kq_lock);
2208 
2209 	if (kq->kq_knhash != NULL)
2210 		free(kq->kq_knhash, M_KQUEUE);
2211 	if (kq->kq_knlist != NULL)
2212 		free(kq->kq_knlist, M_KQUEUE);
2213 
2214 	funsetown(&kq->kq_sigio);
2215 }
2216 
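/*
 * fo_close method: drain the kqueue, unlink it from the owning process's
 * file descriptor table, and free it.
 */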
2217 /*ARGSUSED*/
2218 static int
2219 kqueue_close(struct file *fp, struct thread *td)
2220 {
2221 	struct kqueue *kq = fp->f_data;
2222 	struct filedesc *fdp;
2223 	int error;
2224 	int filedesc_unlock;
2225 
2226 	if ((error = kqueue_acquire(fp, &kq)))
2227 		return (error);
2228 	kqueue_drain(kq, td);
2229 
2230 	/*
2231 	 * We could be called due to the knote_drop() doing fdrop(),
2232 	 * called from kqueue_register().  In that case the global
2233 	 * lock is owned, and the filedesc sx was locked beforehand so
2234 	 * that the sleepable lock is not taken after a non-sleepable one.
2235 	 */
2236 	fdp = kq->kq_fdp;
2237 	kq->kq_fdp = NULL;
2238 	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2239 		FILEDESC_XLOCK(fdp);
2240 		filedesc_unlock = 1;
2241 	} else
2242 		filedesc_unlock = 0;
2243 	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2244 	if (filedesc_unlock)
2245 		FILEDESC_XUNLOCK(fdp);
2246 
2247 	kqueue_destroy(kq);
2248 	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2249 	crfree(kq->kq_cred);
2250 	free(kq, M_KQUEUE);
2251 	fp->f_data = NULL;
2252 
2253 	return (0);
2254 }
2255 
2256 static int
2257 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2258 {
2259 
2260 	kif->kf_type = KF_TYPE_KQUEUE;
2261 	return (0);
2262 }
2263 
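/*
 * Wake up anything waiting on the kqueue: sleeping kevent() callers,
 * poll/select waiters, knotes attached to this kqueue (via the task
 * queue), and SIGIO consumers when async mode is enabled.  Called with
 * the kq lock held.
 */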
2264 static void
2265 kqueue_wakeup(struct kqueue *kq)
2266 {
2267 	KQ_OWNED(kq);
2268 
2269 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2270 		kq->kq_state &= ~KQ_SLEEP;
2271 		wakeup(kq);
2272 	}
2273 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2274 		selwakeuppri(&kq->kq_sel, PSOCK);
2275 		if (!SEL_WAITING(&kq->kq_sel))
2276 			kq->kq_state &= ~KQ_SEL;
2277 	}
2278 	if (!knlist_empty(&kq->kq_sel.si_note))
2279 		kqueue_schedtask(kq);
2280 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2281 		pgsigio(&kq->kq_sigio, SIGIO, 0);
2282 	}
2283 }
2284 
2285 /*
2286  * Walk down a list of knotes, activating them if their event has triggered.
2287  *
2288  * There is a possible optimization in the case of one kq watching another.
2289  * Instead of scheduling a task to wake it up, you could pass enough state
2290  * down the chain to wake up the parent kqueue.  Make this code functional
2291  * first.
2292  */
2293 void
2294 knote(struct knlist *list, long hint, int lockflags)
2295 {
2296 	struct kqueue *kq;
2297 	struct knote *kn, *tkn;
2298 	int error;
2299 
2300 	if (list == NULL)
2301 		return;
2302 
2303 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2304 
2305 	if ((lockflags & KNF_LISTLOCKED) == 0)
2306 		list->kl_lock(list->kl_lockarg);
2307 
2308 	/*
2309 	 * If we unlock the list lock (and enter influx), we can
2310 	 * eliminate the kqueue scheduling, but this will introduce
2311 	 * four lock/unlock pairs for each knote to test.  Also, a marker
2312 	 * would be needed to keep the iteration position, since filters
2313 	 * or other threads could remove events.
2314 	 */
2315 	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2316 		kq = kn->kn_kq;
2317 		KQ_LOCK(kq);
2318 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2319 			/*
2320 			 * Do not process the influx knotes, except for
2321 			 * the influx coming from the kq unlock in
2322 			 * kqueue_scan().  In the latter case, we do
2323 			 * not interfere with the scan, since the code
2324 			 * fragment in kqueue_scan() locks the knlist,
2325 			 * and cannot proceed until we are finished.
2326 			 */
2327 			KQ_UNLOCK(kq);
2328 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
2329 			kn_enter_flux(kn);
2330 			KQ_UNLOCK(kq);
2331 			error = kn->kn_fop->f_event(kn, hint);
2332 			KQ_LOCK(kq);
2333 			kn_leave_flux(kn);
2334 			if (error)
2335 				KNOTE_ACTIVATE(kn, 1);
2336 			KQ_UNLOCK_FLUX(kq);
2337 		} else {
2338 			if (kn->kn_fop->f_event(kn, hint))
2339 				KNOTE_ACTIVATE(kn, 1);
2340 			KQ_UNLOCK(kq);
2341 		}
2342 	}
2343 	if ((lockflags & KNF_LISTLOCKED) == 0)
2344 		list->kl_unlock(list->kl_lockarg);
2345 }
2346 
2347 /*
2348  * add a knote to a knlist
2349  */
2350 void
2351 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2352 {
2353 
2354 	KNL_ASSERT_LOCK(knl, islocked);
2355 	KQ_NOTOWNED(kn->kn_kq);
2356 	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2357 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2358 	    ("knote %p was not detached", kn));
2359 	if (!islocked)
2360 		knl->kl_lock(knl->kl_lockarg);
2361 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2362 	if (!islocked)
2363 		knl->kl_unlock(knl->kl_lockarg);
2364 	KQ_LOCK(kn->kn_kq);
2365 	kn->kn_knlist = knl;
2366 	kn->kn_status &= ~KN_DETACHED;
2367 	KQ_UNLOCK(kn->kn_kq);
2368 }
2369 
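/*
 * Remove a knote from a knlist; the caller indicates which of the knlist
 * and kq locks it already holds.
 */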
2370 static void
2371 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2372     int kqislocked)
2373 {
2374 
2375 	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2376 	KNL_ASSERT_LOCK(knl, knlislocked);
2377 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2378 	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2379 	KASSERT((kn->kn_status & KN_DETACHED) == 0,
2380 	    ("knote %p was already detached", kn));
2381 	if (!knlislocked)
2382 		knl->kl_lock(knl->kl_lockarg);
2383 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2384 	kn->kn_knlist = NULL;
2385 	if (!knlislocked)
2386 		kn_list_unlock(knl);
2387 	if (!kqislocked)
2388 		KQ_LOCK(kn->kn_kq);
2389 	kn->kn_status |= KN_DETACHED;
2390 	if (!kqislocked)
2391 		KQ_UNLOCK(kn->kn_kq);
2392 }
2393 
2394 /*
2395  * remove knote from the specified knlist
2396  */
2397 void
2398 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2399 {
2400 
2401 	knlist_remove_kq(knl, kn, islocked, 0);
2402 }
2403 
2404 int
2405 knlist_empty(struct knlist *knl)
2406 {
2407 
2408 	KNL_ASSERT_LOCKED(knl);
2409 	return (SLIST_EMPTY(&knl->kl_list));
2410 }
2411 
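/*
 * Default lock callbacks used for knlists created without a private lock;
 * they serialize on a single global mutex.
 */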
2412 static struct mtx knlist_lock;
2413 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2414     MTX_DEF);
2415 static void knlist_mtx_lock(void *arg);
2416 static void knlist_mtx_unlock(void *arg);
2417 
2418 static void
2419 knlist_mtx_lock(void *arg)
2420 {
2421 
2422 	mtx_lock((struct mtx *)arg);
2423 }
2424 
2425 static void
2426 knlist_mtx_unlock(void *arg)
2427 {
2428 
2429 	mtx_unlock((struct mtx *)arg);
2430 }
2431 
2432 static void
2433 knlist_mtx_assert_lock(void *arg, int what)
2434 {
2435 
2436 	if (what == LA_LOCKED)
2437 		mtx_assert((struct mtx *)arg, MA_OWNED);
2438 	else
2439 		mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2440 }
2441 
2442 static void
2443 knlist_rw_rlock(void *arg)
2444 {
2445 
2446 	rw_rlock((struct rwlock *)arg);
2447 }
2448 
2449 static void
2450 knlist_rw_runlock(void *arg)
2451 {
2452 
2453 	rw_runlock((struct rwlock *)arg);
2454 }
2455 
2456 static void
2457 knlist_rw_assert_lock(void *arg, int what)
2458 {
2459 
2460 	if (what == LA_LOCKED)
2461 		rw_assert((struct rwlock *)arg, RA_LOCKED);
2462 	else
2463 		rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2464 }
2465 
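/*
 * Initialize a knlist.  Any callback left NULL falls back to the
 * knlist_mtx_* helpers, which treat the lock argument as a mutex; when no
 * lock is supplied at all, the global knlist_lock is used.
 *
 * A minimal usage sketch for a subsystem with its own mutex ("sc_mtx" and
 * "sc_note" are illustrative names, not part of this file):
 *
 *	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);
 *	...
 *	mtx_lock(&sc->sc_mtx);
 *	KNOTE_LOCKED(&sc->sc_note, 0);
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	knlist_clear(&sc->sc_note, 0);
 *	knlist_destroy(&sc->sc_note);
 */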
2466 void
2467 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2468     void (*kl_unlock)(void *),
2469     void (*kl_assert_lock)(void *, int))
2470 {
2471 
2472 	if (lock == NULL)
2473 		knl->kl_lockarg = &knlist_lock;
2474 	else
2475 		knl->kl_lockarg = lock;
2476 
2477 	if (kl_lock == NULL)
2478 		knl->kl_lock = knlist_mtx_lock;
2479 	else
2480 		knl->kl_lock = kl_lock;
2481 	if (kl_unlock == NULL)
2482 		knl->kl_unlock = knlist_mtx_unlock;
2483 	else
2484 		knl->kl_unlock = kl_unlock;
2485 	if (kl_assert_lock == NULL)
2486 		knl->kl_assert_lock = knlist_mtx_assert_lock;
2487 	else
2488 		knl->kl_assert_lock = kl_assert_lock;
2489 
2490 	knl->kl_autodestroy = 0;
2491 	SLIST_INIT(&knl->kl_list);
2492 }
2493 
2494 void
2495 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2496 {
2497 
2498 	knlist_init(knl, lock, NULL, NULL, NULL);
2499 }
2500 
2501 struct knlist *
2502 knlist_alloc(struct mtx *lock)
2503 {
2504 	struct knlist *knl;
2505 
2506 	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2507 	knlist_init_mtx(knl, lock);
2508 	return (knl);
2509 }
2510 
2511 void
2512 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2513 {
2514 
2515 	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2516 	    knlist_rw_assert_lock);
2517 }
2518 
2519 void
2520 knlist_destroy(struct knlist *knl)
2521 {
2522 
2523 	KASSERT(KNLIST_EMPTY(knl),
2524 	    ("destroying knlist %p with knotes on it", knl));
2525 }
2526 
2527 void
2528 knlist_detach(struct knlist *knl)
2529 {
2530 
2531 	KNL_ASSERT_LOCKED(knl);
2532 	knl->kl_autodestroy = 1;
2533 	if (knlist_empty(knl)) {
2534 		knlist_destroy(knl);
2535 		free(knl, M_KQUEUE);
2536 	}
2537 }
2538 
2539 /*
2540  * Even if we are locked, we may need to drop the lock to allow any influx
2541  * knotes time to "settle".
2542  */
2543 void
2544 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2545 {
2546 	struct knote *kn, *kn2;
2547 	struct kqueue *kq;
2548 
2549 	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2550 	if (islocked)
2551 		KNL_ASSERT_LOCKED(knl);
2552 	else {
2553 		KNL_ASSERT_UNLOCKED(knl);
2554 again:		/* need to reacquire lock since we have dropped it */
2555 		knl->kl_lock(knl->kl_lockarg);
2556 	}
2557 
2558 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2559 		kq = kn->kn_kq;
2560 		KQ_LOCK(kq);
2561 		if (kn_in_flux(kn)) {
2562 			KQ_UNLOCK(kq);
2563 			continue;
2564 		}
2565 		knlist_remove_kq(knl, kn, 1, 1);
2566 		if (killkn) {
2567 			kn_enter_flux(kn);
2568 			KQ_UNLOCK(kq);
2569 			knote_drop_detached(kn, td);
2570 		} else {
2571 			/* Make sure cleared knotes disappear soon */
2572 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
2573 			KQ_UNLOCK(kq);
2574 		}
2575 		kq = NULL;
2576 	}
2577 
2578 	if (!SLIST_EMPTY(&knl->kl_list)) {
2579 		/* there are still in flux knotes remaining */
2580 		kn = SLIST_FIRST(&knl->kl_list);
2581 		kq = kn->kn_kq;
2582 		KQ_LOCK(kq);
2583 		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2584 		knl->kl_unlock(knl->kl_lockarg);
2585 		kq->kq_state |= KQ_FLUXWAIT;
2586 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2587 		kq = NULL;
2588 		goto again;
2589 	}
2590 
2591 	if (islocked)
2592 		KNL_ASSERT_LOCKED(knl);
2593 	else {
2594 		knl->kl_unlock(knl->kl_lockarg);
2595 		KNL_ASSERT_UNLOCKED(knl);
2596 	}
2597 }
2598 
2599 /*
2600  * Remove all knotes referencing a specified fd.  Must be called with the
2601  * FILEDESC lock held.  This prevents a race where a new fd comes along
2602  * and occupies the entry and we attach a knote to the fd.
2603  */
2604 void
2605 knote_fdclose(struct thread *td, int fd)
2606 {
2607 	struct filedesc *fdp = td->td_proc->p_fd;
2608 	struct kqueue *kq;
2609 	struct knote *kn;
2610 	int influx;
2611 
2612 	FILEDESC_XLOCK_ASSERT(fdp);
2613 
2614 	/*
2615 	 * We shouldn't have to worry about new kevents appearing on fd
2616 	 * since filedesc is locked.
2617 	 */
2618 	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2619 		KQ_LOCK(kq);
2620 
2621 again:
2622 		influx = 0;
2623 		while (kq->kq_knlistsize > fd &&
2624 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2625 			if (kn_in_flux(kn)) {
2626 				/* someone else might be waiting on our knote */
2627 				if (influx)
2628 					wakeup(kq);
2629 				kq->kq_state |= KQ_FLUXWAIT;
2630 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2631 				goto again;
2632 			}
2633 			kn_enter_flux(kn);
2634 			KQ_UNLOCK(kq);
2635 			influx = 1;
2636 			knote_drop(kn, td);
2637 			KQ_LOCK(kq);
2638 		}
2639 		KQ_UNLOCK_FLUX(kq);
2640 	}
2641 }
2642 
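/*
 * Link a new knote into the kqueue's per-fd list or, for non-fd filters,
 * into its hash table.  Called with the kq lock held and the knote marked
 * in flux.
 */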
2643 static int
2644 knote_attach(struct knote *kn, struct kqueue *kq)
2645 {
2646 	struct klist *list;
2647 
2648 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2649 	KQ_OWNED(kq);
2650 
2651 	if ((kq->kq_state & KQ_CLOSING) != 0)
2652 		return (EBADF);
2653 	if (kn->kn_fop->f_isfd) {
2654 		if (kn->kn_id >= kq->kq_knlistsize)
2655 			return (ENOMEM);
2656 		list = &kq->kq_knlist[kn->kn_id];
2657 	} else {
2658 		if (kq->kq_knhash == NULL)
2659 			return (ENOMEM);
2660 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2661 	}
2662 	SLIST_INSERT_HEAD(list, kn, kn_link);
2663 	return (0);
2664 }
2665 
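/*
 * Detach the knote from the object it watches (unless already detached)
 * and free it.
 */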
2666 static void
2667 knote_drop(struct knote *kn, struct thread *td)
2668 {
2669 
2670 	if ((kn->kn_status & KN_DETACHED) == 0)
2671 		kn->kn_fop->f_detach(kn);
2672 	knote_drop_detached(kn, td);
2673 }
2674 
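/*
 * Final teardown of a detached, in-flux knote: unlink it from the kqueue's
 * lists, drop any file reference, release the filter ops, and free it.
 */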
2675 static void
2676 knote_drop_detached(struct knote *kn, struct thread *td)
2677 {
2678 	struct kqueue *kq;
2679 	struct klist *list;
2680 
2681 	kq = kn->kn_kq;
2682 
2683 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2684 	    ("knote %p still attached", kn));
2685 	KQ_NOTOWNED(kq);
2686 
2687 	KQ_LOCK(kq);
2688 	KASSERT(kn->kn_influx == 1,
2689 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2690 
2691 	if (kn->kn_fop->f_isfd)
2692 		list = &kq->kq_knlist[kn->kn_id];
2693 	else
2694 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2695 
2696 	if (!SLIST_EMPTY(list))
2697 		SLIST_REMOVE(list, kn, knote, kn_link);
2698 	if (kn->kn_status & KN_QUEUED)
2699 		knote_dequeue(kn);
2700 	KQ_UNLOCK_FLUX(kq);
2701 
2702 	if (kn->kn_fop->f_isfd) {
2703 		fdrop(kn->kn_fp, td);
2704 		kn->kn_fp = NULL;
2705 	}
2706 	kqueue_fo_release(kn->kn_kevent.filter);
2707 	kn->kn_fop = NULL;
2708 	knote_free(kn);
2709 }
2710 
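/*
 * Put an activated knote on the kqueue's pending queue and wake up any
 * waiters.  Called with the kq lock held.
 */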
2711 static void
2712 knote_enqueue(struct knote *kn)
2713 {
2714 	struct kqueue *kq = kn->kn_kq;
2715 
2716 	KQ_OWNED(kn->kn_kq);
2717 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2718 
2719 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2720 	kn->kn_status |= KN_QUEUED;
2721 	kq->kq_count++;
2722 	kqueue_wakeup(kq);
2723 }
2724 
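/*
 * Take a queued knote off the kqueue's pending queue.  Called with the kq
 * lock held.
 */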
2725 static void
2726 knote_dequeue(struct knote *kn)
2727 {
2728 	struct kqueue *kq = kn->kn_kq;
2729 
2730 	KQ_OWNED(kn->kn_kq);
2731 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2732 
2733 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2734 	kn->kn_status &= ~KN_QUEUED;
2735 	kq->kq_count--;
2736 }
2737 
2738 static void
2739 knote_init(void)
2740 {
2741 
2742 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2743 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2744 }
2745 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2746 
2747 static struct knote *
2748 knote_alloc(int mflag)
2749 {
2750 
2751 	return (uma_zalloc(knote_zone, mflag | M_ZERO));
2752 }
2753 
2754 static void
2755 knote_free(struct knote *kn)
2756 {
2757 
2758 	uma_zfree(knote_zone, kn);
2759 }
2760 
2761 /*
2762  * Register the kev w/ the kq specified by fd.
2763  */
2764 int
2765 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
2766 {
2767 	struct kqueue *kq;
2768 	struct file *fp;
2769 	cap_rights_t rights;
2770 	int error;
2771 
2772 	error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
2773 	    &fp);
2774 	if (error != 0)
2775 		return (error);
2776 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2777 		goto noacquire;
2778 
2779 	error = kqueue_register(kq, kev, td, mflag);
2780 	kqueue_release(kq, 0);
2781 
2782 noacquire:
2783 	fdrop(fp, td);
2784 	return (error);
2785 }
2786