xref: /freebsd/sys/kern/kern_event.c (revision cc349066556bcdeed0d6cc72aad340d0f383e35c)
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kqueue.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <machine/atomic.h>

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue_ctx);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static void	kqueue_destroy(struct kqueue *kq);
static void	kqueue_drain(struct kqueue *kq, struct thread *td);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);

static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};

static int	knote_attach(struct knote *kn, struct kqueue *kq);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_drop_detached(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct knote	*knote_alloc(int waitok);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c?  */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static unsigned int	kq_ncallouts = 0;
static unsigned int	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not influx ? */
#define KNOTE_ACTIVATE(kn, islock) do {					\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)

static struct knlist *
kn_list_lock(struct knote *kn)
{
	struct knlist *knl;

	knl = kn->kn_knlist;
	if (knl != NULL)
		knl->kl_lock(knl->kl_lockarg);
	return (knl);
}

static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}

static bool
kn_in_flux(struct knote *kn)
{

	return (kn->kn_influx > 0);
}

static void
kn_enter_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx < INT_MAX);
	kn->kn_influx++;
}

static bool
kn_leave_flux(struct knote *kn)
{

	KQ_OWNED(kn->kn_kq);
	MPASS(kn->kn_influx > 0);
	kn->kn_influx--;
	return (kn->kn_influx == 0);
}

#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);					\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#ifndef	KN_HASHSIZE
#define	KN_HASHSIZE		64		/* XXX should be tunable */
#endif

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_nolock;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
	{ &fs_filtops, 1 },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
};

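/*
 * Note on indexing (added commentary, not part of the original source):
 * the EVFILT_* constants are small negative integers, so the table above
 * is indexed by the one's complement of the filter number, e.g.:
 *
 *	EVFILT_READ == -1, so sysfilt_ops[~EVFILT_READ] == sysfilt_ops[0];
 *	EVFILT_USER == -11, so sysfilt_ops[~EVFILT_USER] == sysfilt_ops[10].
 *
 * This is why kqueue_fo_find() and friends below range-check with
 * "filt > 0 || filt + EVFILT_SYSCOUNT < 0" and then use sysfilt_ops[~filt].
 */
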
/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c?  */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		exiting = true;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		exiting = true;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes.  The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c?  */
static void
filt_procdetach(struct knote *kn)
{

	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c?  */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  This mostly does the same as knote(),
 * activating all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether the
 * user wants to track the new process.  If so, attach a new knote to the
 * child, and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register new events to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice.  This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define NOTE_TIMER_PRECMASK	(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
				NOTE_NSECONDS)

static sbintime_t
timer2sbintime(intptr_t data, int flags)
{

	/*
	 * Macros for converting to the fractional second portion of an
	 * sbintime_t using 64bit multiplication to improve precision.
	 */
#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
#ifdef __LP64__
		if (data > (SBT_MAX / SBT_1S))
			return (SBT_MAX);
#endif
		return ((sbintime_t)data << 32);
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		if (data >= 1000) {
			int64_t secs = data / 1000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | MS_TO_SBT(data % 1000));
		}
		return (MS_TO_SBT(data));
	case NOTE_USECONDS:
		if (data >= 1000000) {
			int64_t secs = data / 1000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			return (secs << 32 | US_TO_SBT(data % 1000000));
		}
		return (US_TO_SBT(data));
	case NOTE_NSECONDS:
		if (data >= 1000000000) {
			int64_t secs = data / 1000000000;
#ifdef __LP64__
			if (secs > (SBT_MAX / SBT_1S))
				return (SBT_MAX);
#endif
			/* The remainder is in nanoseconds, so use NS_TO_SBT. */
			return (secs << 32 | NS_TO_SBT(data % 1000000000));
		}
		return (NS_TO_SBT(data));
	default:
		break;
	}
	return (-1);
}
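/*
 * Worked example (added commentary, not part of the original source): an
 * sbintime_t carries whole seconds in the high 32 bits and 1/2^32-second
 * fractions in the low 32 bits, so a 1500 ms period converts as
 *
 *	timer2sbintime(1500, NOTE_MSECONDS)
 *	    == ((int64_t)1 << 32) | MS_TO_SBT(500)
 *
 * i.e. 1.5 seconds in fixed-point form.
 */
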
struct kq_timer_cb_data {
	struct callout c;
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period */
};

static void
filt_timerexpire(void *knx)
{
	struct knote *kn;
	struct kq_timer_cb_data *kc;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != 0)
		return;

	kc = kn->kn_ptr.p_v;
	kc->next += kc->to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}

/*
 * data contains amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	sbintime_t to;
	unsigned int ncallouts;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far. */
	if ((kn->kn_sfflags & ~NOTE_TIMER_PRECMASK) != 0)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (to < 0)
		return (EINVAL);

	do {
		ncallouts = kq_ncallouts;
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	callout_init(&kc->c, 1);
	kc->next = to + sbinuptime();
	kc->to = to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

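/*
 * Userland usage sketch (added commentary, not part of the original
 * source): a periodic 500 ms timer delivered through kevent(2).  kn_sdata
 * is the period and kn_sfflags selects the precision unit checked above.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, NOTE_MSECONDS,
 *	    500, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	// register the timer
 *	(void)kevent(kq, NULL, 0, &kev, 1, NULL);	// block for one tick
 */
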
static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old;

	kc = kn->kn_ptr.p_v;
	callout_drain(&kc->c);
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%lu)", type);
		break;
	}
}

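/*
 * Userland usage sketch (added commentary, not part of the original
 * source): an EVFILT_USER event is registered once and then triggered
 * with NOTE_TRIGGER, typically from another thread.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	// register
 *
 *	EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	// fire it
 */
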
int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	return (kern_kqueue(td, 0, NULL));
}

static void
kqueue_init(struct kqueue *kq)
{

	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}

int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct ucred *cred;
	int fd, error;

	fdp = td->td_proc->p_fd;
	cred = td->td_ucred;
	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
		return (ENOMEM);

	error = falloc_caps(td, &fp, &fd, flags, fcaps);
	if (error != 0) {
		chgkqcnt(cred->cr_ruidinfo, -1, 0);
		return (error);
	}

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq);
	kq->kq_fdp = fdp;
	kq->kq_cred = crhold(cred);

	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
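
/*
 * Userland usage sketch (added commentary, not part of the original
 * source): a single kevent(2) call can both submit changes and collect
 * events.  Here "fd" stands for some descriptor the caller already owns.
 *
 *	struct kevent change, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &change, 1, &ev, 1, NULL);
 *	// n is the number of returned events, or -1 with errno set.
 */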
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct timespec ts, *tsp;
	struct kevent_copyops k_ops = { uap,
					kevent_copyout,
					kevent_copyin};
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    &k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);

	return (error);
}

static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}

int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}

/*
 * Performs a kevent() call on a temporarily created kqueue.  This can be
 * used to perform one-shot polling, similar to poll() and select().
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}

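/*
 * In-kernel usage sketch (added commentary, not part of the original
 * source): callers supply kevent_copyops whose k_copyout/k_copyin operate
 * on kernel memory.  A hypothetical caller polling one descriptor might
 * look like:
 *
 *	struct kevent kev;
 *	struct kevent_copyops k_ops = { &kev, kevent_kern_copyout,
 *	    kevent_kern_copyin };	// both copiers are hypothetical
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	error = kern_kevent_anonymous(td, 1, &k_ops);
 */
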
int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return (EINVAL);
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return (EINVAL);

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return (NULL);

	if (sysfilt_ops[~filt].for_nolock)
		return (sysfilt_ops[~filt].for_fop);

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return (sysfilt_ops[~filt].for_fop);
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
 * determines whether memory allocation may sleep; make sure it is 0 if
 * you hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
    int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	struct knlist *knl;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
		return (EINVAL);

	fp = NULL;
	kn = NULL;
	knl = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return (EINVAL);

	if (kev->flags & EV_ADD) {
		/*
		 * Prevent waiting with locks.  Non-sleepable
		 * allocation failures are handled in the loop, only
		 * if the spare knote appears to be actually required.
		 */
		tkn = knote_alloc(waitok);
	} else {
		tkn = NULL;
	}

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		if (kev->ident > INT_MAX)
			error = EBADF;
		else
			error = fget(td, kev->ident,
			    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);

		/*
		 * If possible, find an existing knote to use for this kevent.
		 */
		if (kev->filter == EVFILT_PROC &&
		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
			/*
			 * This is an internal creation of a process tracking
			 * note.  Don't attempt to coalesce this with an
			 * existing note.
			 */
			;
		} else if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && kn_in_flux(kn)) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match.
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
			kn->kn_status = KN_DETACHED;
			kn_enter_flux(kn);

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop_detached(kn, td);
				goto done;
			}
			knl = kn_list_lock(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		knote_drop(kn, td);
		goto done;
	}

	if (kev->flags & EV_FORCEONESHOT) {
		kn->kn_flags |= EV_ONESHOT;
		KNOTE_ACTIVATE(kn, 1);
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_SCAN;
	kn_enter_flux(kn);
	KQ_UNLOCK(kq);
	knl = kn_list_lock(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, i.e. filt_procattach is called on a zombie process.  It
	 * will call filt_proc which will remove it from the list, and NULL
	 * kn_knlist.
	 */
done_ev_add:
	if ((kev->flags & EV_ENABLE) != 0)
		kn->kn_status &= ~KN_DISABLED;
	else if ((kev->flags & EV_DISABLE) != 0)
		kn->kn_status |= KN_DISABLED;

	if ((kn->kn_status & KN_DISABLED) == 0)
		event = kn->kn_fop->f_event(kn, 0);
	else
		event = 0;

	KQ_LOCK(kq);
	if (event)
		kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
	    KN_ACTIVE)
		knote_enqueue(kn);
	kn->kn_status &= ~KN_SCAN;
	kn_leave_flux(kn);
	kn_list_unlock(knl);
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return (error);
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit with the proper malloc flag (derived from waitok)
 * should be safe: when kqueue_register is called from a non-fd context,
 * normally no locks are held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
    int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return (ENOMEM);
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return (0);
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return (POLLERR);

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}

static void
kqueue_drain(struct kqueue *kq, struct thread *td)
{
	struct knote *kn;
	int i;

	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			if (kn_in_flux(kn)) {
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
				continue;
			}
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				if (kn_in_flux(kn)) {
					kq->kq_state |= KQ_FLUXWAIT;
					msleep(kq, &kq->kq_lock, PSOCK,
					    "kqclo2", 0);
					continue;
				}
				kn_enter_flux(kn);
				KQ_UNLOCK(kq);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}

	KQ_UNLOCK(kq);
}
1934 
1935 static void
1936 kqueue_destroy(struct kqueue *kq)
1937 {
1938 
1939 	KASSERT(kq->kq_fdp == NULL,
1940 	    ("kqueue still attached to a file descriptor"));
1941 	seldrain(&kq->kq_sel);
1942 	knlist_destroy(&kq->kq_sel.si_note);
1943 	mtx_destroy(&kq->kq_lock);
1944 
1945 	if (kq->kq_knhash != NULL)
1946 		free(kq->kq_knhash, M_KQUEUE);
1947 	if (kq->kq_knlist != NULL)
1948 		free(kq->kq_knlist, M_KQUEUE);
1949 
1950 	funsetown(&kq->kq_sigio);
1951 }
1952 
1953 /*ARGSUSED*/
1954 static int
1955 kqueue_close(struct file *fp, struct thread *td)
1956 {
1957 	struct kqueue *kq = fp->f_data;
1958 	struct filedesc *fdp;
1959 	int error;
1960 	int filedesc_unlock;
1961 
1962 	if ((error = kqueue_acquire(fp, &kq)))
1963 		return error;
1964 	kqueue_drain(kq, td);
1965 
1966 	/*
1967 	 * We could be called due to the knote_drop() doing fdrop(),
1968 	 * called from kqueue_register().  In this case the global
1969 	 * lock is owned, and filedesc sx is locked before, to not
1970 	 * take the sleepable lock after non-sleepable.
1971 	 */
1972 	fdp = kq->kq_fdp;
1973 	kq->kq_fdp = NULL;
1974 	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
1975 		FILEDESC_XLOCK(fdp);
1976 		filedesc_unlock = 1;
1977 	} else
1978 		filedesc_unlock = 0;
1979 	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
1980 	if (filedesc_unlock)
1981 		FILEDESC_XUNLOCK(fdp);
1982 
1983 	kqueue_destroy(kq);
1984 	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
1985 	crfree(kq->kq_cred);
1986 	free(kq, M_KQUEUE);
1987 	fp->f_data = NULL;
1988 
1989 	return (0);
1990 }
1991 
1992 static int
1993 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
1994 {
1995 
1996 	kif->kf_type = KF_TYPE_KQUEUE;
1997 	return (0);
1998 }
1999 
2000 static void
2001 kqueue_wakeup(struct kqueue *kq)
2002 {
2003 	KQ_OWNED(kq);
2004 
2005 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2006 		kq->kq_state &= ~KQ_SLEEP;
2007 		wakeup(kq);
2008 	}
2009 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2010 		selwakeuppri(&kq->kq_sel, PSOCK);
2011 		if (!SEL_WAITING(&kq->kq_sel))
2012 			kq->kq_state &= ~KQ_SEL;
2013 	}
2014 	if (!knlist_empty(&kq->kq_sel.si_note))
2015 		kqueue_schedtask(kq);
2016 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2017 		pgsigio(&kq->kq_sigio, SIGIO, 0);
2018 	}
2019 }
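/*
 * Illustrative sketch (not compiled, hypothetical userland code): the
 * KQ_ASYNC branch above is what a process arms by enabling async I/O
 * notification on the kqueue descriptor itself, assuming the generic
 * FIOASYNC/FIOSETOWN ioctls are routed to this file type as usual.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int
setup_async_kq(void)
{
	int kq, on = 1;
	pid_t pid = getpid();

	kq = kqueue();
	if (kq == -1)
		return (-1);
	/* Ask for SIGIO when events are queued (sets KQ_ASYNC). */
	if (ioctl(kq, FIOASYNC, &on) == -1)
		return (-1);
	/* Direct the SIGIO at this process (fills kq_sigio). */
	if (ioctl(kq, FIOSETOWN, &pid) == -1)
		return (-1);
	return (kq);
}
#endif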
2020 
2021 /*
2022  * Walk down a list of knotes, activating them if their event has triggered.
2023  *
2024  * There is an opportunity to optimize the case of one kq watching another.
2025  * Instead of scheduling a task to wake it up, enough state could be passed
2026  * down the chain to wake up the parent kqueue directly.  Make this code
2027  * functional first.
2028  */
2029 void
2030 knote(struct knlist *list, long hint, int lockflags)
2031 {
2032 	struct kqueue *kq;
2033 	struct knote *kn, *tkn;
2034 	int error;
2035 
2036 	if (list == NULL)
2037 		return;
2038 
2039 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2040 
2041 	if ((lockflags & KNF_LISTLOCKED) == 0)
2042 		list->kl_lock(list->kl_lockarg);
2043 
2044 	/*
2045 	 * If we unlock the list lock (and mark the knote in flux), we
2046 	 * could eliminate the kqueue task scheduling, but that would
2047 	 * introduce four lock/unlock pairs for each knote tested.  A
2048 	 * marker knote would also be needed to keep the iteration
2049 	 * position, since filters or other threads could remove events.
2050 	 */
2051 	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2052 		kq = kn->kn_kq;
2053 		KQ_LOCK(kq);
2054 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2055 			/*
2056 			 * Do not process in-flux knotes, except for
2057 			 * the flux coming from the kq unlock in
2058 			 * kqueue_scan().  In the latter case we do
2059 			 * not interfere with the scan, since the
2060 			 * code fragment in kqueue_scan() locks the
2061 			 * knlist and cannot proceed until we finish.
2062 			 */
2063 			KQ_UNLOCK(kq);
2064 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
2065 			kn_enter_flux(kn);
2066 			KQ_UNLOCK(kq);
2067 			error = kn->kn_fop->f_event(kn, hint);
2068 			KQ_LOCK(kq);
2069 			kn_leave_flux(kn);
2070 			if (error)
2071 				KNOTE_ACTIVATE(kn, 1);
2072 			KQ_UNLOCK_FLUX(kq);
2073 		} else {
2074 			kn->kn_status |= KN_HASKQLOCK;
2075 			if (kn->kn_fop->f_event(kn, hint))
2076 				KNOTE_ACTIVATE(kn, 1);
2077 			kn->kn_status &= ~KN_HASKQLOCK;
2078 			KQ_UNLOCK(kq);
2079 		}
2080 	}
2081 	if ((lockflags & KNF_LISTLOCKED) == 0)
2082 		list->kl_unlock(list->kl_lockarg);
2083 }
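/*
 * Illustrative sketch (not compiled): event sources normally reach
 * knote() through the KNOTE_LOCKED()/KNOTE_UNLOCKED() wrappers from
 * <sys/event.h>.  The producer side, assuming a hypothetical softc
 * whose knlist was initialized with sc_mtx via knlist_init_mtx():
 */
#if 0
static void
foo_rxintr(struct foo_softc *sc)
{

	mtx_lock(&sc->sc_mtx);
	sc->sc_avail++;
	/*
	 * We hold the knlist lock, so pass KNF_LISTLOCKED via the
	 * wrapper; each filter's f_event then runs with the kq lock
	 * held (the final branch of knote() above).
	 */
	KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
	mtx_unlock(&sc->sc_mtx);
}
#endif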
2084 
2085 /*
2086  * add a knote to a knlist
2087  */
2088 void
2089 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2090 {
2091 
2092 	KNL_ASSERT_LOCK(knl, islocked);
2093 	KQ_NOTOWNED(kn->kn_kq);
2094 	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2095 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2096 	    ("knote %p was not detached", kn));
2097 	if (!islocked)
2098 		knl->kl_lock(knl->kl_lockarg);
2099 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2100 	if (!islocked)
2101 		knl->kl_unlock(knl->kl_lockarg);
2102 	KQ_LOCK(kn->kn_kq);
2103 	kn->kn_knlist = knl;
2104 	kn->kn_status &= ~KN_DETACHED;
2105 	KQ_UNLOCK(kn->kn_kq);
2106 }
2107 
2108 static void
2109 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2110     int kqislocked)
2111 {
2112 
2113 	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2114 	KNL_ASSERT_LOCK(knl, knlislocked);
2115 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2116 	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2117 	KASSERT((kn->kn_status & KN_DETACHED) == 0,
2118 	    ("knote %p was already detached", kn));
2119 	if (!knlislocked)
2120 		knl->kl_lock(knl->kl_lockarg);
2121 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2122 	kn->kn_knlist = NULL;
2123 	if (!knlislocked)
2124 		kn_list_unlock(knl);
2125 	if (!kqislocked)
2126 		KQ_LOCK(kn->kn_kq);
2127 	kn->kn_status |= KN_DETACHED;
2128 	if (!kqislocked)
2129 		KQ_UNLOCK(kn->kn_kq);
2130 }
2131 
2132 /*
2133  * remove knote from the specified knlist
2134  */
2135 void
2136 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2137 {
2138 
2139 	knlist_remove_kq(knl, kn, islocked, 0);
2140 }
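/*
 * Illustrative sketch (not compiled): knlist_add() and knlist_remove()
 * are the halves used by a filter's attach and detach paths.  A
 * hypothetical cdev read filter, following the common driver pattern:
 */
#if 0
static void
filt_foordetach(struct knote *kn)
{
	struct foo_softc *sc = kn->kn_hook;

	knlist_remove(&sc->sc_rsel.si_note, kn, 0);
}

static int
filt_fooread(struct knote *kn, long hint)
{
	struct foo_softc *sc = kn->kn_hook;

	/* Called with the kq lock or knlist lock held; see knote(). */
	kn->kn_data = sc->sc_avail;
	return (kn->kn_data > 0);
}

static struct filterops foo_rfiltops = {
	.f_isfd = 1,
	.f_detach = filt_foordetach,
	.f_event = filt_fooread,
};

static int
foo_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct foo_softc *sc = dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);
	kn->kn_fop = &foo_rfiltops;
	kn->kn_hook = sc;
	knlist_add(&sc->sc_rsel.si_note, kn, 0);
	return (0);
}
#endif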
2141 
2142 int
2143 knlist_empty(struct knlist *knl)
2144 {
2145 
2146 	KNL_ASSERT_LOCKED(knl);
2147 	return (SLIST_EMPTY(&knl->kl_list));
2148 }
2149 
2150 static struct mtx knlist_lock;
2151 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2152     MTX_DEF);
2153 static void knlist_mtx_lock(void *arg);
2154 static void knlist_mtx_unlock(void *arg);
2155 
2156 static void
2157 knlist_mtx_lock(void *arg)
2158 {
2159 
2160 	mtx_lock((struct mtx *)arg);
2161 }
2162 
2163 static void
2164 knlist_mtx_unlock(void *arg)
2165 {
2166 
2167 	mtx_unlock((struct mtx *)arg);
2168 }
2169 
2170 static void
2171 knlist_mtx_assert_locked(void *arg)
2172 {
2173 
2174 	mtx_assert((struct mtx *)arg, MA_OWNED);
2175 }
2176 
2177 static void
2178 knlist_mtx_assert_unlocked(void *arg)
2179 {
2180 
2181 	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2182 }
2183 
2184 static void
2185 knlist_rw_rlock(void *arg)
2186 {
2187 
2188 	rw_rlock((struct rwlock *)arg);
2189 }
2190 
2191 static void
2192 knlist_rw_runlock(void *arg)
2193 {
2194 
2195 	rw_runlock((struct rwlock *)arg);
2196 }
2197 
2198 static void
2199 knlist_rw_assert_locked(void *arg)
2200 {
2201 
2202 	rw_assert((struct rwlock *)arg, RA_LOCKED);
2203 }
2204 
2205 static void
2206 knlist_rw_assert_unlocked(void *arg)
2207 {
2208 
2209 	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2210 }
2211 
2212 void
2213 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2214     void (*kl_unlock)(void *),
2215     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
2216 {
2217 
2218 	if (lock == NULL)
2219 		knl->kl_lockarg = &knlist_lock;
2220 	else
2221 		knl->kl_lockarg = lock;
2222 
2223 	if (kl_lock == NULL)
2224 		knl->kl_lock = knlist_mtx_lock;
2225 	else
2226 		knl->kl_lock = kl_lock;
2227 	if (kl_unlock == NULL)
2228 		knl->kl_unlock = knlist_mtx_unlock;
2229 	else
2230 		knl->kl_unlock = kl_unlock;
2231 	if (kl_assert_locked == NULL)
2232 		knl->kl_assert_locked = knlist_mtx_assert_locked;
2233 	else
2234 		knl->kl_assert_locked = kl_assert_locked;
2235 	if (kl_assert_unlocked == NULL)
2236 		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
2237 	else
2238 		knl->kl_assert_unlocked = kl_assert_unlocked;
2239 
2240 	knl->kl_autodestroy = 0;
2241 	SLIST_INIT(&knl->kl_list);
2242 }
2243 
2244 void
2245 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2246 {
2247 
2248 	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
2249 }
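/*
 * Illustrative sketch (not compiled): the typical consumer points
 * knlist_init_mtx() at the selinfo embedded in its (hypothetical)
 * softc, reusing the driver mutex as the knlist lock:
 */
#if 0
static void
foo_attach_events(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	/* The knote list shares the mutex protecting the rest of sc. */
	knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
}
#endif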
2250 
2251 struct knlist *
2252 knlist_alloc(struct mtx *lock)
2253 {
2254 	struct knlist *knl;
2255 
2256 	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2257 	knlist_init_mtx(knl, lock);
2258 	return (knl);
2259 }
2260 
2261 void
2262 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2263 {
2264 
2265 	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2266 	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
2267 }
2268 
2269 void
2270 knlist_destroy(struct knlist *knl)
2271 {
2272 
2273 	KASSERT(KNLIST_EMPTY(knl),
2274 	    ("destroying knlist %p with knotes on it", knl));
2275 }
2276 
2277 void
2278 knlist_detach(struct knlist *knl)
2279 {
2280 
2281 	KNL_ASSERT_LOCKED(knl);
2282 	knl->kl_autodestroy = 1;
2283 	if (knlist_empty(knl)) {
2284 		knlist_destroy(knl);
2285 		free(knl, M_KQUEUE);
2286 	}
2287 }
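/*
 * Illustrative sketch (not compiled): knlist_alloc()/knlist_detach()
 * fit objects that may die while knotes are still attached; the list
 * marks itself kl_autodestroy and is freed once it drains.  A
 * hypothetical teardown:
 */
#if 0
static void
foo_obj_destroy(struct foo_obj *obj)
{

	mtx_lock(&obj->o_mtx);	/* the lock passed to knlist_alloc() */
	/*
	 * Freed immediately if empty; otherwise freed when the last
	 * knote leaves the list.  Do not touch obj->o_knl afterwards.
	 */
	knlist_detach(obj->o_knl);
	obj->o_knl = NULL;
	mtx_unlock(&obj->o_mtx);
}
#endif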
2288 
2289 /*
2290  * Even if we are called with the knlist locked, we may need to drop the
2291  * lock to allow any in-flux knotes time to "settle".
2292  */
2293 void
2294 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2295 {
2296 	struct knote *kn, *kn2;
2297 	struct kqueue *kq;
2298 
2299 	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2300 	if (islocked)
2301 		KNL_ASSERT_LOCKED(knl);
2302 	else {
2303 		KNL_ASSERT_UNLOCKED(knl);
2304 again:		/* need to reacquire lock since we have dropped it */
2305 		knl->kl_lock(knl->kl_lockarg);
2306 	}
2307 
2308 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2309 		kq = kn->kn_kq;
2310 		KQ_LOCK(kq);
2311 		if (kn_in_flux(kn)) {
2312 			KQ_UNLOCK(kq);
2313 			continue;
2314 		}
2315 		knlist_remove_kq(knl, kn, 1, 1);
2316 		if (killkn) {
2317 			kn_enter_flux(kn);
2318 			KQ_UNLOCK(kq);
2319 			knote_drop_detached(kn, td);
2320 		} else {
2321 			/* Make sure cleared knotes disappear soon */
2322 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
2323 			KQ_UNLOCK(kq);
2324 		}
2325 		kq = NULL;
2326 	}
2327 
2328 	if (!SLIST_EMPTY(&knl->kl_list)) {
2329 		/* There are still in-flux knotes remaining. */
2330 		kn = SLIST_FIRST(&knl->kl_list);
2331 		kq = kn->kn_kq;
2332 		KQ_LOCK(kq);
2333 		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2334 		knl->kl_unlock(knl->kl_lockarg);
2335 		kq->kq_state |= KQ_FLUXWAIT;
2336 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2337 		kq = NULL;
2338 		goto again;
2339 	}
2340 
2341 	if (islocked)
2342 		KNL_ASSERT_LOCKED(knl);
2343 	else {
2344 		knl->kl_unlock(knl->kl_lockarg);
2345 		KNL_ASSERT_UNLOCKED(knl);
2346 	}
2347 }
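/*
 * Illustrative sketch (not compiled): callers normally reach
 * knlist_cleardel() through the knlist_clear()/knlist_delete()
 * wrappers from <sys/event.h>.  A hypothetical driver detach path,
 * mirroring what kqueue_destroy() does above:
 */
#if 0
static void
foo_detach_events(struct foo_softc *sc)
{

	/* EOF out remaining knotes (killkn == 0), entered unlocked. */
	knlist_clear(&sc->sc_rsel.si_note, 0);
	seldrain(&sc->sc_rsel);
	knlist_destroy(&sc->sc_rsel.si_note);
	mtx_destroy(&sc->sc_mtx);
}
#endif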
2348 
2349 /*
2350  * Remove all knotes referencing a specified fd.  This must be called with
2351  * the FILEDESC lock held; it prevents a race where a new fd occupies the
2352  * entry and has a knote attached while the old knotes are being dropped.
2353  */
2354 void
2355 knote_fdclose(struct thread *td, int fd)
2356 {
2357 	struct filedesc *fdp = td->td_proc->p_fd;
2358 	struct kqueue *kq;
2359 	struct knote *kn;
2360 	int influx;
2361 
2362 	FILEDESC_XLOCK_ASSERT(fdp);
2363 
2364 	/*
2365 	 * We shouldn't have to worry about new kevents appearing on fd
2366 	 * since filedesc is locked.
2367 	 */
2368 	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2369 		KQ_LOCK(kq);
2370 
2371 again:
2372 		influx = 0;
2373 		while (kq->kq_knlistsize > fd &&
2374 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2375 			if (kn_in_flux(kn)) {
2376 				/* someone else might be waiting on our knote */
2377 				if (influx)
2378 					wakeup(kq);
2379 				kq->kq_state |= KQ_FLUXWAIT;
2380 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2381 				goto again;
2382 			}
2383 			kn_enter_flux(kn);
2384 			KQ_UNLOCK(kq);
2385 			influx = 1;
2386 			knote_drop(kn, td);
2387 			KQ_LOCK(kq);
2388 		}
2389 		KQ_UNLOCK_FLUX(kq);
2390 	}
2391 }
2392 
2393 static int
2394 knote_attach(struct knote *kn, struct kqueue *kq)
2395 {
2396 	struct klist *list;
2397 
2398 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2399 	KQ_OWNED(kq);
2400 
2401 	if (kn->kn_fop->f_isfd) {
2402 		if (kn->kn_id >= kq->kq_knlistsize)
2403 			return (ENOMEM);
2404 		list = &kq->kq_knlist[kn->kn_id];
2405 	} else {
2406 		if (kq->kq_knhash == NULL)
2407 			return (ENOMEM);
2408 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2409 	}
2410 	SLIST_INSERT_HEAD(list, kn, kn_link);
2411 	return (0);
2412 }
2413 
2414 static void
2415 knote_drop(struct knote *kn, struct thread *td)
2416 {
2417 
2418 	if ((kn->kn_status & KN_DETACHED) == 0)
2419 		kn->kn_fop->f_detach(kn);
2420 	knote_drop_detached(kn, td);
2421 }
2422 
2423 static void
2424 knote_drop_detached(struct knote *kn, struct thread *td)
2425 {
2426 	struct kqueue *kq;
2427 	struct klist *list;
2428 
2429 	kq = kn->kn_kq;
2430 
2431 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2432 	    ("knote %p still attached", kn));
2433 	KQ_NOTOWNED(kq);
2434 
2435 	KQ_LOCK(kq);
2436 	KASSERT(kn->kn_influx == 1,
2437 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2438 
2439 	if (kn->kn_fop->f_isfd)
2440 		list = &kq->kq_knlist[kn->kn_id];
2441 	else
2442 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2443 
2444 	if (!SLIST_EMPTY(list))
2445 		SLIST_REMOVE(list, kn, knote, kn_link);
2446 	if (kn->kn_status & KN_QUEUED)
2447 		knote_dequeue(kn);
2448 	KQ_UNLOCK_FLUX(kq);
2449 
2450 	if (kn->kn_fop->f_isfd) {
2451 		fdrop(kn->kn_fp, td);
2452 		kn->kn_fp = NULL;
2453 	}
2454 	kqueue_fo_release(kn->kn_kevent.filter);
2455 	kn->kn_fop = NULL;
2456 	knote_free(kn);
2457 }
2458 
2459 static void
2460 knote_enqueue(struct knote *kn)
2461 {
2462 	struct kqueue *kq = kn->kn_kq;
2463 
2464 	KQ_OWNED(kn->kn_kq);
2465 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2466 
2467 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2468 	kn->kn_status |= KN_QUEUED;
2469 	kq->kq_count++;
2470 	kqueue_wakeup(kq);
2471 }
2472 
2473 static void
2474 knote_dequeue(struct knote *kn)
2475 {
2476 	struct kqueue *kq = kn->kn_kq;
2477 
2478 	KQ_OWNED(kn->kn_kq);
2479 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2480 
2481 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2482 	kn->kn_status &= ~KN_QUEUED;
2483 	kq->kq_count--;
2484 }
2485 
2486 static void
2487 knote_init(void)
2488 {
2489 
2490 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2491 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2492 }
2493 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2494 
2495 static struct knote *
2496 knote_alloc(int waitok)
2497 {
2498 
2499 	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
2500 	    M_ZERO));
2501 }
2502 
2503 static void
2504 knote_free(struct knote *kn)
2505 {
2506 
2507 	uma_zfree(knote_zone, kn);
2508 }
2509 
2510 /*
2511  * Register the kevent with the kqueue specified by fd.
2512  */
2513 int
2514 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2515 {
2516 	struct kqueue *kq;
2517 	struct file *fp;
2518 	cap_rights_t rights;
2519 	int error;
2520 
2521 	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
2522 	if (error != 0)
2523 		return (error);
2524 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2525 		goto noacquire;
2526 
2527 	error = kqueue_register(kq, kev, td, waitok);
2528 	kqueue_release(kq, 0);
2529 
2530 noacquire:
2531 	fdrop(fp, td);
2532 	return (error);
2533 }
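/*
 * Illustrative sketch (not compiled, hypothetical userland code):
 * kqfd_register() is the in-kernel analogue of registering a change
 * through kevent(2):
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>

static void
watch_fd(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	/* nchanges == 1, nevents == 0: register only, do not wait. */
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
}
#endif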
2534