xref: /freebsd/sys/kern/kern_event.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
4  * Copyright (c) 2009 Apple, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_ktrace.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/proc.h>
40 #include <sys/malloc.h>
41 #include <sys/unistd.h>
42 #include <sys/file.h>
43 #include <sys/filedesc.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/kthread.h>
47 #include <sys/selinfo.h>
48 #include <sys/queue.h>
49 #include <sys/event.h>
50 #include <sys/eventvar.h>
51 #include <sys/poll.h>
52 #include <sys/protosw.h>
53 #include <sys/sigio.h>
54 #include <sys/signalvar.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/stat.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysproto.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/taskqueue.h>
62 #include <sys/uio.h>
63 #ifdef KTRACE
64 #include <sys/ktrace.h>
65 #endif
66 
67 #include <vm/uma.h>
68 
69 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
70 
71 /*
72  * This lock is used when multiple kq locks must be held at once.  It
73  * possibly should be made into a per-process lock.
74  */
75 static struct mtx	kq_global;
76 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
77 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
78 	if (!haslck)				\
79 		mtx_lock(lck);			\
80 	haslck = 1;				\
81 } while (0)
82 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
83 	if (haslck)				\
84 		mtx_unlock(lck);			\
85 	haslck = 0;				\
86 } while (0)
87 
88 TASKQUEUE_DEFINE_THREAD(kqueue);
89 
90 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
91 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
92 static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
93 		    struct thread *td, int waitok);
94 static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
95 static void	kqueue_release(struct kqueue *kq, int locked);
96 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
97 		    uintptr_t ident, int waitok);
98 static void	kqueue_task(void *arg, int pending);
99 static int	kqueue_scan(struct kqueue *kq, int maxevents,
100 		    struct kevent_copyops *k_ops,
101 		    const struct timespec *timeout,
102 		    struct kevent *keva, struct thread *td);
103 static void 	kqueue_wakeup(struct kqueue *kq);
104 static struct filterops *kqueue_fo_find(int filt);
105 static void	kqueue_fo_release(int filt);
106 
107 static fo_rdwr_t	kqueue_read;
108 static fo_rdwr_t	kqueue_write;
109 static fo_truncate_t	kqueue_truncate;
110 static fo_ioctl_t	kqueue_ioctl;
111 static fo_poll_t	kqueue_poll;
112 static fo_kqfilter_t	kqueue_kqfilter;
113 static fo_stat_t	kqueue_stat;
114 static fo_close_t	kqueue_close;
115 
116 static struct fileops kqueueops = {
117 	.fo_read = kqueue_read,
118 	.fo_write = kqueue_write,
119 	.fo_truncate = kqueue_truncate,
120 	.fo_ioctl = kqueue_ioctl,
121 	.fo_poll = kqueue_poll,
122 	.fo_kqfilter = kqueue_kqfilter,
123 	.fo_stat = kqueue_stat,
124 	.fo_close = kqueue_close,
125 };
126 
127 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
128 static void 	knote_drop(struct knote *kn, struct thread *td);
129 static void 	knote_enqueue(struct knote *kn);
130 static void 	knote_dequeue(struct knote *kn);
131 static void 	knote_init(void);
132 static struct 	knote *knote_alloc(int waitok);
133 static void 	knote_free(struct knote *kn);
134 
135 static void	filt_kqdetach(struct knote *kn);
136 static int	filt_kqueue(struct knote *kn, long hint);
137 static int	filt_procattach(struct knote *kn);
138 static void	filt_procdetach(struct knote *kn);
139 static int	filt_proc(struct knote *kn, long hint);
140 static int	filt_fileattach(struct knote *kn);
141 static void	filt_timerexpire(void *knx);
142 static int	filt_timerattach(struct knote *kn);
143 static void	filt_timerdetach(struct knote *kn);
144 static int	filt_timer(struct knote *kn, long hint);
145 static int	filt_userattach(struct knote *kn);
146 static void	filt_userdetach(struct knote *kn);
147 static int	filt_user(struct knote *kn, long hint);
148 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
149 		    u_long type);
150 
151 static struct filterops file_filtops = {
152 	.f_isfd = 1,
153 	.f_attach = filt_fileattach,
154 };
155 static struct filterops kqread_filtops = {
156 	.f_isfd = 1,
157 	.f_detach = filt_kqdetach,
158 	.f_event = filt_kqueue,
159 };
160 /* XXX - move to kern_proc.c?  */
161 static struct filterops proc_filtops = {
162 	.f_isfd = 0,
163 	.f_attach = filt_procattach,
164 	.f_detach = filt_procdetach,
165 	.f_event = filt_proc,
166 };
167 static struct filterops timer_filtops = {
168 	.f_isfd = 0,
169 	.f_attach = filt_timerattach,
170 	.f_detach = filt_timerdetach,
171 	.f_event = filt_timer,
172 };
173 static struct filterops user_filtops = {
174 	.f_attach = filt_userattach,
175 	.f_detach = filt_userdetach,
176 	.f_event = filt_user,
177 	.f_touch = filt_usertouch,
178 };
179 
180 static uma_zone_t	knote_zone;
181 static int 		kq_ncallouts = 0;
182 static int 		kq_calloutmax = (4 * 1024);
183 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
184     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
185 
186 /* XXX - ensure not KN_INFLUX?? */
187 #define KNOTE_ACTIVATE(kn, islock) do { 				\
188 	if ((islock))							\
189 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
190 	else								\
191 		KQ_LOCK((kn)->kn_kq);					\
192 	(kn)->kn_status |= KN_ACTIVE;					\
193 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
194 		knote_enqueue((kn));					\
195 	if (!(islock))							\
196 		KQ_UNLOCK((kn)->kn_kq);					\
197 } while (0)
198 #define KQ_LOCK(kq) do {						\
199 	mtx_lock(&(kq)->kq_lock);					\
200 } while (0)
201 #define KQ_FLUX_WAKEUP(kq) do {						\
202 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
203 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
204 		wakeup((kq));						\
205 	}								\
206 } while (0)
207 #define KQ_UNLOCK_FLUX(kq) do {						\
208 	KQ_FLUX_WAKEUP(kq);						\
209 	mtx_unlock(&(kq)->kq_lock);					\
210 } while (0)
211 #define KQ_UNLOCK(kq) do {						\
212 	mtx_unlock(&(kq)->kq_lock);					\
213 } while (0)
214 #define KQ_OWNED(kq) do {						\
215 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
216 } while (0)
217 #define KQ_NOTOWNED(kq) do {						\
218 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
219 } while (0)
220 #define KN_LIST_LOCK(kn) do {						\
221 	if (kn->kn_knlist != NULL)					\
222 		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
223 } while (0)
224 #define KN_LIST_UNLOCK(kn) do {						\
225 	if (kn->kn_knlist != NULL) 					\
226 		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
227 } while (0)
228 #define	KNL_ASSERT_LOCK(knl, islocked) do {				\
229 	if (islocked)							\
230 		KNL_ASSERT_LOCKED(knl);				\
231 	else								\
232 		KNL_ASSERT_UNLOCKED(knl);				\
233 } while (0)
234 #ifdef INVARIANTS
235 #define	KNL_ASSERT_LOCKED(knl) do {					\
236 	knl->kl_assert_locked((knl)->kl_lockarg);			\
237 } while (0)
238 #define	KNL_ASSERT_UNLOCKED(knl) do {					\
239 	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
240 } while (0)
241 #else /* !INVARIANTS */
242 #define	KNL_ASSERT_LOCKED(knl) do {} while (0)
243 #define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
244 #endif /* INVARIANTS */
245 
246 #define	KN_HASHSIZE		64		/* XXX should be tunable */
247 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
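
/*
 * For example, with the initial hash size of 64 the mask is 63, and an
 * ident of 0x1234 hashes to (0x1234 ^ 0x12) & 63 = 0x1226 & 63 = 38.
 * XORing in the second byte provides some mixing for idents that differ
 * only above the mask.
 */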
248 
249 static int
250 filt_nullattach(struct knote *kn)
251 {
252 
253 	return (ENXIO);
254 }
255 
256 struct filterops null_filtops = {
257 	.f_isfd = 0,
258 	.f_attach = filt_nullattach,
259 };
260 
261 /* XXX - make SYSINIT to add these, and move into respective modules. */
262 extern struct filterops sig_filtops;
263 extern struct filterops fs_filtops;
264 
265 /*
266  * Table for all system-defined filters.
267  */
268 static struct mtx	filterops_lock;
269 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
270 	MTX_DEF);
271 static struct {
272 	struct filterops *for_fop;
273 	int for_refcnt;
274 } sysfilt_ops[EVFILT_SYSCOUNT] = {
275 	{ &file_filtops },			/* EVFILT_READ */
276 	{ &file_filtops },			/* EVFILT_WRITE */
277 	{ &null_filtops },			/* EVFILT_AIO */
278 	{ &file_filtops },			/* EVFILT_VNODE */
279 	{ &proc_filtops },			/* EVFILT_PROC */
280 	{ &sig_filtops },			/* EVFILT_SIGNAL */
281 	{ &timer_filtops },			/* EVFILT_TIMER */
282 	{ &file_filtops },			/* EVFILT_NETDEV */
283 	{ &fs_filtops },			/* EVFILT_FS */
284 	{ &null_filtops },			/* EVFILT_LIO */
285 	{ &user_filtops },			/* EVFILT_USER */
286 };
287 
288 /*
289  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
290  * method.
291  */
292 static int
293 filt_fileattach(struct knote *kn)
294 {
295 
296 	return (fo_kqfilter(kn->kn_fp, kn));
297 }
298 
299 /*ARGSUSED*/
300 static int
301 kqueue_kqfilter(struct file *fp, struct knote *kn)
302 {
303 	struct kqueue *kq = kn->kn_fp->f_data;
304 
305 	if (kn->kn_filter != EVFILT_READ)
306 		return (EINVAL);
307 
308 	kn->kn_status |= KN_KQUEUE;
309 	kn->kn_fop = &kqread_filtops;
310 	knlist_add(&kq->kq_sel.si_note, kn, 0);
311 
312 	return (0);
313 }
314 
315 static void
316 filt_kqdetach(struct knote *kn)
317 {
318 	struct kqueue *kq = kn->kn_fp->f_data;
319 
320 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
321 }
322 
323 /*ARGSUSED*/
324 static int
325 filt_kqueue(struct knote *kn, long hint)
326 {
327 	struct kqueue *kq = kn->kn_fp->f_data;
328 
329 	kn->kn_data = kq->kq_count;
330 	return (kn->kn_data > 0);
331 }
332 
333 /* XXX - move to kern_proc.c?  */
334 static int
335 filt_procattach(struct knote *kn)
336 {
337 	struct proc *p;
338 	int immediate;
339 	int error;
340 
341 	immediate = 0;
342 	p = pfind(kn->kn_id);
343 	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
344 		p = zpfind(kn->kn_id);
345 		immediate = 1;
346 	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
347 		immediate = 1;
348 	}
349 
350 	if (p == NULL)
351 		return (ESRCH);
352 	if ((error = p_cansee(curthread, p))) {
353 		PROC_UNLOCK(p);
		return (error);
	}
354 
355 	kn->kn_ptr.p_proc = p;
356 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
357 
358 	/*
359 	 * internal flag indicating registration done by kernel
360 	 */
361 	if (kn->kn_flags & EV_FLAG1) {
362 		kn->kn_data = kn->kn_sdata;		/* ppid */
363 		kn->kn_fflags = NOTE_CHILD;
364 		kn->kn_flags &= ~EV_FLAG1;
365 	}
366 
367 	if (immediate == 0)
368 		knlist_add(&p->p_klist, kn, 1);
369 
370 	/*
371 	 * Immediately activate any exit notes if the target process is a
372 	 * zombie.  This is necessary to handle the case where the target
373 	 * process, e.g. a child, dies before the kevent is registered.
374 	 */
375 	if (immediate && filt_proc(kn, NOTE_EXIT))
376 		KNOTE_ACTIVATE(kn, 0);
377 
378 	PROC_UNLOCK(p);
379 
380 	return (0);
381 }
382 
383 /*
384  * The knote may be attached to a different process, which may exit,
385  * leaving nothing for the knote to be attached to.  So when the process
386  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
387  * it will be deleted when read out.  However, as part of the knote deletion,
388  * this routine is called, so a check is needed to avoid actually performing
389  * a detach, because the original process does not exist any more.
390  */
391 /* XXX - move to kern_proc.c?  */
392 static void
393 filt_procdetach(struct knote *kn)
394 {
395 	struct proc *p;
396 
397 	p = kn->kn_ptr.p_proc;
398 	knlist_remove(&p->p_klist, kn, 0);
399 	kn->kn_ptr.p_proc = NULL;
400 }
401 
402 /* XXX - move to kern_proc.c?  */
403 static int
404 filt_proc(struct knote *kn, long hint)
405 {
406 	struct proc *p = kn->kn_ptr.p_proc;
407 	u_int event;
408 
409 	/*
410 	 * mask off extra data
411 	 */
412 	event = (u_int)hint & NOTE_PCTRLMASK;
413 
414 	/*
415 	 * if the user is interested in this event, record it.
416 	 */
417 	if (kn->kn_sfflags & event)
418 		kn->kn_fflags |= event;
419 
420 	/*
421 	 * process is gone, so flag the event as finished.
422 	 */
423 	if (event == NOTE_EXIT) {
424 		if (!(kn->kn_status & KN_DETACHED))
425 			knlist_remove_inevent(&p->p_klist, kn);
426 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
427 		kn->kn_data = p->p_xstat;
428 		kn->kn_ptr.p_proc = NULL;
429 		return (1);
430 	}
431 
432 	return (kn->kn_fflags != 0);
433 }
434 
435 /*
436  * Called when a process forks.  It mostly does the same as knote(),
437  * activating all knotes registered to fire when the process forks.
438  * Additionally, for each knote attached to the parent, check whether
439  * the user wants to track the new process.  If so, attach a new
440  * knote to the child, and immediately report an event with the
441  * child's pid.
442  */
443 void
444 knote_fork(struct knlist *list, int pid)
445 {
446 	struct kqueue *kq;
447 	struct knote *kn;
448 	struct kevent kev;
449 	int error;
450 
451 	if (list == NULL)
452 		return;
453 	list->kl_lock(list->kl_lockarg);
454 
455 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
456 		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
457 			continue;
458 		kq = kn->kn_kq;
459 		KQ_LOCK(kq);
460 		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
461 			KQ_UNLOCK(kq);
462 			continue;
463 		}
464 
465 		/*
466 		 * As in knote(), activate the event.
467 		 */
468 		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
469 			kn->kn_status |= KN_HASKQLOCK;
470 			if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
471 				KNOTE_ACTIVATE(kn, 1);
472 			kn->kn_status &= ~KN_HASKQLOCK;
473 			KQ_UNLOCK(kq);
474 			continue;
475 		}
476 
477 		/*
478 		 * The NOTE_TRACK case. In addition to the activation
479 		 * of the event, we need to register a new event to
480 		 * track the child. Drop the locks in preparation for
481 		 * the call to kqueue_register().
482 		 */
483 		kn->kn_status |= KN_INFLUX;
484 		KQ_UNLOCK(kq);
485 		list->kl_unlock(list->kl_lockarg);
486 
487 		/*
488 		 * Activate existing knote and register a knote with
489 		 * new process.
490 		 */
491 		kev.ident = pid;
492 		kev.filter = kn->kn_filter;
493 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
494 		kev.fflags = kn->kn_sfflags;
495 		kev.data = kn->kn_id;		/* parent */
496 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
497 		error = kqueue_register(kq, &kev, NULL, 0);
498 		if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
499 			KNOTE_ACTIVATE(kn, 0);
500 		if (error)
501 			kn->kn_fflags |= NOTE_TRACKERR;
502 		KQ_LOCK(kq);
503 		kn->kn_status &= ~KN_INFLUX;
504 		KQ_UNLOCK_FLUX(kq);
505 		list->kl_lock(list->kl_lockarg);
506 	}
507 	list->kl_unlock(list->kl_lockarg);
508 }
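
/*
 * Illustrative sketch (not part of the original file; kq and pid are
 * assumed to exist): the NOTE_TRACK handling above is driven from
 * userland by a registration like the one below.  When the watched
 * process forks, the parent's knote fires with NOTE_FORK, a new knote
 * is attached to the child as if by EV_ADD, and that knote immediately
 * reports NOTE_CHILD with the parent's pid in its data field; if the
 * attach fails, the parent's knote reports NOTE_TRACKERR instead.
 */
#if 0
	struct kevent kev;

	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
#endif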
509 
510 static int
511 timertoticks(intptr_t data)
512 {
513 	struct timeval tv;
514 	int tticks;
515 
516 	tv.tv_sec = data / 1000;
517 	tv.tv_usec = (data % 1000) * 1000;
518 	tticks = tvtohz(&tv);
519 
520 	return tticks;
521 }
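
/*
 * For example, with hz = 1000 a timer registered with data = 2500 (ms)
 * converts to tv = { 2, 500000 }, and tvtohz() returns roughly 2501
 * ticks (it rounds up and adds one tick for the partially-elapsed
 * current tick).
 */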
522 
523 /* XXX - move to kern_timeout.c? */
524 static void
525 filt_timerexpire(void *knx)
526 {
527 	struct knote *kn = knx;
528 	struct callout *calloutp;
529 
530 	kn->kn_data++;
531 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
532 
533 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
534 		calloutp = (struct callout *)kn->kn_hook;
535 		callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
536 		    filt_timerexpire, kn);
537 	}
538 }
539 
540 /*
541  * data contains the amount of time to sleep, in milliseconds
542  */
543 /* XXX - move to kern_timeout.c? */
544 static int
545 filt_timerattach(struct knote *kn)
546 {
547 	struct callout *calloutp;
548 
549 	atomic_add_int(&kq_ncallouts, 1);
550 
551 	if (kq_ncallouts >= kq_calloutmax) {
552 		atomic_add_int(&kq_ncallouts, -1);
553 		return (ENOMEM);
554 	}
555 
556 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
557 	kn->kn_status &= ~KN_DETACHED;		/* knlist_add usually sets it */
558 	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
559 	callout_init(calloutp, CALLOUT_MPSAFE);
560 	kn->kn_hook = calloutp;
561 	callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
562 	    filt_timerexpire, kn);
563 
564 	return (0);
565 }
566 
567 /* XXX - move to kern_timeout.c? */
568 static void
569 filt_timerdetach(struct knote *kn)
570 {
571 	struct callout *calloutp;
572 
573 	calloutp = (struct callout *)kn->kn_hook;
574 	callout_drain(calloutp);
575 	free(calloutp, M_KQUEUE);
576 	atomic_add_int(&kq_ncallouts, -1);
577 	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
578 }
579 
580 /* XXX - move to kern_timeout.c? */
581 static int
582 filt_timer(struct knote *kn, long hint)
583 {
584 
585 	return (kn->kn_data != 0);
586 }
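
/*
 * Illustrative sketch (not part of the original file): the timer filter
 * above is exercised entirely through kevent(2).  The registering
 * kevent's data field is the period in milliseconds, and the returned
 * data field (kn_data above) counts expirations since last retrieval.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	/* Periodic 500 ms timer; EV_CLEAR is set by the filter itself. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent register");
	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
			err(1, "kevent wait");
		printf("%jd expiration(s)\n", (intmax_t)kev.data);
	}
}
#endif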
587 
588 static int
589 filt_userattach(struct knote *kn)
590 {
591 
592 	/*
593 	 * EVFILT_USER knotes are not attached to anything in the kernel.
594 	 */
595 	kn->kn_hook = NULL;
596 	if (kn->kn_fflags & NOTE_TRIGGER)
597 		kn->kn_hookid = 1;
598 	else
599 		kn->kn_hookid = 0;
600 	return (0);
601 }
602 
603 static void
604 filt_userdetach(__unused struct knote *kn)
605 {
606 
607 	/*
608 	 * EVFILT_USER knotes are not attached to anything in the kernel.
609 	 */
610 }
611 
612 static int
613 filt_user(struct knote *kn, __unused long hint)
614 {
615 
616 	return (kn->kn_hookid);
617 }
618 
619 static void
620 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
621 {
622 	u_int ffctrl;
623 
624 	switch (type) {
625 	case EVENT_REGISTER:
626 		if (kev->fflags & NOTE_TRIGGER)
627 			kn->kn_hookid = 1;
628 
629 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
630 		kev->fflags &= NOTE_FFLAGSMASK;
631 		switch (ffctrl) {
632 		case NOTE_FFNOP:
633 			break;
634 
635 		case NOTE_FFAND:
636 			kn->kn_sfflags &= kev->fflags;
637 			break;
638 
639 		case NOTE_FFOR:
640 			kn->kn_sfflags |= kev->fflags;
641 			break;
642 
643 		case NOTE_FFCOPY:
644 			kn->kn_sfflags = kev->fflags;
645 			break;
646 
647 		default:
648 			/* XXX Return error? */
649 			break;
650 		}
651 		kn->kn_sdata = kev->data;
652 		if (kev->flags & EV_CLEAR) {
653 			kn->kn_hookid = 0;
654 			kn->kn_data = 0;
655 			kn->kn_fflags = 0;
656 		}
657 		break;
658 
659 	case EVENT_PROCESS:
660 		*kev = kn->kn_kevent;
661 		kev->fflags = kn->kn_sfflags;
662 		kev->data = kn->kn_sdata;
663 		if (kn->kn_flags & EV_CLEAR) {
664 			kn->kn_hookid = 0;
665 			kn->kn_data = 0;
666 			kn->kn_fflags = 0;
667 		}
668 		break;
669 
670 	default:
671 		panic("filt_usertouch() - invalid type (%lu)", type);
672 		break;
673 	}
674 }
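
/*
 * Illustrative sketch (not part of the original file; kq is assumed to
 * exist): EVFILT_USER is armed once with EV_ADD and fired with
 * NOTE_TRIGGER, which makes it a handy way to wake a kevent loop from
 * another thread without a self-pipe.
 */
#if 0
	struct kevent kev;

	/* Once, at setup time: */
	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");

	/* Later, from the waking thread: */
	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
#endif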
675 
676 int
677 kqueue(struct thread *td, struct kqueue_args *uap)
678 {
679 	struct filedesc *fdp;
680 	struct kqueue *kq;
681 	struct file *fp;
682 	int fd, error;
683 
684 	fdp = td->td_proc->p_fd;
685 	error = falloc(td, &fp, &fd);
686 	if (error)
687 		goto done2;
688 
689 	/* An extra reference on `fp' has been held for us by falloc(). */
690 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
691 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
692 	TAILQ_INIT(&kq->kq_head);
693 	kq->kq_fdp = fdp;
694 	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
695 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
696 
697 	FILEDESC_XLOCK(fdp);
698 	SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
699 	FILEDESC_XUNLOCK(fdp);
700 
701 	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
702 	fdrop(fp, td);
703 
704 	td->td_retval[0] = fd;
705 done2:
706 	return (error);
707 }
708 
709 #ifndef _SYS_SYSPROTO_H_
710 struct kevent_args {
711 	int	fd;
712 	const struct kevent *changelist;
713 	int	nchanges;
714 	struct	kevent *eventlist;
715 	int	nevents;
716 	const struct timespec *timeout;
717 };
718 #endif
719 int
720 kevent(struct thread *td, struct kevent_args *uap)
721 {
722 	struct timespec ts, *tsp;
723 	struct kevent_copyops k_ops = { uap,
724 					kevent_copyout,
725 					kevent_copyin};
726 	int error;
727 #ifdef KTRACE
728 	struct uio ktruio;
729 	struct iovec ktriov;
730 	struct uio *ktruioin = NULL;
731 	struct uio *ktruioout = NULL;
732 #endif
733 
734 	if (uap->timeout != NULL) {
735 		error = copyin(uap->timeout, &ts, sizeof(ts));
736 		if (error)
737 			return (error);
738 		tsp = &ts;
739 	} else
740 		tsp = NULL;
741 
742 #ifdef KTRACE
743 	if (KTRPOINT(td, KTR_GENIO)) {
744 		ktriov.iov_base = uap->changelist;
745 		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
746 		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
747 		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
748 		    .uio_td = td };
749 		ktruioin = cloneuio(&ktruio);
750 		ktriov.iov_base = uap->eventlist;
751 		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
752 		ktruioout = cloneuio(&ktruio);
753 	}
754 #endif
755 
756 	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
757 	    &k_ops, tsp);
758 
759 #ifdef KTRACE
760 	if (ktruioin != NULL) {
761 		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
762 		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
763 		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
764 		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
765 	}
766 #endif
767 
768 	return (error);
769 }
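
/*
 * Illustrative sketch (not part of the original file): the canonical
 * userland counterpart of the syscall above, applying one change and
 * waiting for one event in a single call.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent change, ev;
	int kq, n;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	/* Register interest in reads on stdin and wait for one event. */
	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
	n = kevent(kq, &change, 1, &ev, 1, NULL);
	if (n == -1)
		err(1, "kevent");
	printf("fd %d readable, %jd bytes pending\n",
	    (int)ev.ident, (intmax_t)ev.data);
	close(kq);
	return (0);
}
#endif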
770 
771 /*
772  * Copy 'count' items into the destination list pointed to by uap->eventlist.
773  */
774 static int
775 kevent_copyout(void *arg, struct kevent *kevp, int count)
776 {
777 	struct kevent_args *uap;
778 	int error;
779 
780 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
781 	uap = (struct kevent_args *)arg;
782 
783 	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
784 	if (error == 0)
785 		uap->eventlist += count;
786 	return (error);
787 }
788 
789 /*
790  * Copy 'count' items from the list pointed to by uap->changelist.
791  */
792 static int
793 kevent_copyin(void *arg, struct kevent *kevp, int count)
794 {
795 	struct kevent_args *uap;
796 	int error;
797 
798 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
799 	uap = (struct kevent_args *)arg;
800 
801 	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
802 	if (error == 0)
803 		uap->changelist += count;
804 	return (error);
805 }
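
/*
 * Illustrative sketch (not part of the original file; struct
 * kevent_kargs, kevent_kcopyin, and kev_array are hypothetical):
 * kern_kevent() is written against struct kevent_copyops precisely so
 * that in-kernel callers can supply a changelist that does not live in
 * user memory, e.g.:
 */
#if 0
struct kevent_kargs {
	struct kevent *changes;		/* kernel-resident changelist */
};

static int
kevent_kcopyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_kargs *ka = arg;

	bcopy(ka->changes, kevp, count * sizeof(*kevp));
	ka->changes += count;
	return (0);
}

	/* ... then pass to kern_kevent() in place of the user copyops: */
	struct kevent_kargs ka = { .changes = kev_array };
	struct kevent_copyops k_ops = { &ka, NULL, kevent_kcopyin };

	error = kern_kevent(td, fd, nkev, 0, &k_ops, NULL);
#endif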
806 
807 int
808 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
809     struct kevent_copyops *k_ops, const struct timespec *timeout)
810 {
811 	struct kevent keva[KQ_NEVENTS];
812 	struct kevent *kevp, *changes;
813 	struct kqueue *kq;
814 	struct file *fp;
815 	int i, n, nerrors, error;
816 
817 	if ((error = fget(td, fd, &fp)) != 0)
818 		return (error);
819 	if ((error = kqueue_acquire(fp, &kq)) != 0)
820 		goto done_norel;
821 
822 	nerrors = 0;
823 
824 	while (nchanges > 0) {
825 		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
826 		error = k_ops->k_copyin(k_ops->arg, keva, n);
827 		if (error)
828 			goto done;
829 		changes = keva;
830 		for (i = 0; i < n; i++) {
831 			kevp = &changes[i];
832 			if (!kevp->filter)
833 				continue;
834 			kevp->flags &= ~EV_SYSFLAGS;
835 			error = kqueue_register(kq, kevp, td, 1);
836 			if (error || (kevp->flags & EV_RECEIPT)) {
837 				if (nevents != 0) {
838 					kevp->flags = EV_ERROR;
839 					kevp->data = error;
840 					(void) k_ops->k_copyout(k_ops->arg,
841 					    kevp, 1);
842 					nevents--;
843 					nerrors++;
844 				} else {
845 					goto done;
846 				}
847 			}
848 		}
849 		nchanges -= n;
850 	}
851 	if (nerrors) {
852 		td->td_retval[0] = nerrors;
853 		error = 0;
854 		goto done;
855 	}
856 
857 	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
858 done:
859 	kqueue_release(kq, 0);
860 done_norel:
861 	fdrop(fp, td);
862 	return (error);
863 }
864 
865 int
866 kqueue_add_filteropts(int filt, struct filterops *filtops)
867 {
868 	int error;
869 	error = 0;
870 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
871 		printf(
872 "trying to add a filterop that is out of range: %d is beyond %d\n",
873 		    ~filt, EVFILT_SYSCOUNT);
874 		return EINVAL;
875 	}
876 	mtx_lock(&filterops_lock);
877 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
878 	    sysfilt_ops[~filt].for_fop != NULL)
879 		error = EEXIST;
880 	else {
881 		sysfilt_ops[~filt].for_fop = filtops;
882 		sysfilt_ops[~filt].for_refcnt = 0;
883 	}
884 	mtx_unlock(&filterops_lock);
885 
886 	return (error);
887 }
888 
889 int
890 kqueue_del_filteropts(int filt)
891 {
892 	int error;
893 
894 	error = 0;
895 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
896 		return EINVAL;
897 
898 	mtx_lock(&filterops_lock);
899 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
900 	    sysfilt_ops[~filt].for_fop == NULL)
901 		error = EINVAL;
902 	else if (sysfilt_ops[~filt].for_refcnt != 0)
903 		error = EBUSY;
904 	else {
905 		sysfilt_ops[~filt].for_fop = &null_filtops;
906 		sysfilt_ops[~filt].for_refcnt = 0;
907 	}
908 	mtx_unlock(&filterops_lock);
909 
910 	return error;
911 }
912 
913 static struct filterops *
914 kqueue_fo_find(int filt)
915 {
916 
917 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
918 		return NULL;
919 
920 	mtx_lock(&filterops_lock);
921 	sysfilt_ops[~filt].for_refcnt++;
922 	if (sysfilt_ops[~filt].for_fop == NULL)
923 		sysfilt_ops[~filt].for_fop = &null_filtops;
924 	mtx_unlock(&filterops_lock);
925 
926 	return sysfilt_ops[~filt].for_fop;
927 }
928 
929 static void
930 kqueue_fo_release(int filt)
931 {
932 
933 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
934 		return;
935 
936 	mtx_lock(&filterops_lock);
937 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
938 	    ("filter object refcount not valid on release"));
939 	sysfilt_ops[~filt].for_refcnt--;
940 	mtx_unlock(&filterops_lock);
941 }
942 
943 /*
944  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
945  * determines whether memory allocations may sleep.  Make sure it is 0
946  * if you hold any mutexes.
947  */
948 static int
949 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
950 {
951 	struct filterops *fops;
952 	struct file *fp;
953 	struct knote *kn, *tkn;
954 	int error, filt, event;
955 	int haskqglobal;
956 
957 	fp = NULL;
958 	kn = NULL;
959 	error = 0;
960 	haskqglobal = 0;
961 
962 	filt = kev->filter;
963 	fops = kqueue_fo_find(filt);
964 	if (fops == NULL)
965 		return EINVAL;
966 
967 	tkn = knote_alloc(waitok);		/* prevent waiting with locks */
968 
969 findkn:
970 	if (fops->f_isfd) {
971 		KASSERT(td != NULL, ("td is NULL"));
972 		error = fget(td, kev->ident, &fp);
973 		if (error)
974 			goto done;
975 
976 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
977 		    kev->ident, 0) != 0) {
978 			/* try again */
979 			fdrop(fp, td);
980 			fp = NULL;
981 			error = kqueue_expand(kq, fops, kev->ident, waitok);
982 			if (error)
983 				goto done;
984 			goto findkn;
985 		}
986 
987 		if (fp->f_type == DTYPE_KQUEUE) {
988 			/*
989 			 * if we add some intelligence about what we are doing,
990 			 * we should be able to support events on ourselves.
991 			 * We need to know when we are doing this to prevent
992 			 * getting both the knlist lock and the kq lock since
993 			 * they are the same thing.
994 			 */
995 			if (fp->f_data == kq) {
996 				error = EINVAL;
997 				goto done;
998 			}
999 
1000 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1001 		}
1002 
1003 		KQ_LOCK(kq);
1004 		if (kev->ident < kq->kq_knlistsize) {
1005 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1006 				if (kev->filter == kn->kn_filter)
1007 					break;
1008 		}
1009 	} else {
1010 		if ((kev->flags & EV_ADD) == EV_ADD)
1011 			kqueue_expand(kq, fops, kev->ident, waitok);
1012 
1013 		KQ_LOCK(kq);
1014 		if (kq->kq_knhashmask != 0) {
1015 			struct klist *list;
1016 
1017 			list = &kq->kq_knhash[
1018 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1019 			SLIST_FOREACH(kn, list, kn_link)
1020 				if (kev->ident == kn->kn_id &&
1021 				    kev->filter == kn->kn_filter)
1022 					break;
1023 		}
1024 	}
1025 
1026 	/* knote is in the process of changing, wait for it to stabilize. */
1027 	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1028 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1029 		kq->kq_state |= KQ_FLUXWAIT;
1030 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1031 		if (fp != NULL) {
1032 			fdrop(fp, td);
1033 			fp = NULL;
1034 		}
1035 		goto findkn;
1036 	}
1037 
1038 	/*
1039 	 * kn now contains the matching knote, or NULL if no match
1040 	 */
1041 	if (kn == NULL) {
1042 		if (kev->flags & EV_ADD) {
1043 			kn = tkn;
1044 			tkn = NULL;
1045 			if (kn == NULL) {
1046 				KQ_UNLOCK(kq);
1047 				error = ENOMEM;
1048 				goto done;
1049 			}
1050 			kn->kn_fp = fp;
1051 			kn->kn_kq = kq;
1052 			kn->kn_fop = fops;
1053 			/*
1054 			 * apply reference counts to knote structure, and
1055 			 * do not release it at the end of this routine.
1056 			 */
1057 			fops = NULL;
1058 			fp = NULL;
1059 
1060 			kn->kn_sfflags = kev->fflags;
1061 			kn->kn_sdata = kev->data;
1062 			kev->fflags = 0;
1063 			kev->data = 0;
1064 			kn->kn_kevent = *kev;
1065 			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1066 			    EV_ENABLE | EV_DISABLE);
1067 			kn->kn_status = KN_INFLUX|KN_DETACHED;
1068 
1069 			error = knote_attach(kn, kq);
1070 			KQ_UNLOCK(kq);
1071 			if (error != 0) {
1072 				tkn = kn;
1073 				goto done;
1074 			}
1075 
1076 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1077 				knote_drop(kn, td);
1078 				goto done;
1079 			}
1080 			KN_LIST_LOCK(kn);
1081 			goto done_ev_add;
1082 		} else {
1083 			/* No matching knote and the EV_ADD flag is not set. */
1084 			KQ_UNLOCK(kq);
1085 			error = ENOENT;
1086 			goto done;
1087 		}
1088 	}
1089 
1090 	if (kev->flags & EV_DELETE) {
1091 		kn->kn_status |= KN_INFLUX;
1092 		KQ_UNLOCK(kq);
1093 		if (!(kn->kn_status & KN_DETACHED))
1094 			kn->kn_fop->f_detach(kn);
1095 		knote_drop(kn, td);
1096 		goto done;
1097 	}
1098 
1099 	/*
1100 	 * The user may change some filter values after the initial EV_ADD,
1101 	 * but doing so will not reset any filter which has already been
1102 	 * triggered.
1103 	 */
1104 	kn->kn_status |= KN_INFLUX;
1105 	KQ_UNLOCK(kq);
1106 	KN_LIST_LOCK(kn);
1107 	kn->kn_kevent.udata = kev->udata;
1108 	if (!fops->f_isfd && fops->f_touch != NULL) {
1109 		fops->f_touch(kn, kev, EVENT_REGISTER);
1110 	} else {
1111 		kn->kn_sfflags = kev->fflags;
1112 		kn->kn_sdata = kev->data;
1113 	}
1114 
1115 	/*
1116 	 * We can get here with kn->kn_knlist == NULL.  This can happen when
1117 	 * the initial attach event decides that the event is "completed"
1118 	 * already, e.g. filt_procattach is called on a zombie process.  It
1119 	 * will call filt_proc which will remove it from the list, and NULL
1120 	 * kn_knlist.
1121 	 */
1122 done_ev_add:
1123 	event = kn->kn_fop->f_event(kn, 0);
1124 	KQ_LOCK(kq);
1125 	if (event)
1126 		KNOTE_ACTIVATE(kn, 1);
1127 	kn->kn_status &= ~KN_INFLUX;
1128 	KN_LIST_UNLOCK(kn);
1129 
1130 	if ((kev->flags & EV_DISABLE) &&
1131 	    ((kn->kn_status & KN_DISABLED) == 0)) {
1132 		kn->kn_status |= KN_DISABLED;
1133 	}
1134 
1135 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1136 		kn->kn_status &= ~KN_DISABLED;
1137 		if ((kn->kn_status & KN_ACTIVE) &&
1138 		    ((kn->kn_status & KN_QUEUED) == 0))
1139 			knote_enqueue(kn);
1140 	}
1141 	KQ_UNLOCK_FLUX(kq);
1142 
1143 done:
1144 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1145 	if (fp != NULL)
1146 		fdrop(fp, td);
1147 	if (tkn != NULL)
1148 		knote_free(tkn);
1149 	if (fops != NULL)
1150 		kqueue_fo_release(filt);
1151 	return (error);
1152 }
1153 
1154 static int
1155 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1156 {
1157 	int error;
1158 	struct kqueue *kq;
1159 
1160 	error = 0;
1161 
1162 	kq = fp->f_data;
1163 	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1164 		return (EBADF);
1165 	*kqp = kq;
1166 	KQ_LOCK(kq);
1167 	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1168 		KQ_UNLOCK(kq);
1169 		return (EBADF);
1170 	}
1171 	kq->kq_refcnt++;
1172 	KQ_UNLOCK(kq);
1173 
1174 	return error;
1175 }
1176 
1177 static void
1178 kqueue_release(struct kqueue *kq, int locked)
1179 {
1180 	if (locked)
1181 		KQ_OWNED(kq);
1182 	else
1183 		KQ_LOCK(kq);
1184 	kq->kq_refcnt--;
1185 	if (kq->kq_refcnt == 1)
1186 		wakeup(&kq->kq_refcnt);
1187 	if (!locked)
1188 		KQ_UNLOCK(kq);
1189 }
1190 
1191 static void
1192 kqueue_schedtask(struct kqueue *kq)
1193 {
1194 
1195 	KQ_OWNED(kq);
1196 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1197 	    ("scheduling kqueue task while draining"));
1198 
1199 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1200 		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
1201 		kq->kq_state |= KQ_TASKSCHED;
1202 	}
1203 }
1204 
1205 /*
1206  * Expand the kq to make sure we have storage for fops/ident pair.
1207  *
1208  * Return 0 on success (or no work necessary), return errno on failure.
1209  *
1210  * Not calling hashinit with the waitok-appropriate malloc flag should
1211  * be safe: if kqueue_register is called from a non-fd context, there
1212  * usually should be no locks held.
1213  */
1214 static int
1215 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1216 	int waitok)
1217 {
1218 	struct klist *list, *tmp_knhash;
1219 	u_long tmp_knhashmask;
1220 	int size;
1221 	int fd;
1222 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
1223 
1224 	KQ_NOTOWNED(kq);
1225 
1226 	if (fops->f_isfd) {
1227 		fd = ident;
1228 		if (kq->kq_knlistsize <= fd) {
1229 			size = kq->kq_knlistsize;
1230 			while (size <= fd)
1231 				size += KQEXTENT;
1232 			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1233 			if (list == NULL)
1234 				return ENOMEM;
1235 			KQ_LOCK(kq);
1236 			if (kq->kq_knlistsize > fd) {
1237 				free(list, M_KQUEUE);
1238 				list = NULL;
1239 			} else {
1240 				if (kq->kq_knlist != NULL) {
1241 					bcopy(kq->kq_knlist, list,
1242 					    kq->kq_knlistsize * sizeof(*list));
1243 					free(kq->kq_knlist, M_KQUEUE);
1244 					kq->kq_knlist = NULL;
1245 				}
1246 				bzero((caddr_t)list +
1247 				    kq->kq_knlistsize * sizeof(*list),
1248 				    (size - kq->kq_knlistsize) * sizeof(*list));
1249 				kq->kq_knlistsize = size;
1250 				kq->kq_knlist = list;
1251 			}
1252 			KQ_UNLOCK(kq);
1253 		}
1254 	} else {
1255 		if (kq->kq_knhashmask == 0) {
1256 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1257 			    &tmp_knhashmask);
1258 			if (tmp_knhash == NULL)
1259 				return ENOMEM;
1260 			KQ_LOCK(kq);
1261 			if (kq->kq_knhashmask == 0) {
1262 				kq->kq_knhash = tmp_knhash;
1263 				kq->kq_knhashmask = tmp_knhashmask;
1264 			} else {
1265 				free(tmp_knhash, M_KQUEUE);
1266 			}
1267 			KQ_UNLOCK(kq);
1268 		}
1269 	}
1270 
1271 	KQ_NOTOWNED(kq);
1272 	return 0;
1273 }
1274 
1275 static void
1276 kqueue_task(void *arg, int pending)
1277 {
1278 	struct kqueue *kq;
1279 	int haskqglobal;
1280 
1281 	haskqglobal = 0;
1282 	kq = arg;
1283 
1284 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1285 	KQ_LOCK(kq);
1286 
1287 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1288 
1289 	kq->kq_state &= ~KQ_TASKSCHED;
1290 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1291 		wakeup(&kq->kq_state);
1292 	}
1293 	KQ_UNLOCK(kq);
1294 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1295 }
1296 
1297 /*
1298  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1299  * We treat KN_MARKER knotes as if they are INFLUX.
1300  */
1301 static int
1302 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1303     const struct timespec *tsp, struct kevent *keva, struct thread *td)
1304 {
1305 	struct kevent *kevp;
1306 	struct timeval atv, rtv, ttv;
1307 	struct knote *kn, *marker;
1308 	int count, timeout, nkev, error, influx;
1309 	int haskqglobal, touch;
1310 
1311 	count = maxevents;
1312 	nkev = 0;
1313 	error = 0;
1314 	haskqglobal = 0;
1315 
1316 	if (maxevents == 0)
1317 		goto done_nl;
1318 
1319 	if (tsp != NULL) {
1320 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
1321 		if (itimerfix(&atv)) {
1322 			error = EINVAL;
1323 			goto done_nl;
1324 		}
1325 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1326 			timeout = -1;
1327 		else
1328 			timeout = atv.tv_sec > 24 * 60 * 60 ?
1329 			    24 * 60 * 60 * hz : tvtohz(&atv);
1330 		getmicrouptime(&rtv);
1331 		timevaladd(&atv, &rtv);
1332 	} else {
1333 		atv.tv_sec = 0;
1334 		atv.tv_usec = 0;
1335 		timeout = 0;
1336 	}
1337 	marker = knote_alloc(1);
1338 	if (marker == NULL) {
1339 		error = ENOMEM;
1340 		goto done_nl;
1341 	}
1342 	marker->kn_status = KN_MARKER;
1343 	KQ_LOCK(kq);
1344 	goto start;
1345 
1346 retry:
1347 	if (atv.tv_sec || atv.tv_usec) {
1348 		getmicrouptime(&rtv);
1349 		if (timevalcmp(&rtv, &atv, >=))
1350 			goto done;
1351 		ttv = atv;
1352 		timevalsub(&ttv, &rtv);
1353 		timeout = ttv.tv_sec > 24 * 60 * 60 ?
1354 			24 * 60 * 60 * hz : tvtohz(&ttv);
1355 	}
1356 
1357 start:
1358 	kevp = keva;
1359 	if (kq->kq_count == 0) {
1360 		if (timeout < 0) {
1361 			error = EWOULDBLOCK;
1362 		} else {
1363 			kq->kq_state |= KQ_SLEEP;
1364 			error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
1365 			    "kqread", timeout);
1366 		}
1367 		if (error == 0)
1368 			goto retry;
1369 		/* don't restart after signals... */
1370 		if (error == ERESTART)
1371 			error = EINTR;
1372 		else if (error == EWOULDBLOCK)
1373 			error = 0;
1374 		goto done;
1375 	}
1376 
1377 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1378 	influx = 0;
1379 	while (count) {
1380 		KQ_OWNED(kq);
1381 		kn = TAILQ_FIRST(&kq->kq_head);
1382 
1383 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1384 		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1385 			if (influx) {
1386 				influx = 0;
1387 				KQ_FLUX_WAKEUP(kq);
1388 			}
1389 			kq->kq_state |= KQ_FLUXWAIT;
1390 			error = msleep(kq, &kq->kq_lock, PSOCK,
1391 			    "kqflxwt", 0);
1392 			continue;
1393 		}
1394 
1395 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1396 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1397 			kn->kn_status &= ~KN_QUEUED;
1398 			kq->kq_count--;
1399 			continue;
1400 		}
1401 		if (kn == marker) {
1402 			KQ_FLUX_WAKEUP(kq);
1403 			if (count == maxevents)
1404 				goto retry;
1405 			goto done;
1406 		}
1407 		KASSERT((kn->kn_status & KN_INFLUX) == 0,
1408 		    ("KN_INFLUX set when not suppose to be"));
1409 
1410 		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1411 			kn->kn_status &= ~KN_QUEUED;
1412 			kn->kn_status |= KN_INFLUX;
1413 			kq->kq_count--;
1414 			KQ_UNLOCK(kq);
1415 			/*
1416 			 * We don't need to lock the list since we've marked
1417 			 * it _INFLUX.
1418 			 */
1419 			*kevp = kn->kn_kevent;
1420 			if (!(kn->kn_status & KN_DETACHED))
1421 				kn->kn_fop->f_detach(kn);
1422 			knote_drop(kn, td);
1423 			KQ_LOCK(kq);
1424 			kn = NULL;
1425 		} else {
1426 			kn->kn_status |= KN_INFLUX;
1427 			KQ_UNLOCK(kq);
1428 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1429 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1430 			KN_LIST_LOCK(kn);
1431 			if (kn->kn_fop->f_event(kn, 0) == 0) {
1432 				KQ_LOCK(kq);
1433 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1434 				kn->kn_status &=
1435 				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
1436 				kq->kq_count--;
1437 				KN_LIST_UNLOCK(kn);
1438 				influx = 1;
1439 				continue;
1440 			}
1441 			touch = (!kn->kn_fop->f_isfd &&
1442 			    kn->kn_fop->f_touch != NULL);
1443 			if (touch)
1444 				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
1445 			else
1446 				*kevp = kn->kn_kevent;
1447 			KQ_LOCK(kq);
1448 			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1449 			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1450 				/*
1451 				 * Manually clear knotes that weren't
1452 				 * 'touch'ed.
1453 				 */
1454 				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
1455 					kn->kn_data = 0;
1456 					kn->kn_fflags = 0;
1457 				}
1458 				if (kn->kn_flags & EV_DISPATCH)
1459 					kn->kn_status |= KN_DISABLED;
1460 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1461 				kq->kq_count--;
1462 			} else
1463 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1464 
1465 			kn->kn_status &= ~(KN_INFLUX);
1466 			KN_LIST_UNLOCK(kn);
1467 			influx = 1;
1468 		}
1469 
1470 		/* we are returning a copy to the user */
1471 		kevp++;
1472 		nkev++;
1473 		count--;
1474 
1475 		if (nkev == KQ_NEVENTS) {
1476 			influx = 0;
1477 			KQ_UNLOCK_FLUX(kq);
1478 			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1479 			nkev = 0;
1480 			kevp = keva;
1481 			KQ_LOCK(kq);
1482 			if (error)
1483 				break;
1484 		}
1485 	}
1486 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1487 done:
1488 	KQ_OWNED(kq);
1489 	KQ_UNLOCK_FLUX(kq);
1490 	knote_free(marker);
1491 done_nl:
1492 	KQ_NOTOWNED(kq);
1493 	if (nkev != 0)
1494 		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1495 	td->td_retval[0] = maxevents - count;
1496 	return (error);
1497 }
1498 
1499 /*
1500  * XXX
1501  * This could be expanded to call kqueue_scan, if desired.
1502  */
1503 /*ARGSUSED*/
1504 static int
1505 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
1506 	int flags, struct thread *td)
1507 {
1508 	return (ENXIO);
1509 }
1510 
1511 /*ARGSUSED*/
1512 static int
1513 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1514 	 int flags, struct thread *td)
1515 {
1516 	return (ENXIO);
1517 }
1518 
1519 /*ARGSUSED*/
1520 static int
1521 kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1522 	struct thread *td)
1523 {
1524 
1525 	return (EINVAL);
1526 }
1527 
1528 /*ARGSUSED*/
1529 static int
1530 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1531 	struct ucred *active_cred, struct thread *td)
1532 {
1533 	/*
1534 	 * Enabling sigio causes two major problems:
1535 	 * 1) infinite recursion:
1536 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
1537 	 * set.  On receipt of a signal this will cause the kqueue to recurse
1538 	 * into itself over and over.  Sending the sigio causes the kqueue
1539 	 * to become ready, which in turn posts sigio again, forever.
1540 	 * Solution: this can be solved by setting a flag in the kqueue that
1541 	 * we have a SIGIO in progress.
1542 	 * 2) locking problems:
1543 	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
1544 	 * us above the proc and pgrp locks.
1545 	 * Solution: Post a signal using an async mechanism, being sure to
1546 	 * record a generation count in the delivery so that we do not deliver
1547 	 * a signal to the wrong process.
1548 	 *
1549 	 * Note that these two mechanisms are somewhat mutually exclusive!
1550 	 */
1551 #if 0
1552 	struct kqueue *kq;
1553 
1554 	kq = fp->f_data;
1555 	switch (cmd) {
1556 	case FIOASYNC:
1557 		if (*(int *)data) {
1558 			kq->kq_state |= KQ_ASYNC;
1559 		} else {
1560 			kq->kq_state &= ~KQ_ASYNC;
1561 		}
1562 		return (0);
1563 
1564 	case FIOSETOWN:
1565 		return (fsetown(*(int *)data, &kq->kq_sigio));
1566 
1567 	case FIOGETOWN:
1568 		*(int *)data = fgetown(&kq->kq_sigio);
1569 		return (0);
1570 	}
1571 #endif
1572 
1573 	return (ENOTTY);
1574 }
1575 
1576 /*ARGSUSED*/
1577 static int
1578 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1579 	struct thread *td)
1580 {
1581 	struct kqueue *kq;
1582 	int revents = 0;
1583 	int error;
1584 
1585 	if ((error = kqueue_acquire(fp, &kq)))
1586 		return POLLERR;
1587 
1588 	KQ_LOCK(kq);
1589 	if (events & (POLLIN | POLLRDNORM)) {
1590 		if (kq->kq_count) {
1591 			revents |= events & (POLLIN | POLLRDNORM);
1592 		} else {
1593 			selrecord(td, &kq->kq_sel);
1594 			if (SEL_WAITING(&kq->kq_sel))
1595 				kq->kq_state |= KQ_SEL;
1596 		}
1597 	}
1598 	kqueue_release(kq, 1);
1599 	KQ_UNLOCK(kq);
1600 	return (revents);
1601 }
1602 
1603 /*ARGSUSED*/
1604 static int
1605 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1606 	struct thread *td)
1607 {
1608 
1609 	bzero((void *)st, sizeof *st);
1610 	/*
1611 	 * We no longer return kq_count because the unlocked value is useless.
1612 	 * If you spent all this time getting the count, why not spend your
1613 	 * syscall better by calling kevent?
1614 	 *
1615 	 * XXX - This is needed for libc_r.
1616 	 */
1617 	st->st_mode = S_IFIFO;
1618 	return (0);
1619 }
1620 
1621 /*ARGSUSED*/
1622 static int
1623 kqueue_close(struct file *fp, struct thread *td)
1624 {
1625 	struct kqueue *kq = fp->f_data;
1626 	struct filedesc *fdp;
1627 	struct knote *kn;
1628 	int i;
1629 	int error;
1630 
1631 	if ((error = kqueue_acquire(fp, &kq)))
1632 		return error;
1633 
1634 	KQ_LOCK(kq);
1635 
1636 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1637 	    ("kqueue already closing"));
1638 	kq->kq_state |= KQ_CLOSING;
1639 	if (kq->kq_refcnt > 1)
1640 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1641 
1642 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1643 	fdp = kq->kq_fdp;
1644 
1645 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
1646 	    ("kqueue's knlist not empty"));
1647 
1648 	for (i = 0; i < kq->kq_knlistsize; i++) {
1649 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1650 			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1651 				kq->kq_state |= KQ_FLUXWAIT;
1652 				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
1653 				continue;
1654 			}
1655 			kn->kn_status |= KN_INFLUX;
1656 			KQ_UNLOCK(kq);
1657 			if (!(kn->kn_status & KN_DETACHED))
1658 				kn->kn_fop->f_detach(kn);
1659 			knote_drop(kn, td);
1660 			KQ_LOCK(kq);
1661 		}
1662 	}
1663 	if (kq->kq_knhashmask != 0) {
1664 		for (i = 0; i <= kq->kq_knhashmask; i++) {
1665 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1666 				if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1667 					kq->kq_state |= KQ_FLUXWAIT;
1668 					msleep(kq, &kq->kq_lock, PSOCK,
1669 					       "kqclo2", 0);
1670 					continue;
1671 				}
1672 				kn->kn_status |= KN_INFLUX;
1673 				KQ_UNLOCK(kq);
1674 				if (!(kn->kn_status & KN_DETACHED))
1675 					kn->kn_fop->f_detach(kn);
1676 				knote_drop(kn, td);
1677 				KQ_LOCK(kq);
1678 			}
1679 		}
1680 	}
1681 
1682 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1683 		kq->kq_state |= KQ_TASKDRAIN;
1684 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1685 	}
1686 
1687 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1688 		selwakeuppri(&kq->kq_sel, PSOCK);
1689 		if (!SEL_WAITING(&kq->kq_sel))
1690 			kq->kq_state &= ~KQ_SEL;
1691 	}
1692 
1693 	KQ_UNLOCK(kq);
1694 
1695 	FILEDESC_XLOCK(fdp);
1696 	SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
1697 	FILEDESC_XUNLOCK(fdp);
1698 
1699 	knlist_destroy(&kq->kq_sel.si_note);
1700 	mtx_destroy(&kq->kq_lock);
1701 	kq->kq_fdp = NULL;
1702 
1703 	if (kq->kq_knhash != NULL)
1704 		free(kq->kq_knhash, M_KQUEUE);
1705 	if (kq->kq_knlist != NULL)
1706 		free(kq->kq_knlist, M_KQUEUE);
1707 
1708 	funsetown(&kq->kq_sigio);
1709 	free(kq, M_KQUEUE);
1710 	fp->f_data = NULL;
1711 
1712 	return (0);
1713 }
1714 
1715 static void
1716 kqueue_wakeup(struct kqueue *kq)
1717 {
1718 	KQ_OWNED(kq);
1719 
1720 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1721 		kq->kq_state &= ~KQ_SLEEP;
1722 		wakeup(kq);
1723 	}
1724 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1725 		selwakeuppri(&kq->kq_sel, PSOCK);
1726 		if (!SEL_WAITING(&kq->kq_sel))
1727 			kq->kq_state &= ~KQ_SEL;
1728 	}
1729 	if (!knlist_empty(&kq->kq_sel.si_note))
1730 		kqueue_schedtask(kq);
1731 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1732 		pgsigio(&kq->kq_sigio, SIGIO, 0);
1733 	}
1734 }
1735 
1736 /*
1737  * Walk down a list of knotes, activating them if their event has triggered.
1738  *
1739  * There is a possibility to optimize in the case of one kq watching another.
1740  * Instead of scheduling a task to wake it up, you could pass enough state
1741  * down the chain to make up the parent kqueue.  Make this code functional
1742  * first.
1743  */
1744 void
1745 knote(struct knlist *list, long hint, int lockflags)
1746 {
1747 	struct kqueue *kq;
1748 	struct knote *kn;
1749 	int error;
1750 
1751 	if (list == NULL)
1752 		return;
1753 
1754 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
1755 
1756 	if ((lockflags & KNF_LISTLOCKED) == 0)
1757 		list->kl_lock(list->kl_lockarg);
1758 
1759 	/*
1760 	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
1761 	 * the kqueue scheduling, but this will introduce four
1762 	 * lock/unlock's for each knote to test.  If we do, continue to use
1763 	 * SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, as it
1764 	 * is only safe if you want to remove the current item, which we are
1765 	 * not doing.
1766 	 */
1767 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
1768 		kq = kn->kn_kq;
1769 		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1770 			KQ_LOCK(kq);
1771 			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1772 				KQ_UNLOCK(kq);
1773 			} else if ((lockflags & KNF_NOKQLOCK) != 0) {
1774 				kn->kn_status |= KN_INFLUX;
1775 				KQ_UNLOCK(kq);
1776 				error = kn->kn_fop->f_event(kn, hint);
1777 				KQ_LOCK(kq);
1778 				kn->kn_status &= ~KN_INFLUX;
1779 				if (error)
1780 					KNOTE_ACTIVATE(kn, 1);
1781 				KQ_UNLOCK_FLUX(kq);
1782 			} else {
1783 				kn->kn_status |= KN_HASKQLOCK;
1784 				if (kn->kn_fop->f_event(kn, hint))
1785 					KNOTE_ACTIVATE(kn, 1);
1786 				kn->kn_status &= ~KN_HASKQLOCK;
1787 				KQ_UNLOCK(kq);
1788 			}
1789 		}
1790 		kq = NULL;
1791 	}
1792 	if ((lockflags & KNF_LISTLOCKED) == 0)
1793 		list->kl_unlock(list->kl_lockarg);
1794 }
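
/*
 * Illustrative sketch (not part of the original file; all mydev_* names
 * are hypothetical): a typical fd-backed event source initializes its
 * knlist with knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx), adds
 * knotes in its kqfilter method, and calls knote() whenever new data
 * arrives.
 */
#if 0
struct mydev_softc {
	struct mtx	sc_mtx;
	struct selinfo	sc_rsel;
	size_t		sc_avail;	/* bytes ready for reading */
};

static void
mydev_rdetach(struct knote *kn)
{
	struct mydev_softc *sc = kn->kn_hook;

	knlist_remove(&sc->sc_rsel.si_note, kn, 0);
}

static int
mydev_revent(struct knote *kn, long hint)
{
	struct mydev_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_avail;
	return (kn->kn_data > 0);
}

static struct filterops mydev_rfiltops = {
	.f_isfd = 1,
	.f_detach = mydev_rdetach,
	.f_event = mydev_revent,
};

static int
mydev_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct mydev_softc *sc = dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);
	kn->kn_fop = &mydev_rfiltops;
	kn->kn_hook = sc;
	knlist_add(&sc->sc_rsel.si_note, kn, 0);
	return (0);
}

	/* Producer side, with sc_mtx (the knlist lock) held: */
	knote(&sc->sc_rsel.si_note, 0, KNF_LISTLOCKED);
#endif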
1795 
1796 /*
1797  * add a knote to a knlist
1798  */
1799 void
1800 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
1801 {
1802 	KNL_ASSERT_LOCK(knl, islocked);
1803 	KQ_NOTOWNED(kn->kn_kq);
1804 	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
1805 	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
1806 	if (!islocked)
1807 		knl->kl_lock(knl->kl_lockarg);
1808 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
1809 	if (!islocked)
1810 		knl->kl_unlock(knl->kl_lockarg);
1811 	KQ_LOCK(kn->kn_kq);
1812 	kn->kn_knlist = knl;
1813 	kn->kn_status &= ~KN_DETACHED;
1814 	KQ_UNLOCK(kn->kn_kq);
1815 }
1816 
1817 static void
1818 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
1819 {
1820 	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
1821 	KNL_ASSERT_LOCK(knl, knlislocked);
1822 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
1823 	if (!kqislocked)
1824 		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
1825     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
1826 	if (!knlislocked)
1827 		knl->kl_lock(knl->kl_lockarg);
1828 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
1829 	kn->kn_knlist = NULL;
1830 	if (!knlislocked)
1831 		knl->kl_unlock(knl->kl_lockarg);
1832 	if (!kqislocked)
1833 		KQ_LOCK(kn->kn_kq);
1834 	kn->kn_status |= KN_DETACHED;
1835 	if (!kqislocked)
1836 		KQ_UNLOCK(kn->kn_kq);
1837 }
1838 
1839 /*
1840  * remove all knotes from a specified klist
1841  */
1842 void
1843 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
1844 {
1845 
1846 	knlist_remove_kq(knl, kn, islocked, 0);
1847 }
1848 
1849 /*
1850  * remove a knote from a specified knlist while in an f_event handler.
1851  */
1852 void
1853 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
1854 {
1855 
1856 	knlist_remove_kq(knl, kn, 1,
1857 	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
1858 }
1859 
1860 int
1861 knlist_empty(struct knlist *knl)
1862 {
1863 	KNL_ASSERT_LOCKED(knl);
1864 	return SLIST_EMPTY(&knl->kl_list);
1865 }
1866 
1867 static struct mtx	knlist_lock;
1868 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
1869 	MTX_DEF);
1870 static void knlist_mtx_lock(void *arg);
1871 static void knlist_mtx_unlock(void *arg);
1872 
1873 static void
1874 knlist_mtx_lock(void *arg)
1875 {
1876 	mtx_lock((struct mtx *)arg);
1877 }
1878 
1879 static void
1880 knlist_mtx_unlock(void *arg)
1881 {
1882 	mtx_unlock((struct mtx *)arg);
1883 }
1884 
1885 static void
1886 knlist_mtx_assert_locked(void *arg)
1887 {
1888 	mtx_assert((struct mtx *)arg, MA_OWNED);
1889 }
1890 
1891 static void
1892 knlist_mtx_assert_unlocked(void *arg)
1893 {
1894 	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
1895 }
1896 
1897 void
1898 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
1899     void (*kl_unlock)(void *),
1900     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
1901 {
1902 
1903 	if (lock == NULL)
1904 		knl->kl_lockarg = &knlist_lock;
1905 	else
1906 		knl->kl_lockarg = lock;
1907 
1908 	if (kl_lock == NULL)
1909 		knl->kl_lock = knlist_mtx_lock;
1910 	else
1911 		knl->kl_lock = kl_lock;
1912 	if (kl_unlock == NULL)
1913 		knl->kl_unlock = knlist_mtx_unlock;
1914 	else
1915 		knl->kl_unlock = kl_unlock;
1916 	if (kl_assert_locked == NULL)
1917 		knl->kl_assert_locked = knlist_mtx_assert_locked;
1918 	else
1919 		knl->kl_assert_locked = kl_assert_locked;
1920 	if (kl_assert_unlocked == NULL)
1921 		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
1922 	else
1923 		knl->kl_assert_unlocked = kl_assert_unlocked;
1924 
1925 	SLIST_INIT(&knl->kl_list);
1926 }
1927 
1928 void
1929 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
1930 {
1931 
1932 	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
1933 }
1934 
1935 void
1936 knlist_destroy(struct knlist *knl)
1937 {
1938 
1939 #ifdef INVARIANTS
1940 	/*
1941 	 * if we run across this error, we need to find the offending
1942 	 * driver and have it call knlist_clear.
1943 	 */
1944 	if (!SLIST_EMPTY(&knl->kl_list))
1945 		printf("WARNING: destroying knlist w/ knotes on it!\n");
1946 #endif
1947 
1948 	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
1949 	SLIST_INIT(&knl->kl_list);
1950 }
1951 
1952 /*
1953  * Even if we are locked, we may need to drop the lock to allow any influx
1954  * knotes time to "settle".
1955  */
1956 void
1957 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
1958 {
1959 	struct knote *kn, *kn2;
1960 	struct kqueue *kq;
1961 
1962 	if (islocked)
1963 		KNL_ASSERT_LOCKED(knl);
1964 	else {
1965 		KNL_ASSERT_UNLOCKED(knl);
1966 again:		/* need to reacquire lock since we have dropped it */
1967 		knl->kl_lock(knl->kl_lockarg);
1968 	}
1969 
1970 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
1971 		kq = kn->kn_kq;
1972 		KQ_LOCK(kq);
1973 		if ((kn->kn_status & KN_INFLUX)) {
1974 			KQ_UNLOCK(kq);
1975 			continue;
1976 		}
1977 		knlist_remove_kq(knl, kn, 1, 1);
1978 		if (killkn) {
1979 			kn->kn_status |= KN_INFLUX | KN_DETACHED;
1980 			KQ_UNLOCK(kq);
1981 			knote_drop(kn, td);
1982 		} else {
1983 			/* Make sure cleared knotes disappear soon */
1984 			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1985 			KQ_UNLOCK(kq);
1986 		}
1987 		kq = NULL;
1988 	}
1989 
1990 	if (!SLIST_EMPTY(&knl->kl_list)) {
1991 		/* there are still KN_INFLUX remaining */
1992 		kn = SLIST_FIRST(&knl->kl_list);
1993 		kq = kn->kn_kq;
1994 		KQ_LOCK(kq);
1995 		KASSERT(kn->kn_status & KN_INFLUX,
1996 		    ("knote removed w/o list lock"));
1997 		knl->kl_unlock(knl->kl_lockarg);
1998 		kq->kq_state |= KQ_FLUXWAIT;
1999 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2000 		kq = NULL;
2001 		goto again;
2002 	}
2003 
2004 	if (islocked)
2005 		KNL_ASSERT_LOCKED(knl);
2006 	else {
2007 		knl->kl_unlock(knl->kl_lockarg);
2008 		KNL_ASSERT_UNLOCKED(knl);
2009 	}
2010 }
2011 
2012 /*
2013  * Remove all knotes referencing a specified fd.  This must be called
2014  * with the FILEDESC lock held, which prevents a race where a new fd
2015  * comes along and occupies the entry while we attach a knote to the fd.
2016  */
2017 void
2018 knote_fdclose(struct thread *td, int fd)
2019 {
2020 	struct filedesc *fdp = td->td_proc->p_fd;
2021 	struct kqueue *kq;
2022 	struct knote *kn;
2023 	int influx;
2024 
2025 	FILEDESC_XLOCK_ASSERT(fdp);
2026 
2027 	/*
2028 	 * We shouldn't have to worry about new kevents appearing on fd
2029 	 * since filedesc is locked.
2030 	 */
2031 	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2032 		KQ_LOCK(kq);
2033 
2034 again:
2035 		influx = 0;
2036 		while (kq->kq_knlistsize > fd &&
2037 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2038 			if (kn->kn_status & KN_INFLUX) {
2039 				/* someone else might be waiting on our knote */
2040 				if (influx)
2041 					wakeup(kq);
2042 				kq->kq_state |= KQ_FLUXWAIT;
2043 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2044 				goto again;
2045 			}
2046 			kn->kn_status |= KN_INFLUX;
2047 			KQ_UNLOCK(kq);
2048 			if (!(kn->kn_status & KN_DETACHED))
2049 				kn->kn_fop->f_detach(kn);
2050 			knote_drop(kn, td);
2051 			influx = 1;
2052 			KQ_LOCK(kq);
2053 		}
2054 		KQ_UNLOCK_FLUX(kq);
2055 	}
2056 }
2057 
2058 static int
2059 knote_attach(struct knote *kn, struct kqueue *kq)
2060 {
2061 	struct klist *list;
2062 
2063 	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
2064 	KQ_OWNED(kq);
2065 
2066 	if (kn->kn_fop->f_isfd) {
2067 		if (kn->kn_id >= kq->kq_knlistsize)
2068 			return ENOMEM;
2069 		list = &kq->kq_knlist[kn->kn_id];
2070 	} else {
2071 		if (kq->kq_knhash == NULL)
2072 			return ENOMEM;
2073 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2074 	}
2075 
2076 	SLIST_INSERT_HEAD(list, kn, kn_link);
2077 
2078 	return 0;
2079 }
2080 
2081 /*
2082  * knote must already have been detached using the f_detach method.
2083  * No lock needs to be held; it is assumed that the KN_INFLUX flag is
2084  * set to prevent other threads from removing it.
2085  */
2086 static void
2087 knote_drop(struct knote *kn, struct thread *td)
2088 {
2089 	struct kqueue *kq;
2090 	struct klist *list;
2091 
2092 	kq = kn->kn_kq;
2093 
2094 	KQ_NOTOWNED(kq);
2095 	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
2096 	    ("knote_drop called without KN_INFLUX set in kn_status"));
2097 
2098 	KQ_LOCK(kq);
2099 	if (kn->kn_fop->f_isfd)
2100 		list = &kq->kq_knlist[kn->kn_id];
2101 	else
2102 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2103 
2104 	if (!SLIST_EMPTY(list))
2105 		SLIST_REMOVE(list, kn, knote, kn_link);
2106 	if (kn->kn_status & KN_QUEUED)
2107 		knote_dequeue(kn);
2108 	KQ_UNLOCK_FLUX(kq);
2109 
2110 	if (kn->kn_fop->f_isfd) {
2111 		fdrop(kn->kn_fp, td);
2112 		kn->kn_fp = NULL;
2113 	}
2114 	kqueue_fo_release(kn->kn_kevent.filter);
2115 	kn->kn_fop = NULL;
2116 	knote_free(kn);
2117 }
2118 
2119 static void
2120 knote_enqueue(struct knote *kn)
2121 {
2122 	struct kqueue *kq = kn->kn_kq;
2123 
2124 	KQ_OWNED(kn->kn_kq);
2125 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2126 
2127 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2128 	kn->kn_status |= KN_QUEUED;
2129 	kq->kq_count++;
2130 	kqueue_wakeup(kq);
2131 }
2132 
2133 static void
2134 knote_dequeue(struct knote *kn)
2135 {
2136 	struct kqueue *kq = kn->kn_kq;
2137 
2138 	KQ_OWNED(kn->kn_kq);
2139 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2140 
2141 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2142 	kn->kn_status &= ~KN_QUEUED;
2143 	kq->kq_count--;
2144 }
2145 
2146 static void
2147 knote_init(void)
2148 {
2149 
2150 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2151 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2152 }
2153 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2154 
2155 static struct knote *
2156 knote_alloc(int waitok)
2157 {
2158 	return ((struct knote *)uma_zalloc(knote_zone,
2159 	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
2160 }
2161 
2162 static void
2163 knote_free(struct knote *kn)
2164 {
2165 	if (kn != NULL)
2166 		uma_zfree(knote_zone, kn);
2167 }
2168 
2169 /*
2170  * Register the kev w/ the kq specified by fd.
2171  */
2172 int
2173 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2174 {
2175 	struct kqueue *kq;
2176 	struct file *fp;
2177 	int error;
2178 
2179 	if ((error = fget(td, fd, &fp)) != 0)
2180 		return (error);
2181 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2182 		goto noacquire;
2183 
2184 	error = kqueue_register(kq, kev, td, waitok);
2185 
2186 	kqueue_release(kq, 0);
2187 
2188 noacquire:
2189 	fdrop(fp, td);
2190 
2191 	return error;
2192 }
2193