xref: /freebsd/sys/kern/kern_event.c (revision e3aa18ad71782a73d3dd9dd3d526bbd2b607ca16)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
5  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
6  * Copyright (c) 2009 Apple, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ktrace.h"
35 #include "opt_kqueue.h"
36 
37 #ifdef COMPAT_FREEBSD11
38 #define	_WANT_FREEBSD11_KEVENT
39 #endif
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/capsicum.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 #include <sys/proc.h>
50 #include <sys/malloc.h>
51 #include <sys/unistd.h>
52 #include <sys/file.h>
53 #include <sys/filedesc.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/kthread.h>
57 #include <sys/selinfo.h>
58 #include <sys/queue.h>
59 #include <sys/event.h>
60 #include <sys/eventvar.h>
61 #include <sys/poll.h>
62 #include <sys/protosw.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sigio.h>
65 #include <sys/signalvar.h>
66 #include <sys/socket.h>
67 #include <sys/socketvar.h>
68 #include <sys/stat.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysproto.h>
71 #include <sys/syscallsubr.h>
72 #include <sys/taskqueue.h>
73 #include <sys/uio.h>
74 #include <sys/user.h>
75 #ifdef KTRACE
76 #include <sys/ktrace.h>
77 #endif
78 #include <machine/atomic.h>
79 
80 #include <vm/uma.h>
81 
82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
83 
84 /*
85  * This lock is used if multiple kq locks are required.  This possibly
86  * should be made into a per-proc lock.
87  */
88 static struct mtx	kq_global;
89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
90 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
91 	if (!haslck)				\
92 		mtx_lock(lck);			\
93 	haslck = 1;				\
94 } while (0)
95 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
96 	if (haslck)				\
97 		mtx_unlock(lck);		\
98 	haslck = 0;				\
99 } while (0)
100 
101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
102 
103 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
104 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
105 static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
106 		    struct thread *td, int mflag);
107 static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
108 static void	kqueue_release(struct kqueue *kq, int locked);
109 static void	kqueue_destroy(struct kqueue *kq);
110 static void	kqueue_drain(struct kqueue *kq, struct thread *td);
111 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
112 		    uintptr_t ident, int mflag);
113 static void	kqueue_task(void *arg, int pending);
114 static int	kqueue_scan(struct kqueue *kq, int maxevents,
115 		    struct kevent_copyops *k_ops,
116 		    const struct timespec *timeout,
117 		    struct kevent *keva, struct thread *td);
118 static void 	kqueue_wakeup(struct kqueue *kq);
119 static struct filterops *kqueue_fo_find(int filt);
120 static void	kqueue_fo_release(int filt);
121 struct g_kevent_args;
122 static int	kern_kevent_generic(struct thread *td,
123 		    struct g_kevent_args *uap,
124 		    struct kevent_copyops *k_ops, const char *struct_name);
125 
126 static fo_ioctl_t	kqueue_ioctl;
127 static fo_poll_t	kqueue_poll;
128 static fo_kqfilter_t	kqueue_kqfilter;
129 static fo_stat_t	kqueue_stat;
130 static fo_close_t	kqueue_close;
131 static fo_fill_kinfo_t	kqueue_fill_kinfo;
132 
133 static struct fileops kqueueops = {
134 	.fo_read = invfo_rdwr,
135 	.fo_write = invfo_rdwr,
136 	.fo_truncate = invfo_truncate,
137 	.fo_ioctl = kqueue_ioctl,
138 	.fo_poll = kqueue_poll,
139 	.fo_kqfilter = kqueue_kqfilter,
140 	.fo_stat = kqueue_stat,
141 	.fo_close = kqueue_close,
142 	.fo_chmod = invfo_chmod,
143 	.fo_chown = invfo_chown,
144 	.fo_sendfile = invfo_sendfile,
145 	.fo_fill_kinfo = kqueue_fill_kinfo,
146 };
147 
148 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
149 static void 	knote_drop(struct knote *kn, struct thread *td);
150 static void 	knote_drop_detached(struct knote *kn, struct thread *td);
151 static void 	knote_enqueue(struct knote *kn);
152 static void 	knote_dequeue(struct knote *kn);
153 static void 	knote_init(void);
154 static struct 	knote *knote_alloc(int mflag);
155 static void 	knote_free(struct knote *kn);
156 
157 static void	filt_kqdetach(struct knote *kn);
158 static int	filt_kqueue(struct knote *kn, long hint);
159 static int	filt_procattach(struct knote *kn);
160 static void	filt_procdetach(struct knote *kn);
161 static int	filt_proc(struct knote *kn, long hint);
162 static int	filt_fileattach(struct knote *kn);
163 static void	filt_timerexpire(void *knx);
164 static void	filt_timerexpire_l(struct knote *kn, bool proc_locked);
165 static int	filt_timerattach(struct knote *kn);
166 static void	filt_timerdetach(struct knote *kn);
167 static void	filt_timerstart(struct knote *kn, sbintime_t to);
168 static void	filt_timertouch(struct knote *kn, struct kevent *kev,
169 		    u_long type);
170 static int	filt_timervalidate(struct knote *kn, sbintime_t *to);
171 static int	filt_timer(struct knote *kn, long hint);
172 static int	filt_userattach(struct knote *kn);
173 static void	filt_userdetach(struct knote *kn);
174 static int	filt_user(struct knote *kn, long hint);
175 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
176 		    u_long type);
177 
178 static struct filterops file_filtops = {
179 	.f_isfd = 1,
180 	.f_attach = filt_fileattach,
181 };
182 static struct filterops kqread_filtops = {
183 	.f_isfd = 1,
184 	.f_detach = filt_kqdetach,
185 	.f_event = filt_kqueue,
186 };
187 /* XXX - move to kern_proc.c?  */
188 static struct filterops proc_filtops = {
189 	.f_isfd = 0,
190 	.f_attach = filt_procattach,
191 	.f_detach = filt_procdetach,
192 	.f_event = filt_proc,
193 };
194 static struct filterops timer_filtops = {
195 	.f_isfd = 0,
196 	.f_attach = filt_timerattach,
197 	.f_detach = filt_timerdetach,
198 	.f_event = filt_timer,
199 	.f_touch = filt_timertouch,
200 };
201 static struct filterops user_filtops = {
202 	.f_attach = filt_userattach,
203 	.f_detach = filt_userdetach,
204 	.f_event = filt_user,
205 	.f_touch = filt_usertouch,
206 };
207 
208 static uma_zone_t	knote_zone;
209 static unsigned int __exclusive_cache_line	kq_ncallouts;
210 static unsigned int 	kq_calloutmax = 4 * 1024;
211 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
212     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
213 
214 /* XXX - ensure not influx ? */
215 #define KNOTE_ACTIVATE(kn, islock) do { 				\
216 	if ((islock))							\
217 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
218 	else								\
219 		KQ_LOCK((kn)->kn_kq);					\
220 	(kn)->kn_status |= KN_ACTIVE;					\
221 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
222 		knote_enqueue((kn));					\
223 	if (!(islock))							\
224 		KQ_UNLOCK((kn)->kn_kq);					\
225 } while (0)
226 #define KQ_LOCK(kq) do {						\
227 	mtx_lock(&(kq)->kq_lock);					\
228 } while (0)
229 #define KQ_FLUX_WAKEUP(kq) do {						\
230 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
231 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
232 		wakeup((kq));						\
233 	}								\
234 } while (0)
235 #define KQ_UNLOCK_FLUX(kq) do {						\
236 	KQ_FLUX_WAKEUP(kq);						\
237 	mtx_unlock(&(kq)->kq_lock);					\
238 } while (0)
239 #define KQ_UNLOCK(kq) do {						\
240 	mtx_unlock(&(kq)->kq_lock);					\
241 } while (0)
242 #define KQ_OWNED(kq) do {						\
243 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
244 } while (0)
245 #define KQ_NOTOWNED(kq) do {						\
246 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
247 } while (0)
248 
249 static struct knlist *
250 kn_list_lock(struct knote *kn)
251 {
252 	struct knlist *knl;
253 
254 	knl = kn->kn_knlist;
255 	if (knl != NULL)
256 		knl->kl_lock(knl->kl_lockarg);
257 	return (knl);
258 }
259 
260 static void
261 kn_list_unlock(struct knlist *knl)
262 {
263 	bool do_free;
264 
265 	if (knl == NULL)
266 		return;
267 	do_free = knl->kl_autodestroy && knlist_empty(knl);
268 	knl->kl_unlock(knl->kl_lockarg);
269 	if (do_free) {
270 		knlist_destroy(knl);
271 		free(knl, M_KQUEUE);
272 	}
273 }
274 
275 static bool
276 kn_in_flux(struct knote *kn)
277 {
278 
279 	return (kn->kn_influx > 0);
280 }
281 
282 static void
283 kn_enter_flux(struct knote *kn)
284 {
285 
286 	KQ_OWNED(kn->kn_kq);
287 	MPASS(kn->kn_influx < INT_MAX);
288 	kn->kn_influx++;
289 }
290 
291 static bool
292 kn_leave_flux(struct knote *kn)
293 {
294 
295 	KQ_OWNED(kn->kn_kq);
296 	MPASS(kn->kn_influx > 0);
297 	kn->kn_influx--;
298 	return (kn->kn_influx == 0);
299 }
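/*
 * A sketch of the typical influx protocol (illustrative, condensed from
 * kqueue_register() and knote_fork() below): a thread marks a knote
 * in-flux while it drops the kq lock to do sleepable work, which keeps
 * other threads from modifying or freeing the knote in the meantime.
 *
 *	KQ_LOCK(kq);
 *	kn_enter_flux(kn);
 *	KQ_UNLOCK(kq);
 *	(sleepable work on kn, e.g. calling into the filter)
 *	KQ_LOCK(kq);
 *	kn_leave_flux(kn);
 *	KQ_UNLOCK_FLUX(kq);	(wakes any KQ_FLUXWAIT sleepers)
 */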
300 
301 #define	KNL_ASSERT_LOCK(knl, islocked) do {				\
302 	if (islocked)							\
303 		KNL_ASSERT_LOCKED(knl);				\
304 	else								\
305 		KNL_ASSERT_UNLOCKED(knl);				\
306 } while (0)
307 #ifdef INVARIANTS
308 #define	KNL_ASSERT_LOCKED(knl) do {					\
309 	knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED);		\
310 } while (0)
311 #define	KNL_ASSERT_UNLOCKED(knl) do {					\
312 	knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED);		\
313 } while (0)
314 #else /* !INVARIANTS */
315 #define	KNL_ASSERT_LOCKED(knl) do {} while (0)
316 #define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
317 #endif /* INVARIANTS */
318 
319 #ifndef	KN_HASHSIZE
320 #define	KN_HASHSIZE		64		/* XXX should be tunable */
321 #endif
322 
323 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
324 
325 static int
326 filt_nullattach(struct knote *kn)
327 {
328 
329 	return (ENXIO);
330 }
331 
332 struct filterops null_filtops = {
333 	.f_isfd = 0,
334 	.f_attach = filt_nullattach,
335 };
336 
337 /* XXX - make SYSINIT to add these, and move into respective modules. */
338 extern struct filterops sig_filtops;
339 extern struct filterops fs_filtops;
340 
341 /*
342  * Table for all system-defined filters.
343  */
344 static struct mtx	filterops_lock;
345 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
346 	MTX_DEF);
347 static struct {
348 	struct filterops *for_fop;
349 	int for_nolock;
350 	int for_refcnt;
351 } sysfilt_ops[EVFILT_SYSCOUNT] = {
352 	{ &file_filtops, 1 },			/* EVFILT_READ */
353 	{ &file_filtops, 1 },			/* EVFILT_WRITE */
354 	{ &null_filtops },			/* EVFILT_AIO */
355 	{ &file_filtops, 1 },			/* EVFILT_VNODE */
356 	{ &proc_filtops, 1 },			/* EVFILT_PROC */
357 	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
358 	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
359 	{ &file_filtops, 1 },			/* EVFILT_PROCDESC */
360 	{ &fs_filtops, 1 },			/* EVFILT_FS */
361 	{ &null_filtops },			/* EVFILT_LIO */
362 	{ &user_filtops, 1 },			/* EVFILT_USER */
363 	{ &null_filtops },			/* EVFILT_SENDFILE */
364 	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
365 };
366 
367 /*
368  * Simple redirection for all cdevsw-style objects to call their fo_kqfilter
369  * method.
370  */
371 static int
372 filt_fileattach(struct knote *kn)
373 {
374 
375 	return (fo_kqfilter(kn->kn_fp, kn));
376 }
377 
378 /*ARGSUSED*/
379 static int
380 kqueue_kqfilter(struct file *fp, struct knote *kn)
381 {
382 	struct kqueue *kq = kn->kn_fp->f_data;
383 
384 	if (kn->kn_filter != EVFILT_READ)
385 		return (EINVAL);
386 
387 	kn->kn_status |= KN_KQUEUE;
388 	kn->kn_fop = &kqread_filtops;
389 	knlist_add(&kq->kq_sel.si_note, kn, 0);
390 
391 	return (0);
392 }
393 
394 static void
395 filt_kqdetach(struct knote *kn)
396 {
397 	struct kqueue *kq = kn->kn_fp->f_data;
398 
399 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
400 }
401 
402 /*ARGSUSED*/
403 static int
404 filt_kqueue(struct knote *kn, long hint)
405 {
406 	struct kqueue *kq = kn->kn_fp->f_data;
407 
408 	kn->kn_data = kq->kq_count;
409 	return (kn->kn_data > 0);
410 }
411 
412 /* XXX - move to kern_proc.c?  */
413 static int
414 filt_procattach(struct knote *kn)
415 {
416 	struct proc *p;
417 	int error;
418 	bool exiting, immediate;
419 
420 	exiting = immediate = false;
421 	if (kn->kn_sfflags & NOTE_EXIT)
422 		p = pfind_any(kn->kn_id);
423 	else
424 		p = pfind(kn->kn_id);
425 	if (p == NULL)
426 		return (ESRCH);
427 	if (p->p_flag & P_WEXIT)
428 		exiting = true;
429 
430 	if ((error = p_cansee(curthread, p))) {
431 		PROC_UNLOCK(p);
432 		return (error);
433 	}
434 
435 	kn->kn_ptr.p_proc = p;
436 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
437 
438 	/*
439 	 * Internal flag indicating registration done by kernel for the
440 	 * purposes of getting a NOTE_CHILD notification.
441 	 */
442 	if (kn->kn_flags & EV_FLAG2) {
443 		kn->kn_flags &= ~EV_FLAG2;
444 		kn->kn_data = kn->kn_sdata;		/* ppid */
445 		kn->kn_fflags = NOTE_CHILD;
446 		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
447 		immediate = true; /* Force immediate activation of child note. */
448 	}
449 	/*
450 	 * Internal flag indicating registration done by kernel (for other than
451 	 * NOTE_CHILD).
452 	 */
453 	if (kn->kn_flags & EV_FLAG1) {
454 		kn->kn_flags &= ~EV_FLAG1;
455 	}
456 
457 	knlist_add(p->p_klist, kn, 1);
458 
459 	/*
460 	 * Immediately activate any child notes or, in the case of a zombie
461 	 * target process, exit notes.  The latter is necessary to handle the
462 	 * case where the target process, e.g. a child, dies before the kevent
463 	 * is registered.
464 	 */
465 	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
466 		KNOTE_ACTIVATE(kn, 0);
467 
468 	PROC_UNLOCK(p);
469 
470 	return (0);
471 }
472 
473 /*
474  * The knote may be attached to a different process, which may exit,
475  * leaving nothing for the knote to be attached to.  So when the process
476  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
477  * it will be deleted when read out.  However, as part of the knote deletion,
478  * this routine is called, so a check is needed to avoid actually performing
479  * a detach, because the original process does not exist any more.
480  */
481 /* XXX - move to kern_proc.c?  */
482 static void
483 filt_procdetach(struct knote *kn)
484 {
485 
486 	knlist_remove(kn->kn_knlist, kn, 0);
487 	kn->kn_ptr.p_proc = NULL;
488 }
489 
490 /* XXX - move to kern_proc.c?  */
491 static int
492 filt_proc(struct knote *kn, long hint)
493 {
494 	struct proc *p;
495 	u_int event;
496 
497 	p = kn->kn_ptr.p_proc;
498 	if (p == NULL) /* already activated, from attach filter */
499 		return (0);
500 
501 	/* Mask off extra data. */
502 	event = (u_int)hint & NOTE_PCTRLMASK;
503 
504 	/* If the user is interested in this event, record it. */
505 	if (kn->kn_sfflags & event)
506 		kn->kn_fflags |= event;
507 
508 	/* Process is gone, so flag the event as finished. */
509 	if (event == NOTE_EXIT) {
510 		kn->kn_flags |= EV_EOF | EV_ONESHOT;
511 		kn->kn_ptr.p_proc = NULL;
512 		if (kn->kn_fflags & NOTE_EXIT)
513 			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
514 		if (kn->kn_fflags == 0)
515 			kn->kn_flags |= EV_DROP;
516 		return (1);
517 	}
518 
519 	return (kn->kn_fflags != 0);
520 }
521 
522 /*
523  * Called when a process forks.  It mostly does the same as knote(),
524  * activating all knotes registered to be activated when the process
525  * forks.  Additionally, for each knote attached to the parent, check
526  * whether the user wants to track the new process.  If so, attach a
527  * new knote to it and immediately report an event with the child's
528  * pid.  (An illustrative userspace registration follows the function.)
529  */
530 void
531 knote_fork(struct knlist *list, int pid)
532 {
533 	struct kqueue *kq;
534 	struct knote *kn;
535 	struct kevent kev;
536 	int error;
537 
538 	MPASS(list != NULL);
539 	KNL_ASSERT_LOCKED(list);
540 	if (SLIST_EMPTY(&list->kl_list))
541 		return;
542 
543 	memset(&kev, 0, sizeof(kev));
544 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
545 		kq = kn->kn_kq;
546 		KQ_LOCK(kq);
547 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
548 			KQ_UNLOCK(kq);
549 			continue;
550 		}
551 
552 		/*
553 		 * The same as knote(), activate the event.
554 		 */
555 		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
556 			if (kn->kn_fop->f_event(kn, NOTE_FORK))
557 				KNOTE_ACTIVATE(kn, 1);
558 			KQ_UNLOCK(kq);
559 			continue;
560 		}
561 
562 		/*
563 		 * The NOTE_TRACK case. In addition to the activation
564 		 * of the event, we need to register new events to
565 		 * track the child. Drop the locks in preparation for
566 		 * the call to kqueue_register().
567 		 */
568 		kn_enter_flux(kn);
569 		KQ_UNLOCK(kq);
570 		list->kl_unlock(list->kl_lockarg);
571 
572 		/*
573 		 * Activate existing knote and register tracking knotes with
574 		 * new process.
575 		 *
576 		 * First register a knote to get just the child notice. This
577 		 * must be a separate note from a potential NOTE_EXIT
578 		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
579 		 * to use the data field (in conflicting ways).
580 		 */
581 		kev.ident = pid;
582 		kev.filter = kn->kn_filter;
583 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
584 		    EV_FLAG2;
585 		kev.fflags = kn->kn_sfflags;
586 		kev.data = kn->kn_id;		/* parent */
587 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
588 		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
589 		if (error)
590 			kn->kn_fflags |= NOTE_TRACKERR;
591 
592 		/*
593 		 * Then register another knote to track other potential events
594 		 * from the new process.
595 		 */
596 		kev.ident = pid;
597 		kev.filter = kn->kn_filter;
598 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
599 		kev.fflags = kn->kn_sfflags;
600 		kev.data = kn->kn_id;		/* parent */
601 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
602 		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
603 		if (error)
604 			kn->kn_fflags |= NOTE_TRACKERR;
605 		if (kn->kn_fop->f_event(kn, NOTE_FORK))
606 			KNOTE_ACTIVATE(kn, 0);
607 		list->kl_lock(list->kl_lockarg);
608 		KQ_LOCK(kq);
609 		kn_leave_flux(kn);
610 		KQ_UNLOCK_FLUX(kq);
611 	}
612 }
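/*
 * Illustrative userspace registration that exercises the NOTE_TRACK path
 * above (a sketch, not compiled here; error handling omitted):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, (uintptr_t)pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * When the target forks, the child is reported by a one-shot NOTE_CHILD
 * event whose data field holds the parent pid, exactly as set up by the
 * EV_FLAG2 registration in the loop above.
 */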
613 
614 /*
615  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
616  * interval timer support code.
617  */
618 
619 #define NOTE_TIMER_PRECMASK						\
620     (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
621 
622 static sbintime_t
623 timer2sbintime(int64_t data, int flags)
624 {
625 	int64_t secs;
626 
627 	/*
628 	 * Macros for converting to the fractional second portion of an
629 	 * sbintime_t using 64-bit multiplication to improve precision.
630 	 */
631 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
632 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
633 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
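	/*
	 * Worked example: MS_TO_SBT(500) evaluates to
	 * (500 * ((1ULL << 63) / 500)) >> 32 == 1ULL << 31, i.e. half of
	 * the 2^32 fractional units that make up one second of an
	 * sbintime_t.
	 */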
634 	switch (flags & NOTE_TIMER_PRECMASK) {
635 	case NOTE_SECONDS:
636 #ifdef __LP64__
637 		if (data > (SBT_MAX / SBT_1S))
638 			return (SBT_MAX);
639 #endif
640 		return ((sbintime_t)data << 32);
641 	case NOTE_MSECONDS: /* FALLTHROUGH */
642 	case 0:
643 		if (data >= 1000) {
644 			secs = data / 1000;
645 #ifdef __LP64__
646 			if (secs > (SBT_MAX / SBT_1S))
647 				return (SBT_MAX);
648 #endif
649 			return (secs << 32 | MS_TO_SBT(data % 1000));
650 		}
651 		return (MS_TO_SBT(data));
652 	case NOTE_USECONDS:
653 		if (data >= 1000000) {
654 			secs = data / 1000000;
655 #ifdef __LP64__
656 			if (secs > (SBT_MAX / SBT_1S))
657 				return (SBT_MAX);
658 #endif
659 			return (secs << 32 | US_TO_SBT(data % 1000000));
660 		}
661 		return (US_TO_SBT(data));
662 	case NOTE_NSECONDS:
663 		if (data >= 1000000000) {
664 			secs = data / 1000000000;
665 #ifdef __LP64__
666 			if (secs > (SBT_MAX / SBT_1S))
667 				return (SBT_MAX);
668 #endif
669 			return (secs << 32 | NS_TO_SBT(data % 1000000000));
670 		}
671 		return (NS_TO_SBT(data));
672 	default:
673 		break;
674 	}
675 	return (-1);
676 }
677 
678 struct kq_timer_cb_data {
679 	struct callout c;
680 	struct proc *p;
681 	struct knote *kn;
682 	int cpuid;
683 	int flags;
684 	TAILQ_ENTRY(kq_timer_cb_data) link;
685 	sbintime_t next;	/* next timer event fires at */
686 	sbintime_t to;		/* precalculated timer period, 0 for abs */
687 };
688 
689 #define	KQ_TIMER_CB_ENQUEUED	0x01
690 
691 static void
692 kqtimer_sched_callout(struct kq_timer_cb_data *kc)
693 {
694 	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
695 	    kc->cpuid, C_ABSOLUTE);
696 }
697 
698 void
699 kqtimer_proc_continue(struct proc *p)
700 {
701 	struct kq_timer_cb_data *kc, *kc1;
702 	struct bintime bt;
703 	sbintime_t now;
704 
705 	PROC_LOCK_ASSERT(p, MA_OWNED);
706 
707 	getboottimebin(&bt);
708 	now = bttosbt(bt);
709 
710 	TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
711 		TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
712 		kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
713 		if (kc->next <= now)
714 			filt_timerexpire_l(kc->kn, true);
715 		else
716 			kqtimer_sched_callout(kc);
717 	}
718 }
719 
720 static void
721 filt_timerexpire_l(struct knote *kn, bool proc_locked)
722 {
723 	struct kq_timer_cb_data *kc;
724 	struct proc *p;
725 	uint64_t delta;
726 	sbintime_t now;
727 
728 	kc = kn->kn_ptr.p_v;
729 
730 	if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
731 		kn->kn_data++;
732 		KNOTE_ACTIVATE(kn, 0);
733 		return;
734 	}
735 
736 	now = sbinuptime();
737 	if (now >= kc->next) {
738 		delta = (now - kc->next) / kc->to;
739 		if (delta == 0)
740 			delta = 1;
741 		kn->kn_data += delta;
742 		kc->next += delta * kc->to;
743 		if (now >= kc->next)	/* overflow */
744 			kc->next = now + kc->to;
745 		KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
746 	}
747 
748 	/*
749 	 * The initial check for a stopped kc->p is racy.  It is fine to
750 	 * miss the setting of the stop flags; at worst we would schedule
751 	 * one extra callout.  On the other hand, it is not fine to fail to
752 	 * schedule when we missed the clearing of the flags; we recheck
753 	 * them under the lock and observe a consistent state.
754 	 */
755 	p = kc->p;
756 	if (P_SHOULDSTOP(p) || P_KILLED(p)) {
757 		if (!proc_locked)
758 			PROC_LOCK(p);
759 		if (P_SHOULDSTOP(p) || P_KILLED(p)) {
760 			if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
761 				kc->flags |= KQ_TIMER_CB_ENQUEUED;
762 				TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
763 			}
764 			if (!proc_locked)
765 				PROC_UNLOCK(p);
766 			return;
767 		}
768 		if (!proc_locked)
769 			PROC_UNLOCK(p);
770 	}
771 	kqtimer_sched_callout(kc);
772 }
773 
774 static void
775 filt_timerexpire(void *knx)
776 {
777 	filt_timerexpire_l(knx, false);
778 }
779 
780 /*
781  * data contains amount of time to sleep
782  */
783 static int
784 filt_timervalidate(struct knote *kn, sbintime_t *to)
785 {
786 	struct bintime bt;
787 	sbintime_t sbt;
788 
789 	if (kn->kn_sdata < 0)
790 		return (EINVAL);
791 	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
792 		kn->kn_sdata = 1;
793 	/*
794 	 * The only fflags values supported are the timer unit
795 	 * (precision) and the absolute time indicator.
796 	 */
797 	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
798 		return (EINVAL);
799 
800 	*to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
801 	if (*to < 0)
802 		return (EINVAL);
803 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
804 		getboottimebin(&bt);
805 		sbt = bttosbt(bt);
806 		*to = MAX(0, *to - sbt);
807 	}
808 	return (0);
809 }
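/*
 * Illustrative userspace registrations accepted by the validation above
 * (a sketch, not compiled here): a 500ms periodic timer, using the
 * default NOTE_MSECONDS unit, and a one-shot absolute wakeup at the UNIX
 * timestamp 'when'.
 *
 *	EV_SET(&kev[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	EV_SET(&kev[1], 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
 *	    NOTE_ABSTIME | NOTE_SECONDS, when, NULL);
 */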
810 
811 static int
812 filt_timerattach(struct knote *kn)
813 {
814 	struct kq_timer_cb_data *kc;
815 	sbintime_t to;
816 	int error;
817 
818 	to = -1;
819 	error = filt_timervalidate(kn, &to);
820 	if (error != 0)
821 		return (error);
822 	KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 ||
823 	    (kn->kn_sfflags & NOTE_ABSTIME) != 0,
824 	    ("%s: periodic timer has a calculated zero timeout", __func__));
825 	KASSERT(to >= 0,
826 	    ("%s: timer has a calculated negative timeout", __func__));
827 
828 	if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
829 		atomic_subtract_int(&kq_ncallouts, 1);
830 		return (ENOMEM);
831 	}
832 
833 	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
834 		kn->kn_flags |= EV_CLEAR;	/* automatically set */
835 	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
836 	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
837 	kc->kn = kn;
838 	kc->p = curproc;
839 	kc->cpuid = PCPU_GET(cpuid);
840 	kc->flags = 0;
841 	callout_init(&kc->c, 1);
842 	filt_timerstart(kn, to);
843 
844 	return (0);
845 }
846 
847 static void
848 filt_timerstart(struct knote *kn, sbintime_t to)
849 {
850 	struct kq_timer_cb_data *kc;
851 
852 	kc = kn->kn_ptr.p_v;
853 	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
854 		kc->next = to;
855 		kc->to = 0;
856 	} else {
857 		kc->next = to + sbinuptime();
858 		kc->to = to;
859 	}
860 	kqtimer_sched_callout(kc);
861 }
862 
863 static void
864 filt_timerdetach(struct knote *kn)
865 {
866 	struct kq_timer_cb_data *kc;
867 	unsigned int old __unused;
868 	bool pending;
869 
870 	kc = kn->kn_ptr.p_v;
871 	do {
872 		callout_drain(&kc->c);
873 
874 		/*
875 		 * kqtimer_proc_continue() might have rescheduled this callout.
876 		 * Double-check, using the process mutex as an interlock.
877 		 */
878 		PROC_LOCK(kc->p);
879 		if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
880 			kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
881 			TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
882 		}
883 		pending = callout_pending(&kc->c);
884 		PROC_UNLOCK(kc->p);
885 	} while (pending);
886 	free(kc, M_KQUEUE);
887 	old = atomic_fetchadd_int(&kq_ncallouts, -1);
888 	KASSERT(old > 0, ("Number of callouts cannot become negative"));
889 	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
890 }
891 
892 static void
893 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
894 {
895 	struct kq_timer_cb_data *kc;
896 	struct kqueue *kq;
897 	sbintime_t to;
898 	int error;
899 
900 	switch (type) {
901 	case EVENT_REGISTER:
902 		/* Handle re-added timers that update data/fflags */
903 		if (kev->flags & EV_ADD) {
904 			kc = kn->kn_ptr.p_v;
905 
906 			/* Drain any existing callout. */
907 			callout_drain(&kc->c);
908 
909 			/* Throw away any existing undelivered record
910 			 * of the timer expiration. This is done under
911 			 * the presumption that if a process is
912 			 * re-adding this timer with new parameters,
913 			 * it is no longer interested in what may have
914 			 * happened under the old parameters. If it is
915 			 * interested, it can wait for the expiration,
916 			 * delete the old timer definition, and then
917 			 * add the new one.
918 			 *
919 			 * This has to be done while the kq is locked:
920 			 *   - if enqueued, dequeue
921 			 *   - make it no longer active
922 			 *   - clear the count of expiration events
923 			 */
924 			kq = kn->kn_kq;
925 			KQ_LOCK(kq);
926 			if (kn->kn_status & KN_QUEUED)
927 				knote_dequeue(kn);
928 
929 			kn->kn_status &= ~KN_ACTIVE;
930 			kn->kn_data = 0;
931 			KQ_UNLOCK(kq);
932 
933 			/* Reschedule timer based on new data/fflags */
934 			kn->kn_sfflags = kev->fflags;
935 			kn->kn_sdata = kev->data;
936 			error = filt_timervalidate(kn, &to);
937 			if (error != 0) {
938 				kn->kn_flags |= EV_ERROR;
939 				kn->kn_data = error;
940 			} else
941 				filt_timerstart(kn, to);
942 		}
943 		break;
944 
945 	case EVENT_PROCESS:
946 		*kev = kn->kn_kevent;
947 		if (kn->kn_flags & EV_CLEAR) {
948 			kn->kn_data = 0;
949 			kn->kn_fflags = 0;
950 		}
951 		break;
952 
953 	default:
954 		panic("filt_timertouch() - invalid type (%lu)", type);
955 		break;
956 	}
957 }
958 
959 static int
960 filt_timer(struct knote *kn, long hint)
961 {
962 
963 	return (kn->kn_data != 0);
964 }
965 
966 static int
967 filt_userattach(struct knote *kn)
968 {
969 
970 	/*
971 	 * EVFILT_USER knotes are not attached to anything in the kernel.
972 	 */
973 	kn->kn_hook = NULL;
974 	if (kn->kn_fflags & NOTE_TRIGGER)
975 		kn->kn_hookid = 1;
976 	else
977 		kn->kn_hookid = 0;
978 	return (0);
979 }
980 
981 static void
982 filt_userdetach(__unused struct knote *kn)
983 {
984 
985 	/*
986 	 * EVFILT_USER knotes are not attached to anything in the kernel.
987 	 */
988 }
989 
990 static int
991 filt_user(struct knote *kn, __unused long hint)
992 {
993 
994 	return (kn->kn_hookid);
995 }
996 
997 static void
998 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
999 {
1000 	u_int ffctrl;
1001 
1002 	switch (type) {
1003 	case EVENT_REGISTER:
1004 		if (kev->fflags & NOTE_TRIGGER)
1005 			kn->kn_hookid = 1;
1006 
1007 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1008 		kev->fflags &= NOTE_FFLAGSMASK;
1009 		switch (ffctrl) {
1010 		case NOTE_FFNOP:
1011 			break;
1012 
1013 		case NOTE_FFAND:
1014 			kn->kn_sfflags &= kev->fflags;
1015 			break;
1016 
1017 		case NOTE_FFOR:
1018 			kn->kn_sfflags |= kev->fflags;
1019 			break;
1020 
1021 		case NOTE_FFCOPY:
1022 			kn->kn_sfflags = kev->fflags;
1023 			break;
1024 
1025 		default:
1026 			/* XXX Return error? */
1027 			break;
1028 		}
1029 		kn->kn_sdata = kev->data;
1030 		if (kev->flags & EV_CLEAR) {
1031 			kn->kn_hookid = 0;
1032 			kn->kn_data = 0;
1033 			kn->kn_fflags = 0;
1034 		}
1035 		break;
1036 
1037 	case EVENT_PROCESS:
1038 		*kev = kn->kn_kevent;
1039 		kev->fflags = kn->kn_sfflags;
1040 		kev->data = kn->kn_sdata;
1041 		if (kn->kn_flags & EV_CLEAR) {
1042 			kn->kn_hookid = 0;
1043 			kn->kn_data = 0;
1044 			kn->kn_fflags = 0;
1045 		}
1046 		break;
1047 
1048 	default:
1049 		panic("filt_usertouch() - invalid type (%lu)", type);
1050 		break;
1051 	}
1052 }
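/*
 * Illustrative userspace use of EVFILT_USER (a sketch, not compiled
 * here): one registration arms the event, a later NOTE_TRIGGER makes it
 * fire, and EV_CLEAR rearms it after delivery.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	(void)kevent(kq, NULL, 0, &kev, 1, NULL);	(returns ident 1)
 */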
1053 
1054 int
1055 sys_kqueue(struct thread *td, struct kqueue_args *uap)
1056 {
1057 
1058 	return (kern_kqueue(td, 0, NULL));
1059 }
1060 
1061 static void
1062 kqueue_init(struct kqueue *kq)
1063 {
1064 
1065 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
1066 	TAILQ_INIT(&kq->kq_head);
1067 	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
1068 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
1069 }
1070 
1071 int
1072 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
1073 {
1074 	struct filedesc *fdp;
1075 	struct kqueue *kq;
1076 	struct file *fp;
1077 	struct ucred *cred;
1078 	int fd, error;
1079 
1080 	fdp = td->td_proc->p_fd;
1081 	cred = td->td_ucred;
1082 	if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
1083 		return (ENOMEM);
1084 
1085 	error = falloc_caps(td, &fp, &fd, flags, fcaps);
1086 	if (error != 0) {
1087 		chgkqcnt(cred->cr_ruidinfo, -1, 0);
1088 		return (error);
1089 	}
1090 
1091 	/* An extra reference on `fp' has been held for us by falloc(). */
1092 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
1093 	kqueue_init(kq);
1094 	kq->kq_fdp = fdp;
1095 	kq->kq_cred = crhold(cred);
1096 
1097 	FILEDESC_XLOCK(fdp);
1098 	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
1099 	FILEDESC_XUNLOCK(fdp);
1100 
1101 	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
1102 	fdrop(fp, td);
1103 
1104 	td->td_retval[0] = fd;
1105 	return (0);
1106 }
1107 
1108 struct g_kevent_args {
1109 	int	fd;
1110 	const void *changelist;
1111 	int	nchanges;
1112 	void	*eventlist;
1113 	int	nevents;
1114 	const struct timespec *timeout;
1115 };
1116 
1117 int
1118 sys_kevent(struct thread *td, struct kevent_args *uap)
1119 {
1120 	struct kevent_copyops k_ops = {
1121 		.arg = uap,
1122 		.k_copyout = kevent_copyout,
1123 		.k_copyin = kevent_copyin,
1124 		.kevent_size = sizeof(struct kevent),
1125 	};
1126 	struct g_kevent_args gk_args = {
1127 		.fd = uap->fd,
1128 		.changelist = uap->changelist,
1129 		.nchanges = uap->nchanges,
1130 		.eventlist = uap->eventlist,
1131 		.nevents = uap->nevents,
1132 		.timeout = uap->timeout,
1133 	};
1134 
1135 	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
1136 }
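/*
 * Illustrative userspace call path for the native ABI above (a sketch,
 * not compiled here): register a read filter on fd, then block for a
 * single event.
 *
 *	struct kevent ch, ev;
 *	int kq = kqueue();
 *	EV_SET(&ch, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &ch, 1, &ev, 1, NULL);
 *
 * On success n is 1 and ev.data holds the number of bytes available to
 * read, as filled in by the descriptor's filter.
 */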
1137 
1138 static int
1139 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
1140     struct kevent_copyops *k_ops, const char *struct_name)
1141 {
1142 	struct timespec ts, *tsp;
1143 #ifdef KTRACE
1144 	struct kevent *eventlist = uap->eventlist;
1145 #endif
1146 	int error;
1147 
1148 	if (uap->timeout != NULL) {
1149 		error = copyin(uap->timeout, &ts, sizeof(ts));
1150 		if (error)
1151 			return (error);
1152 		tsp = &ts;
1153 	} else
1154 		tsp = NULL;
1155 
1156 #ifdef KTRACE
1157 	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
1158 		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
1159 		    uap->nchanges, k_ops->kevent_size);
1160 #endif
1161 
1162 	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
1163 	    k_ops, tsp);
1164 
1165 #ifdef KTRACE
1166 	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
1167 		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
1168 		    td->td_retval[0], k_ops->kevent_size);
1169 #endif
1170 
1171 	return (error);
1172 }
1173 
1174 /*
1175  * Copy 'count' items into the destination list pointed to by uap->eventlist.
1176  */
1177 static int
1178 kevent_copyout(void *arg, struct kevent *kevp, int count)
1179 {
1180 	struct kevent_args *uap;
1181 	int error;
1182 
1183 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1184 	uap = (struct kevent_args *)arg;
1185 
1186 	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
1187 	if (error == 0)
1188 		uap->eventlist += count;
1189 	return (error);
1190 }
1191 
1192 /*
1193  * Copy 'count' items from the list pointed to by uap->changelist.
1194  */
1195 static int
1196 kevent_copyin(void *arg, struct kevent *kevp, int count)
1197 {
1198 	struct kevent_args *uap;
1199 	int error;
1200 
1201 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1202 	uap = (struct kevent_args *)arg;
1203 
1204 	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
1205 	if (error == 0)
1206 		uap->changelist += count;
1207 	return (error);
1208 }
1209 
1210 #ifdef COMPAT_FREEBSD11
1211 static int
1212 kevent11_copyout(void *arg, struct kevent *kevp, int count)
1213 {
1214 	struct freebsd11_kevent_args *uap;
1215 	struct freebsd11_kevent kev11;
1216 	int error, i;
1217 
1218 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1219 	uap = (struct freebsd11_kevent_args *)arg;
1220 
1221 	for (i = 0; i < count; i++) {
1222 		kev11.ident = kevp->ident;
1223 		kev11.filter = kevp->filter;
1224 		kev11.flags = kevp->flags;
1225 		kev11.fflags = kevp->fflags;
1226 		kev11.data = kevp->data;
1227 		kev11.udata = kevp->udata;
1228 		error = copyout(&kev11, uap->eventlist, sizeof(kev11));
1229 		if (error != 0)
1230 			break;
1231 		uap->eventlist++;
1232 		kevp++;
1233 	}
1234 	return (error);
1235 }
1236 
1237 /*
1238  * Copy 'count' items from the list pointed to by uap->changelist.
1239  */
1240 static int
1241 kevent11_copyin(void *arg, struct kevent *kevp, int count)
1242 {
1243 	struct freebsd11_kevent_args *uap;
1244 	struct freebsd11_kevent kev11;
1245 	int error, i;
1246 
1247 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1248 	uap = (struct freebsd11_kevent_args *)arg;
1249 
1250 	for (i = 0; i < count; i++) {
1251 		error = copyin(uap->changelist, &kev11, sizeof(kev11));
1252 		if (error != 0)
1253 			break;
1254 		kevp->ident = kev11.ident;
1255 		kevp->filter = kev11.filter;
1256 		kevp->flags = kev11.flags;
1257 		kevp->fflags = kev11.fflags;
1258 		kevp->data = (uintptr_t)kev11.data;
1259 		kevp->udata = kev11.udata;
1260 		bzero(&kevp->ext, sizeof(kevp->ext));
1261 		uap->changelist++;
1262 		kevp++;
1263 	}
1264 	return (error);
1265 }
1266 
1267 int
1268 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
1269 {
1270 	struct kevent_copyops k_ops = {
1271 		.arg = uap,
1272 		.k_copyout = kevent11_copyout,
1273 		.k_copyin = kevent11_copyin,
1274 		.kevent_size = sizeof(struct freebsd11_kevent),
1275 	};
1276 	struct g_kevent_args gk_args = {
1277 		.fd = uap->fd,
1278 		.changelist = uap->changelist,
1279 		.nchanges = uap->nchanges,
1280 		.eventlist = uap->eventlist,
1281 		.nevents = uap->nevents,
1282 		.timeout = uap->timeout,
1283 	};
1284 
1285 	return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent"));
1286 }
1287 #endif
1288 
1289 int
1290 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
1291     struct kevent_copyops *k_ops, const struct timespec *timeout)
1292 {
1293 	cap_rights_t rights;
1294 	struct file *fp;
1295 	int error;
1296 
1297 	cap_rights_init_zero(&rights);
1298 	if (nchanges > 0)
1299 		cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
1300 	if (nevents > 0)
1301 		cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
1302 	error = fget(td, fd, &rights, &fp);
1303 	if (error != 0)
1304 		return (error);
1305 
1306 	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
1307 	fdrop(fp, td);
1308 
1309 	return (error);
1310 }
1311 
1312 static int
1313 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1314     struct kevent_copyops *k_ops, const struct timespec *timeout)
1315 {
1316 	struct kevent keva[KQ_NEVENTS];
1317 	struct kevent *kevp, *changes;
1318 	int i, n, nerrors, error;
1319 
1320 	if (nchanges < 0)
1321 		return (EINVAL);
1322 
1323 	nerrors = 0;
1324 	while (nchanges > 0) {
1325 		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
1326 		error = k_ops->k_copyin(k_ops->arg, keva, n);
1327 		if (error)
1328 			return (error);
1329 		changes = keva;
1330 		for (i = 0; i < n; i++) {
1331 			kevp = &changes[i];
1332 			if (!kevp->filter)
1333 				continue;
1334 			kevp->flags &= ~EV_SYSFLAGS;
1335 			error = kqueue_register(kq, kevp, td, M_WAITOK);
1336 			if (error || (kevp->flags & EV_RECEIPT)) {
1337 				if (nevents == 0)
1338 					return (error);
1339 				kevp->flags = EV_ERROR;
1340 				kevp->data = error;
1341 				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
1342 				nevents--;
1343 				nerrors++;
1344 			}
1345 		}
1346 		nchanges -= n;
1347 	}
1348 	if (nerrors) {
1349 		td->td_retval[0] = nerrors;
1350 		return (0);
1351 	}
1352 
1353 	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1354 }
1355 
1356 int
1357 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
1358     struct kevent_copyops *k_ops, const struct timespec *timeout)
1359 {
1360 	struct kqueue *kq;
1361 	int error;
1362 
1363 	error = kqueue_acquire(fp, &kq);
1364 	if (error != 0)
1365 		return (error);
1366 	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1367 	kqueue_release(kq, 0);
1368 	return (error);
1369 }
1370 
1371 /*
1372  * Performs a kevent() call on a temporarily created kqueue. This can be
1373  * used to perform one-shot polling, similar to poll() and select().
1374  */
1375 int
1376 kern_kevent_anonymous(struct thread *td, int nevents,
1377     struct kevent_copyops *k_ops)
1378 {
1379 	struct kqueue kq = {};
1380 	int error;
1381 
1382 	kqueue_init(&kq);
1383 	kq.kq_refcnt = 1;
1384 	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1385 	kqueue_drain(&kq, td);
1386 	kqueue_destroy(&kq);
1387 	return (error);
1388 }
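/*
 * A sketch of an in-kernel consumer (illustrative only; the callbacks
 * and their context argument are hypothetical): the caller supplies
 * kevent_copyops that move events to and from kernel memory rather than
 * userspace.
 *
 *	struct kevent_copyops k_ops = {
 *		.arg = &my_ctx,			(hypothetical context)
 *		.k_copyout = my_kev_copyout,	(hypothetical callback)
 *		.k_copyin = my_kev_copyin,	(hypothetical callback)
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *	error = kern_kevent_anonymous(td, nevents, &k_ops);
 */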
1389 
1390 int
1391 kqueue_add_filteropts(int filt, struct filterops *filtops)
1392 {
1393 	int error;
1394 
1395 	error = 0;
1396 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
1397 		printf(
1398 "trying to add a filterop that is out of range: %d is beyond %d\n",
1399 		    ~filt, EVFILT_SYSCOUNT);
1400 		return (EINVAL);
1401 	}
1402 	mtx_lock(&filterops_lock);
1403 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
1404 	    sysfilt_ops[~filt].for_fop != NULL)
1405 		error = EEXIST;
1406 	else {
1407 		sysfilt_ops[~filt].for_fop = filtops;
1408 		sysfilt_ops[~filt].for_refcnt = 0;
1409 	}
1410 	mtx_unlock(&filterops_lock);
1411 
1412 	return (error);
1413 }
1414 
1415 int
1416 kqueue_del_filteropts(int filt)
1417 {
1418 	int error;
1419 
1420 	error = 0;
1421 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1422 		return (EINVAL);
1423 
1424 	mtx_lock(&filterops_lock);
1425 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
1426 	    sysfilt_ops[~filt].for_fop == NULL)
1427 		error = EINVAL;
1428 	else if (sysfilt_ops[~filt].for_refcnt != 0)
1429 		error = EBUSY;
1430 	else {
1431 		sysfilt_ops[~filt].for_fop = &null_filtops;
1432 		sysfilt_ops[~filt].for_refcnt = 0;
1433 	}
1434 	mtx_unlock(&filterops_lock);
1435 
1436 	return (error);
1437 }
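/*
 * Illustrative module-side usage (a sketch; aio(4) does this for
 * EVFILT_AIO and EVFILT_LIO at load/unload time): a subsystem owning one
 * of the reserved EVFILT_* slots installs its filterops and later
 * removes them.
 *
 *	error = kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_AIO);
 */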
1438 
1439 static struct filterops *
1440 kqueue_fo_find(int filt)
1441 {
1442 
1443 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1444 		return (NULL);
1445 
1446 	if (sysfilt_ops[~filt].for_nolock)
1447 		return (sysfilt_ops[~filt].for_fop);
1448 
1449 	mtx_lock(&filterops_lock);
1450 	sysfilt_ops[~filt].for_refcnt++;
1451 	if (sysfilt_ops[~filt].for_fop == NULL)
1452 		sysfilt_ops[~filt].for_fop = &null_filtops;
1453 	mtx_unlock(&filterops_lock);
1454 
1455 	return (sysfilt_ops[~filt].for_fop);
1456 }
1457 
1458 static void
1459 kqueue_fo_release(int filt)
1460 {
1461 
1462 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1463 		return;
1464 
1465 	if (sysfilt_ops[~filt].for_nolock)
1466 		return;
1467 
1468 	mtx_lock(&filterops_lock);
1469 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
1470 	    ("filter object refcount not valid on release"));
1471 	sysfilt_ops[~filt].for_refcnt--;
1472 	mtx_unlock(&filterops_lock);
1473 }
1474 
1475 /*
1476  * A ref to kq (obtained via kqueue_acquire) must be held.
1477  */
1478 static int
1479 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
1480     int mflag)
1481 {
1482 	struct filterops *fops;
1483 	struct file *fp;
1484 	struct knote *kn, *tkn;
1485 	struct knlist *knl;
1486 	int error, filt, event;
1487 	int haskqglobal, filedesc_unlock;
1488 
1489 	if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
1490 		return (EINVAL);
1491 
1492 	fp = NULL;
1493 	kn = NULL;
1494 	knl = NULL;
1495 	error = 0;
1496 	haskqglobal = 0;
1497 	filedesc_unlock = 0;
1498 
1499 	filt = kev->filter;
1500 	fops = kqueue_fo_find(filt);
1501 	if (fops == NULL)
1502 		return (EINVAL);
1503 
1504 	if (kev->flags & EV_ADD) {
1505 		/* Reject an invalid flag pair early */
1506 		if (kev->flags & EV_KEEPUDATA) {
1507 			tkn = NULL;
1508 			error = EINVAL;
1509 			goto done;
1510 		}
1511 
1512 		/*
1513 		 * Prevent waiting with locks.  Non-sleepable
1514 		 * allocation failures are handled in the loop, only
1515 		 * if the spare knote appears to be actually required.
1516 		 */
1517 		tkn = knote_alloc(mflag);
1518 	} else {
1519 		tkn = NULL;
1520 	}
1521 
1522 findkn:
1523 	if (fops->f_isfd) {
1524 		KASSERT(td != NULL, ("td is NULL"));
1525 		if (kev->ident > INT_MAX)
1526 			error = EBADF;
1527 		else
1528 			error = fget(td, kev->ident, &cap_event_rights, &fp);
1529 		if (error)
1530 			goto done;
1531 
1532 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1533 		    kev->ident, M_NOWAIT) != 0) {
1534 			/* try again */
1535 			fdrop(fp, td);
1536 			fp = NULL;
1537 			error = kqueue_expand(kq, fops, kev->ident, mflag);
1538 			if (error)
1539 				goto done;
1540 			goto findkn;
1541 		}
1542 
1543 		if (fp->f_type == DTYPE_KQUEUE) {
1544 			/*
1545 			 * If we add some intelligence about what we are doing,
1546 			 * we should be able to support events on ourselves.
1547 			 * We need to know when we are doing this to prevent
1548 			 * getting both the knlist lock and the kq lock since
1549 			 * they are the same thing.
1550 			 */
1551 			if (fp->f_data == kq) {
1552 				error = EINVAL;
1553 				goto done;
1554 			}
1555 
1556 			/*
1557 			 * Pre-lock the filedesc before the global
1558 			 * lock mutex, see the comment in
1559 			 * kqueue_close().
1560 			 */
1561 			FILEDESC_XLOCK(td->td_proc->p_fd);
1562 			filedesc_unlock = 1;
1563 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1564 		}
1565 
1566 		KQ_LOCK(kq);
1567 		if (kev->ident < kq->kq_knlistsize) {
1568 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1569 				if (kev->filter == kn->kn_filter)
1570 					break;
1571 		}
1572 	} else {
1573 		if ((kev->flags & EV_ADD) == EV_ADD) {
1574 			error = kqueue_expand(kq, fops, kev->ident, mflag);
1575 			if (error != 0)
1576 				goto done;
1577 		}
1578 
1579 		KQ_LOCK(kq);
1580 
1581 		/*
1582 		 * If possible, find an existing knote to use for this kevent.
1583 		 */
1584 		if (kev->filter == EVFILT_PROC &&
1585 		    (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
1586 			/* This is an internal creation of a process tracking
1587 			 * note. Don't attempt to coalesce this with an
1588 			 * existing note.
1589 			 */
1590 			;
1591 		} else if (kq->kq_knhashmask != 0) {
1592 			struct klist *list;
1593 
1594 			list = &kq->kq_knhash[
1595 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1596 			SLIST_FOREACH(kn, list, kn_link)
1597 				if (kev->ident == kn->kn_id &&
1598 				    kev->filter == kn->kn_filter)
1599 					break;
1600 		}
1601 	}
1602 
1603 	/* knote is in the process of changing, wait for it to stabilize. */
1604 	if (kn != NULL && kn_in_flux(kn)) {
1605 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1606 		if (filedesc_unlock) {
1607 			FILEDESC_XUNLOCK(td->td_proc->p_fd);
1608 			filedesc_unlock = 0;
1609 		}
1610 		kq->kq_state |= KQ_FLUXWAIT;
1611 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1612 		if (fp != NULL) {
1613 			fdrop(fp, td);
1614 			fp = NULL;
1615 		}
1616 		goto findkn;
1617 	}
1618 
1619 	/*
1620 	 * kn now contains the matching knote, or NULL if no match
1621 	 */
1622 	if (kn == NULL) {
1623 		if (kev->flags & EV_ADD) {
1624 			kn = tkn;
1625 			tkn = NULL;
1626 			if (kn == NULL) {
1627 				KQ_UNLOCK(kq);
1628 				error = ENOMEM;
1629 				goto done;
1630 			}
1631 			kn->kn_fp = fp;
1632 			kn->kn_kq = kq;
1633 			kn->kn_fop = fops;
1634 			/*
1635 			 * apply reference counts to knote structure, and
1636 			 * do not release it at the end of this routine.
1637 			 */
1638 			fops = NULL;
1639 			fp = NULL;
1640 
1641 			kn->kn_sfflags = kev->fflags;
1642 			kn->kn_sdata = kev->data;
1643 			kev->fflags = 0;
1644 			kev->data = 0;
1645 			kn->kn_kevent = *kev;
1646 			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1647 			    EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
1648 			kn->kn_status = KN_DETACHED;
1649 			if ((kev->flags & EV_DISABLE) != 0)
1650 				kn->kn_status |= KN_DISABLED;
1651 			kn_enter_flux(kn);
1652 
1653 			error = knote_attach(kn, kq);
1654 			KQ_UNLOCK(kq);
1655 			if (error != 0) {
1656 				tkn = kn;
1657 				goto done;
1658 			}
1659 
1660 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1661 				knote_drop_detached(kn, td);
1662 				goto done;
1663 			}
1664 			knl = kn_list_lock(kn);
1665 			goto done_ev_add;
1666 		} else {
1667 			/* No matching knote and the EV_ADD flag is not set. */
1668 			KQ_UNLOCK(kq);
1669 			error = ENOENT;
1670 			goto done;
1671 		}
1672 	}
1673 
1674 	if (kev->flags & EV_DELETE) {
1675 		kn_enter_flux(kn);
1676 		KQ_UNLOCK(kq);
1677 		knote_drop(kn, td);
1678 		goto done;
1679 	}
1680 
1681 	if (kev->flags & EV_FORCEONESHOT) {
1682 		kn->kn_flags |= EV_ONESHOT;
1683 		KNOTE_ACTIVATE(kn, 1);
1684 	}
1685 
1686 	if ((kev->flags & EV_ENABLE) != 0)
1687 		kn->kn_status &= ~KN_DISABLED;
1688 	else if ((kev->flags & EV_DISABLE) != 0)
1689 		kn->kn_status |= KN_DISABLED;
1690 
1691 	/*
1692 	 * The user may change some filter values after the initial EV_ADD,
1693 	 * but doing so will not reset any filter which has already been
1694 	 * triggered.
1695 	 */
1696 	kn->kn_status |= KN_SCAN;
1697 	kn_enter_flux(kn);
1698 	KQ_UNLOCK(kq);
1699 	knl = kn_list_lock(kn);
1700 	if ((kev->flags & EV_KEEPUDATA) == 0)
1701 		kn->kn_kevent.udata = kev->udata;
1702 	if (!fops->f_isfd && fops->f_touch != NULL) {
1703 		fops->f_touch(kn, kev, EVENT_REGISTER);
1704 	} else {
1705 		kn->kn_sfflags = kev->fflags;
1706 		kn->kn_sdata = kev->data;
1707 	}
1708 
1709 done_ev_add:
1710 	/*
1711 	 * We can get here with kn->kn_knlist == NULL.  This can happen when
1712 	 * the initial attach event decides that the event is "completed"
1713 	 * already, e.g., filt_procattach() is called on a zombie process.  It
1714 	 * will call filt_proc() which will remove it from the list, and NULL
1715 	 * kn_knlist.
1716 	 *
1717 	 * KN_DISABLED will be stable while the knote is in flux, so the
1718 	 * unlocked read will not race with an update.
1719 	 */
1720 	if ((kn->kn_status & KN_DISABLED) == 0)
1721 		event = kn->kn_fop->f_event(kn, 0);
1722 	else
1723 		event = 0;
1724 
1725 	KQ_LOCK(kq);
1726 	if (event)
1727 		kn->kn_status |= KN_ACTIVE;
1728 	if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
1729 	    KN_ACTIVE)
1730 		knote_enqueue(kn);
1731 	kn->kn_status &= ~KN_SCAN;
1732 	kn_leave_flux(kn);
1733 	kn_list_unlock(knl);
1734 	KQ_UNLOCK_FLUX(kq);
1735 
1736 done:
1737 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1738 	if (filedesc_unlock)
1739 		FILEDESC_XUNLOCK(td->td_proc->p_fd);
1740 	if (fp != NULL)
1741 		fdrop(fp, td);
1742 	knote_free(tkn);
1743 	if (fops != NULL)
1744 		kqueue_fo_release(filt);
1745 	return (error);
1746 }
1747 
1748 static int
1749 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1750 {
1751 	int error;
1752 	struct kqueue *kq;
1753 
1754 	error = 0;
1755 
1756 	kq = fp->f_data;
1757 	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1758 		return (EBADF);
1759 	*kqp = kq;
1760 	KQ_LOCK(kq);
1761 	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1762 		KQ_UNLOCK(kq);
1763 		return (EBADF);
1764 	}
1765 	kq->kq_refcnt++;
1766 	KQ_UNLOCK(kq);
1767 
1768 	return (error);
1769 }
1770 
1771 static void
1772 kqueue_release(struct kqueue *kq, int locked)
1773 {
1774 	if (locked)
1775 		KQ_OWNED(kq);
1776 	else
1777 		KQ_LOCK(kq);
1778 	kq->kq_refcnt--;
1779 	if (kq->kq_refcnt == 1)
1780 		wakeup(&kq->kq_refcnt);
1781 	if (!locked)
1782 		KQ_UNLOCK(kq);
1783 }
1784 
1785 void
1786 kqueue_drain_schedtask(void)
1787 {
1788 	taskqueue_quiesce(taskqueue_kqueue_ctx);
1789 }
1790 
1791 static void
1792 kqueue_schedtask(struct kqueue *kq)
1793 {
1794 	struct thread *td;
1795 
1796 	KQ_OWNED(kq);
1797 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1798 	    ("scheduling kqueue task while draining"));
1799 
1800 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1801 		taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1802 		kq->kq_state |= KQ_TASKSCHED;
1803 		td = curthread;
1804 		thread_lock(td);
1805 		td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
1806 		thread_unlock(td);
1807 	}
1808 }
1809 
1810 /*
1811  * Expand the kq to make sure we have storage for fops/ident pair.
1812  *
1813  * Return 0 on success (or no work necessary), return errno on failure.
1814  */
1815 static int
1816 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1817     int mflag)
1818 {
1819 	struct klist *list, *tmp_knhash, *to_free;
1820 	u_long tmp_knhashmask;
1821 	int error, fd, size;
1822 
1823 	KQ_NOTOWNED(kq);
1824 
1825 	error = 0;
1826 	to_free = NULL;
1827 	if (fops->f_isfd) {
1828 		fd = ident;
1829 		if (kq->kq_knlistsize <= fd) {
1830 			size = kq->kq_knlistsize;
1831 			while (size <= fd)
1832 				size += KQEXTENT;
1833 			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1834 			if (list == NULL)
1835 				return (ENOMEM);
1836 			KQ_LOCK(kq);
1837 			if ((kq->kq_state & KQ_CLOSING) != 0) {
1838 				to_free = list;
1839 				error = EBADF;
1840 			} else if (kq->kq_knlistsize > fd) {
1841 				to_free = list;
1842 			} else {
1843 				if (kq->kq_knlist != NULL) {
1844 					bcopy(kq->kq_knlist, list,
1845 					    kq->kq_knlistsize * sizeof(*list));
1846 					to_free = kq->kq_knlist;
1847 					kq->kq_knlist = NULL;
1848 				}
1849 				bzero((caddr_t)list +
1850 				    kq->kq_knlistsize * sizeof(*list),
1851 				    (size - kq->kq_knlistsize) * sizeof(*list));
1852 				kq->kq_knlistsize = size;
1853 				kq->kq_knlist = list;
1854 			}
1855 			KQ_UNLOCK(kq);
1856 		}
1857 	} else {
1858 		if (kq->kq_knhashmask == 0) {
1859 			tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
1860 			    &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
1861 			    HASH_WAITOK : HASH_NOWAIT);
1862 			if (tmp_knhash == NULL)
1863 				return (ENOMEM);
1864 			KQ_LOCK(kq);
1865 			if ((kq->kq_state & KQ_CLOSING) != 0) {
1866 				to_free = tmp_knhash;
1867 				error = EBADF;
1868 			} else if (kq->kq_knhashmask == 0) {
1869 				kq->kq_knhash = tmp_knhash;
1870 				kq->kq_knhashmask = tmp_knhashmask;
1871 			} else {
1872 				to_free = tmp_knhash;
1873 			}
1874 			KQ_UNLOCK(kq);
1875 		}
1876 	}
1877 	free(to_free, M_KQUEUE);
1878 
1879 	KQ_NOTOWNED(kq);
1880 	return (error);
1881 }
1882 
1883 static void
1884 kqueue_task(void *arg, int pending)
1885 {
1886 	struct kqueue *kq;
1887 	int haskqglobal;
1888 
1889 	haskqglobal = 0;
1890 	kq = arg;
1891 
1892 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1893 	KQ_LOCK(kq);
1894 
1895 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1896 
1897 	kq->kq_state &= ~KQ_TASKSCHED;
1898 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1899 		wakeup(&kq->kq_state);
1900 	}
1901 	KQ_UNLOCK(kq);
1902 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1903 }
1904 
1905 /*
1906  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1907  * We treat KN_MARKER knotes as if they are in flux.
1908  */
1909 static int
1910 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1911     const struct timespec *tsp, struct kevent *keva, struct thread *td)
1912 {
1913 	struct kevent *kevp;
1914 	struct knote *kn, *marker;
1915 	struct knlist *knl;
1916 	sbintime_t asbt, rsbt;
1917 	int count, error, haskqglobal, influx, nkev, touch;
1918 
1919 	count = maxevents;
1920 	nkev = 0;
1921 	error = 0;
1922 	haskqglobal = 0;
1923 
1924 	if (maxevents == 0)
1925 		goto done_nl;
1926 	if (maxevents < 0) {
1927 		error = EINVAL;
1928 		goto done_nl;
1929 	}
1930 
1931 	rsbt = 0;
1932 	if (tsp != NULL) {
1933 		if (!timespecvalid_interval(tsp)) {
1934 			error = EINVAL;
1935 			goto done_nl;
1936 		}
1937 		if (timespecisset(tsp)) {
1938 			if (tsp->tv_sec <= INT32_MAX) {
1939 				rsbt = tstosbt(*tsp);
1940 				if (TIMESEL(&asbt, rsbt))
1941 					asbt += tc_tick_sbt;
1942 				if (asbt <= SBT_MAX - rsbt)
1943 					asbt += rsbt;
1944 				else
1945 					asbt = 0;
1946 				rsbt >>= tc_precexp;
1947 			} else
1948 				asbt = 0;
1949 		} else
1950 			asbt = -1;
1951 	} else
1952 		asbt = 0;
1953 	marker = knote_alloc(M_WAITOK);
1954 	marker->kn_status = KN_MARKER;
1955 	KQ_LOCK(kq);
1956 
1957 retry:
1958 	kevp = keva;
1959 	if (kq->kq_count == 0) {
1960 		if (asbt == -1) {
1961 			error = EWOULDBLOCK;
1962 		} else {
1963 			kq->kq_state |= KQ_SLEEP;
1964 			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1965 			    "kqread", asbt, rsbt, C_ABSOLUTE);
1966 		}
1967 		if (error == 0)
1968 			goto retry;
1969 		/* don't restart after signals... */
1970 		if (error == ERESTART)
1971 			error = EINTR;
1972 		else if (error == EWOULDBLOCK)
1973 			error = 0;
1974 		goto done;
1975 	}
1976 
1977 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1978 	influx = 0;
1979 	while (count) {
1980 		KQ_OWNED(kq);
1981 		kn = TAILQ_FIRST(&kq->kq_head);
1982 
1983 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1984 		    kn_in_flux(kn)) {
1985 			if (influx) {
1986 				influx = 0;
1987 				KQ_FLUX_WAKEUP(kq);
1988 			}
1989 			kq->kq_state |= KQ_FLUXWAIT;
1990 			error = msleep(kq, &kq->kq_lock, PSOCK,
1991 			    "kqflxwt", 0);
1992 			continue;
1993 		}
1994 
1995 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1996 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1997 			kn->kn_status &= ~KN_QUEUED;
1998 			kq->kq_count--;
1999 			continue;
2000 		}
2001 		if (kn == marker) {
2002 			KQ_FLUX_WAKEUP(kq);
2003 			if (count == maxevents)
2004 				goto retry;
2005 			goto done;
2006 		}
2007 		KASSERT(!kn_in_flux(kn),
2008 		    ("knote %p is unexpectedly in flux", kn));
2009 
2010 		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
2011 			kn->kn_status &= ~KN_QUEUED;
2012 			kn_enter_flux(kn);
2013 			kq->kq_count--;
2014 			KQ_UNLOCK(kq);
2015 			/*
2016 			 * We don't need to lock the list since we've
2017 			 * marked the knote as in flux.
2018 			 */
2019 			knote_drop(kn, td);
2020 			KQ_LOCK(kq);
2021 			continue;
2022 		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
2023 			kn->kn_status &= ~KN_QUEUED;
2024 			kn_enter_flux(kn);
2025 			kq->kq_count--;
2026 			KQ_UNLOCK(kq);
2027 			/*
2028 			 * We don't need to lock the list since we've
2029 			 * marked the knote as being in flux.
2030 			 */
2031 			*kevp = kn->kn_kevent;
2032 			knote_drop(kn, td);
2033 			KQ_LOCK(kq);
2034 			kn = NULL;
2035 		} else {
2036 			kn->kn_status |= KN_SCAN;
2037 			kn_enter_flux(kn);
2038 			KQ_UNLOCK(kq);
2039 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
2040 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
2041 			knl = kn_list_lock(kn);
2042 			if (kn->kn_fop->f_event(kn, 0) == 0) {
2043 				KQ_LOCK(kq);
2044 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2045 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
2046 				    KN_SCAN);
2047 				kn_leave_flux(kn);
2048 				kq->kq_count--;
2049 				kn_list_unlock(knl);
2050 				influx = 1;
2051 				continue;
2052 			}
2053 			touch = (!kn->kn_fop->f_isfd &&
2054 			    kn->kn_fop->f_touch != NULL);
2055 			if (touch)
2056 				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
2057 			else
2058 				*kevp = kn->kn_kevent;
2059 			KQ_LOCK(kq);
2060 			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2061 			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
2062 				/*
2063 				 * Manually clear knotes that were not
2064 				 * 'touch'ed.
2065 				 */
2066 				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
2067 					kn->kn_data = 0;
2068 					kn->kn_fflags = 0;
2069 				}
2070 				if (kn->kn_flags & EV_DISPATCH)
2071 					kn->kn_status |= KN_DISABLED;
2072 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
2073 				kq->kq_count--;
2074 			} else
2075 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2076 
2077 			kn->kn_status &= ~KN_SCAN;
2078 			kn_leave_flux(kn);
2079 			kn_list_unlock(knl);
2080 			influx = 1;
2081 		}
2082 
2083 		/* we are returning a copy to the user */
2084 		kevp++;
2085 		nkev++;
2086 		count--;
2087 
2088 		if (nkev == KQ_NEVENTS) {
2089 			influx = 0;
2090 			KQ_UNLOCK_FLUX(kq);
2091 			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2092 			nkev = 0;
2093 			kevp = keva;
2094 			KQ_LOCK(kq);
2095 			if (error)
2096 				break;
2097 		}
2098 	}
2099 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
2100 done:
2101 	KQ_OWNED(kq);
2102 	KQ_UNLOCK_FLUX(kq);
2103 	knote_free(marker);
2104 done_nl:
2105 	KQ_NOTOWNED(kq);
2106 	if (nkev != 0)
2107 		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2108 	td->td_retval[0] = maxevents - count;
2109 	return (error);
2110 }
2111 
2112 /*ARGSUSED*/
2113 static int
2114 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
2115 	struct ucred *active_cred, struct thread *td)
2116 {
2117 	/*
2118 	 * Enabling sigio causes two major problems:
2119 	 * 1) infinite recursion:
2120 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
2121 	 * set.  On receipt of a signal this causes the kqueue to recurse
2122 	 * into itself over and over: sending the sigio makes the kqueue
2123 	 * readable, which in turn posts sigio again, forever.
2124 	 * Solution: set a flag in the kqueue noting that a SIGIO is
2125 	 * already in progress.
2126 	 * 2) locking problems:
2127 	 * Synopsis: kqueue is a leaf subsystem, but adding signalling puts
2128 	 * us above the proc and pgrp locks.
2129 	 * Solution: Post a signal using an async mechanism, being sure to
2130 	 * record a generation count in the delivery so that we do not deliver
2131 	 * a signal to the wrong process.
2132 	 *
2133 	 * Note, these two mechanisms are somewhat mutually exclusive!
2134 	 */
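
	/*
	 * A sketch of the guard from solution 1 above, assuming a
	 * hypothetical KQ_SIGIO state flag (not defined today), applied
	 * where the signal would be posted:
	 *
	 *	if ((kq->kq_state & (KQ_ASYNC | KQ_SIGIO)) == KQ_ASYNC) {
	 *		kq->kq_state |= KQ_SIGIO;
	 *		pgsigio(&kq->kq_sigio, SIGIO, 0);
	 *		kq->kq_state &= ~KQ_SIGIO;
	 *	}
	 */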
2135 #if 0
2136 	struct kqueue *kq;
2137 
2138 	kq = fp->f_data;
2139 	switch (cmd) {
2140 	case FIOASYNC:
2141 		if (*(int *)data) {
2142 			kq->kq_state |= KQ_ASYNC;
2143 		} else {
2144 			kq->kq_state &= ~KQ_ASYNC;
2145 		}
2146 		return (0);
2147 
2148 	case FIOSETOWN:
2149 		return (fsetown(*(int *)data, &kq->kq_sigio));
2150 
2151 	case FIOGETOWN:
2152 		*(int *)data = fgetown(&kq->kq_sigio);
2153 		return (0);
2154 	}
2155 #endif
2156 
2157 	return (ENOTTY);
2158 }
2159 
2160 /*ARGSUSED*/
2161 static int
2162 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
2163 	struct thread *td)
2164 {
2165 	struct kqueue *kq;
2166 	int revents = 0;
2167 	int error;
2168 
2169 	if ((error = kqueue_acquire(fp, &kq)))
2170 		return (POLLERR);
2171 
2172 	KQ_LOCK(kq);
2173 	if (events & (POLLIN | POLLRDNORM)) {
2174 		if (kq->kq_count) {
2175 			revents |= events & (POLLIN | POLLRDNORM);
2176 		} else {
2177 			selrecord(td, &kq->kq_sel);
2178 			if (SEL_WAITING(&kq->kq_sel))
2179 				kq->kq_state |= KQ_SEL;
2180 		}
2181 	}
2182 	kqueue_release(kq, 1);
2183 	KQ_UNLOCK(kq);
2184 	return (revents);
2185 }
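
/*
 * kqueue_poll() is what lets a kqueue descriptor itself be monitored by
 * select(2) and poll(2).  A userland sketch (illustration only, where
 * zero_ts is a zeroed struct timespec so kevent() does not block):
 *
 *	struct pollfd pfd = { .fd = kq, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN) != 0)
 *		n = kevent(kq, NULL, 0, evs, nevs, &zero_ts);
 */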
2186 
2187 /*ARGSUSED*/
2188 static int
2189 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
2190 {
2191 
2192 	bzero((void *)st, sizeof *st);
2193 	/*
2194 	 * We no longer return kq_count because the unlocked value is useless.
2195 	 * If you spent all this time getting the count, why not spend your
2196 	 * syscall better by calling kevent?
2197 	 *
2198 	 * XXX - This is needed for libc_r.
2199 	 */
2200 	st->st_mode = S_IFIFO;
2201 	return (0);
2202 }
2203 
2204 static void
2205 kqueue_drain(struct kqueue *kq, struct thread *td)
2206 {
2207 	struct knote *kn;
2208 	int i;
2209 
2210 	KQ_LOCK(kq);
2211 
2212 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2213 	    ("kqueue already closing"));
2214 	kq->kq_state |= KQ_CLOSING;
2215 	if (kq->kq_refcnt > 1)
2216 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2217 
2218 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2219 
2220 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
2221 	    ("kqueue's knlist not empty"));
2222 
2223 	for (i = 0; i < kq->kq_knlistsize; i++) {
2224 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2225 			if (kn_in_flux(kn)) {
2226 				kq->kq_state |= KQ_FLUXWAIT;
2227 				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2228 				continue;
2229 			}
2230 			kn_enter_flux(kn);
2231 			KQ_UNLOCK(kq);
2232 			knote_drop(kn, td);
2233 			KQ_LOCK(kq);
2234 		}
2235 	}
2236 	if (kq->kq_knhashmask != 0) {
2237 		for (i = 0; i <= kq->kq_knhashmask; i++) {
2238 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2239 				if (kn_in_flux(kn)) {
2240 					kq->kq_state |= KQ_FLUXWAIT;
2241 					msleep(kq, &kq->kq_lock, PSOCK,
2242 					       "kqclo2", 0);
2243 					continue;
2244 				}
2245 				kn_enter_flux(kn);
2246 				KQ_UNLOCK(kq);
2247 				knote_drop(kn, td);
2248 				KQ_LOCK(kq);
2249 			}
2250 		}
2251 	}
2252 
2253 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2254 		kq->kq_state |= KQ_TASKDRAIN;
2255 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2256 	}
2257 
2258 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2259 		selwakeuppri(&kq->kq_sel, PSOCK);
2260 		if (!SEL_WAITING(&kq->kq_sel))
2261 			kq->kq_state &= ~KQ_SEL;
2262 	}
2263 
2264 	KQ_UNLOCK(kq);
2265 }
2266 
2267 static void
2268 kqueue_destroy(struct kqueue *kq)
2269 {
2270 
2271 	KASSERT(kq->kq_fdp == NULL,
2272 	    ("kqueue still attached to a file descriptor"));
2273 	seldrain(&kq->kq_sel);
2274 	knlist_destroy(&kq->kq_sel.si_note);
2275 	mtx_destroy(&kq->kq_lock);
2276 
2277 	if (kq->kq_knhash != NULL)
2278 		free(kq->kq_knhash, M_KQUEUE);
2279 	if (kq->kq_knlist != NULL)
2280 		free(kq->kq_knlist, M_KQUEUE);
2281 
2282 	funsetown(&kq->kq_sigio);
2283 }
2284 
2285 /*ARGSUSED*/
2286 static int
2287 kqueue_close(struct file *fp, struct thread *td)
2288 {
2289 	struct kqueue *kq = fp->f_data;
2290 	struct filedesc *fdp;
2291 	int error;
2292 	int filedesc_unlock;
2293 
2294 	if ((error = kqueue_acquire(fp, &kq)))
2295 		return (error);
2296 	kqueue_drain(kq, td);
2297 
2298 	/*
2299 	 * We may be called via knote_drop() doing fdrop(), itself
2300 	 * invoked from kqueue_register().  In that case the global
2301 	 * lock is owned, and the filedesc sx was taken first so that
2302 	 * the sleepable lock is not acquired after a non-sleepable one.
2303 	 */
2304 	fdp = kq->kq_fdp;
2305 	kq->kq_fdp = NULL;
2306 	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2307 		FILEDESC_XLOCK(fdp);
2308 		filedesc_unlock = 1;
2309 	} else
2310 		filedesc_unlock = 0;
2311 	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2312 	if (filedesc_unlock)
2313 		FILEDESC_XUNLOCK(fdp);
2314 
2315 	kqueue_destroy(kq);
2316 	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2317 	crfree(kq->kq_cred);
2318 	free(kq, M_KQUEUE);
2319 	fp->f_data = NULL;
2320 
2321 	return (0);
2322 }
2323 
2324 static int
2325 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2326 {
2327 
2328 	kif->kf_type = KF_TYPE_KQUEUE;
2329 	return (0);
2330 }
2331 
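/*
 * Notify everything waiting on this kqueue: threads sleeping in
 * kqueue_scan(), select/poll waiters, other kqueues monitoring this one
 * (via the scheduled task), and SIGIO listeners when async mode is on.
 */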
2332 static void
2333 kqueue_wakeup(struct kqueue *kq)
2334 {
2335 	KQ_OWNED(kq);
2336 
2337 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2338 		kq->kq_state &= ~KQ_SLEEP;
2339 		wakeup(kq);
2340 	}
2341 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2342 		selwakeuppri(&kq->kq_sel, PSOCK);
2343 		if (!SEL_WAITING(&kq->kq_sel))
2344 			kq->kq_state &= ~KQ_SEL;
2345 	}
2346 	if (!knlist_empty(&kq->kq_sel.si_note))
2347 		kqueue_schedtask(kq);
2348 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2349 		pgsigio(&kq->kq_sigio, SIGIO, 0);
2350 	}
2351 }
2352 
2353 /*
2354  * Walk down a list of knotes, activating them if their event has triggered.
2355  *
2356  * There is a possibility to optimize in the case of one kq watching another.
2357  * Instead of scheduling a task to wake it up, you could pass enough state
2358  * down the chain to wake up the parent kqueue.  Make this code functional
2359  * first.
2360  */
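/*
 * A typical producer-side path (sketch): a subsystem that owns a struct
 * selinfo activates its watchers through the KNOTE macros, e.g.
 *
 *	KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
 *
 * where sc is a hypothetical driver softc whose own lock doubles as the
 * knlist lock, so the call arrives here with KNF_LISTLOCKED set.
 */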
2361 void
2362 knote(struct knlist *list, long hint, int lockflags)
2363 {
2364 	struct kqueue *kq;
2365 	struct knote *kn, *tkn;
2366 	int error;
2367 
2368 	if (list == NULL)
2369 		return;
2370 
2371 	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2372 
2373 	if ((lockflags & KNF_LISTLOCKED) == 0)
2374 		list->kl_lock(list->kl_lockarg);
2375 
2376 	/*
2377 	 * If we unlock the list lock (and enter influx), we can
2378 	 * eliminate the kqueue scheduling, but this will introduce
2379 	 * four lock/unlock's for each knote to test.  Also, marker
2380 	 * would be needed to keep iteration position, since filters
2381 	 * or other threads could remove events.
2382 	 */
2383 	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2384 		kq = kn->kn_kq;
2385 		KQ_LOCK(kq);
2386 		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2387 			/*
2388 			 * Do not process knotes that are in flux, except
2389 			 * for those put in flux by the kq unlock in
2390 			 * kqueue_scan().  In the latter case we do not
2391 			 * interfere with the scan, since kqueue_scan()
2392 			 * locks the knlist and cannot proceed until we
2393 			 * have finished.
2394 			 */
2395 			KQ_UNLOCK(kq);
2396 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
2397 			kn_enter_flux(kn);
2398 			KQ_UNLOCK(kq);
2399 			error = kn->kn_fop->f_event(kn, hint);
2400 			KQ_LOCK(kq);
2401 			kn_leave_flux(kn);
2402 			if (error)
2403 				KNOTE_ACTIVATE(kn, 1);
2404 			KQ_UNLOCK_FLUX(kq);
2405 		} else {
2406 			if (kn->kn_fop->f_event(kn, hint))
2407 				KNOTE_ACTIVATE(kn, 1);
2408 			KQ_UNLOCK(kq);
2409 		}
2410 	}
2411 	if ((lockflags & KNF_LISTLOCKED) == 0)
2412 		list->kl_unlock(list->kl_lockarg);
2413 }
2414 
2415 /*
2416  * add a knote to a knlist
2417  */
2418 void
2419 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2420 {
2421 
2422 	KNL_ASSERT_LOCK(knl, islocked);
2423 	KQ_NOTOWNED(kn->kn_kq);
2424 	KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2425 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2426 	    ("knote %p was not detached", kn));
2427 	if (!islocked)
2428 		knl->kl_lock(knl->kl_lockarg);
2429 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2430 	if (!islocked)
2431 		knl->kl_unlock(knl->kl_lockarg);
2432 	KQ_LOCK(kn->kn_kq);
2433 	kn->kn_knlist = knl;
2434 	kn->kn_status &= ~KN_DETACHED;
2435 	KQ_UNLOCK(kn->kn_kq);
2436 }
2437 
2438 static void
2439 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2440     int kqislocked)
2441 {
2442 
2443 	KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2444 	KNL_ASSERT_LOCK(knl, knlislocked);
2445 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2446 	KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2447 	KASSERT((kn->kn_status & KN_DETACHED) == 0,
2448 	    ("knote %p was already detached", kn));
2449 	if (!knlislocked)
2450 		knl->kl_lock(knl->kl_lockarg);
2451 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2452 	kn->kn_knlist = NULL;
2453 	if (!knlislocked)
2454 		kn_list_unlock(knl);
2455 	if (!kqislocked)
2456 		KQ_LOCK(kn->kn_kq);
2457 	kn->kn_status |= KN_DETACHED;
2458 	if (!kqislocked)
2459 		KQ_UNLOCK(kn->kn_kq);
2460 }
2461 
2462 /*
2463  * remove knote from the specified knlist
2464  */
2465 void
2466 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2467 {
2468 
2469 	knlist_remove_kq(knl, kn, islocked, 0);
2470 }
2471 
2472 int
2473 knlist_empty(struct knlist *knl)
2474 {
2475 
2476 	KNL_ASSERT_LOCKED(knl);
2477 	return (SLIST_EMPTY(&knl->kl_list));
2478 }
2479 
2480 static struct mtx knlist_lock;
2481 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2482     MTX_DEF);
2483 static void knlist_mtx_lock(void *arg);
2484 static void knlist_mtx_unlock(void *arg);
2485 
2486 static void
2487 knlist_mtx_lock(void *arg)
2488 {
2489 
2490 	mtx_lock((struct mtx *)arg);
2491 }
2492 
2493 static void
2494 knlist_mtx_unlock(void *arg)
2495 {
2496 
2497 	mtx_unlock((struct mtx *)arg);
2498 }
2499 
2500 static void
2501 knlist_mtx_assert_lock(void *arg, int what)
2502 {
2503 
2504 	if (what == LA_LOCKED)
2505 		mtx_assert((struct mtx *)arg, MA_OWNED);
2506 	else
2507 		mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2508 }
2509 
2510 static void
2511 knlist_rw_rlock(void *arg)
2512 {
2513 
2514 	rw_rlock((struct rwlock *)arg);
2515 }
2516 
2517 static void
2518 knlist_rw_runlock(void *arg)
2519 {
2520 
2521 	rw_runlock((struct rwlock *)arg);
2522 }
2523 
2524 static void
2525 knlist_rw_assert_lock(void *arg, int what)
2526 {
2527 
2528 	if (what == LA_LOCKED)
2529 		rw_assert((struct rwlock *)arg, RA_LOCKED);
2530 	else
2531 		rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2532 }
2533 
2534 void
2535 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2536     void (*kl_unlock)(void *),
2537     void (*kl_assert_lock)(void *, int))
2538 {
2539 
2540 	if (lock == NULL)
2541 		knl->kl_lockarg = &knlist_lock;
2542 	else
2543 		knl->kl_lockarg = lock;
2544 
2545 	if (kl_lock == NULL)
2546 		knl->kl_lock = knlist_mtx_lock;
2547 	else
2548 		knl->kl_lock = kl_lock;
2549 	if (kl_unlock == NULL)
2550 		knl->kl_unlock = knlist_mtx_unlock;
2551 	else
2552 		knl->kl_unlock = kl_unlock;
2553 	if (kl_assert_lock == NULL)
2554 		knl->kl_assert_lock = knlist_mtx_assert_lock;
2555 	else
2556 		knl->kl_assert_lock = kl_assert_lock;
2557 
2558 	knl->kl_autodestroy = 0;
2559 	SLIST_INIT(&knl->kl_list);
2560 }
2561 
2562 void
2563 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2564 {
2565 
2566 	knlist_init(knl, lock, NULL, NULL, NULL);
2567 }
2568 
2569 struct knlist *
2570 knlist_alloc(struct mtx *lock)
2571 {
2572 	struct knlist *knl;
2573 
2574 	knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2575 	knlist_init_mtx(knl, lock);
2576 	return (knl);
2577 }
2578 
2579 void
2580 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2581 {
2582 
2583 	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2584 	    knlist_rw_assert_lock);
2585 }
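
/*
 * Typical initialization (sketch): a driver protecting its notes with
 * its own mutex would do
 *
 *	knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
 *
 * after which KNOTE_LOCKED() may be used whenever sc_mtx is held; sc and
 * its fields are placeholders for the caller's own state.
 */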
2586 
2587 void
2588 knlist_destroy(struct knlist *knl)
2589 {
2590 
2591 	KASSERT(KNLIST_EMPTY(knl),
2592 	    ("destroying knlist %p with knotes on it", knl));
2593 }
2594 
2595 void
2596 knlist_detach(struct knlist *knl)
2597 {
2598 
2599 	KNL_ASSERT_LOCKED(knl);
2600 	knl->kl_autodestroy = 1;
2601 	if (knlist_empty(knl)) {
2602 		knlist_destroy(knl);
2603 		free(knl, M_KQUEUE);
2604 	}
2605 }
2606 
2607 /*
2608  * Even if we are locked, we may need to drop the lock to allow any influx
2609  * knotes time to "settle".
2610  */
2611 void
2612 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2613 {
2614 	struct knote *kn, *kn2;
2615 	struct kqueue *kq;
2616 
2617 	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2618 	if (islocked)
2619 		KNL_ASSERT_LOCKED(knl);
2620 	else {
2621 		KNL_ASSERT_UNLOCKED(knl);
2622 again:		/* need to reacquire lock since we have dropped it */
2623 		knl->kl_lock(knl->kl_lockarg);
2624 	}
2625 
2626 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2627 		kq = kn->kn_kq;
2628 		KQ_LOCK(kq);
2629 		if (kn_in_flux(kn)) {
2630 			KQ_UNLOCK(kq);
2631 			continue;
2632 		}
2633 		knlist_remove_kq(knl, kn, 1, 1);
2634 		if (killkn) {
2635 			kn_enter_flux(kn);
2636 			KQ_UNLOCK(kq);
2637 			knote_drop_detached(kn, td);
2638 		} else {
2639 			/* Make sure cleared knotes disappear soon */
2640 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
2641 			KQ_UNLOCK(kq);
2642 		}
2643 		kq = NULL;
2644 	}
2645 
2646 	if (!SLIST_EMPTY(&knl->kl_list)) {
2647 		/* there are still in-flux knotes remaining */
2648 		kn = SLIST_FIRST(&knl->kl_list);
2649 		kq = kn->kn_kq;
2650 		KQ_LOCK(kq);
2651 		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2652 		knl->kl_unlock(knl->kl_lockarg);
2653 		kq->kq_state |= KQ_FLUXWAIT;
2654 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2655 		kq = NULL;
2656 		goto again;
2657 	}
2658 
2659 	if (islocked)
2660 		KNL_ASSERT_LOCKED(knl);
2661 	else {
2662 		knl->kl_unlock(knl->kl_lockarg);
2663 		KNL_ASSERT_UNLOCKED(knl);
2664 	}
2665 }
2666 
2667 /*
2668  * Remove all knotes referencing a specified fd.  Must be called with the
2669  * FILEDESC lock held.  This prevents a race where a new fd comes along,
2670  * occupies the entry, and we attach a knote to the fd.
2671  */
2672 void
2673 knote_fdclose(struct thread *td, int fd)
2674 {
2675 	struct filedesc *fdp = td->td_proc->p_fd;
2676 	struct kqueue *kq;
2677 	struct knote *kn;
2678 	int influx;
2679 
2680 	FILEDESC_XLOCK_ASSERT(fdp);
2681 
2682 	/*
2683 	 * We shouldn't have to worry about new kevents appearing on this
2684 	 * fd since the filedesc lock is held.
2685 	 */
2686 	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2687 		KQ_LOCK(kq);
2688 
2689 again:
2690 		influx = 0;
2691 		while (kq->kq_knlistsize > fd &&
2692 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2693 			if (kn_in_flux(kn)) {
2694 				/* someone else might be waiting on our knote */
2695 				if (influx)
2696 					wakeup(kq);
2697 				kq->kq_state |= KQ_FLUXWAIT;
2698 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2699 				goto again;
2700 			}
2701 			kn_enter_flux(kn);
2702 			KQ_UNLOCK(kq);
2703 			influx = 1;
2704 			knote_drop(kn, td);
2705 			KQ_LOCK(kq);
2706 		}
2707 		KQ_UNLOCK_FLUX(kq);
2708 	}
2709 }
2710 
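/*
 * Link an in-flux knote into its kqueue's bookkeeping: the per-fd list
 * for fd-backed filters, the hash table otherwise.
 */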
2711 static int
2712 knote_attach(struct knote *kn, struct kqueue *kq)
2713 {
2714 	struct klist *list;
2715 
2716 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2717 	KQ_OWNED(kq);
2718 
2719 	if ((kq->kq_state & KQ_CLOSING) != 0)
2720 		return (EBADF);
2721 	if (kn->kn_fop->f_isfd) {
2722 		if (kn->kn_id >= kq->kq_knlistsize)
2723 			return (ENOMEM);
2724 		list = &kq->kq_knlist[kn->kn_id];
2725 	} else {
2726 		if (kq->kq_knhash == NULL)
2727 			return (ENOMEM);
2728 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2729 	}
2730 	SLIST_INSERT_HEAD(list, kn, kn_link);
2731 	return (0);
2732 }
2733 
2734 static void
2735 knote_drop(struct knote *kn, struct thread *td)
2736 {
2737 
2738 	if ((kn->kn_status & KN_DETACHED) == 0)
2739 		kn->kn_fop->f_detach(kn);
2740 	knote_drop_detached(kn, td);
2741 }
2742 
2743 static void
2744 knote_drop_detached(struct knote *kn, struct thread *td)
2745 {
2746 	struct kqueue *kq;
2747 	struct klist *list;
2748 
2749 	kq = kn->kn_kq;
2750 
2751 	KASSERT((kn->kn_status & KN_DETACHED) != 0,
2752 	    ("knote %p still attached", kn));
2753 	KQ_NOTOWNED(kq);
2754 
2755 	KQ_LOCK(kq);
2756 	KASSERT(kn->kn_influx == 1,
2757 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2758 
2759 	if (kn->kn_fop->f_isfd)
2760 		list = &kq->kq_knlist[kn->kn_id];
2761 	else
2762 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2763 
2764 	if (!SLIST_EMPTY(list))
2765 		SLIST_REMOVE(list, kn, knote, kn_link);
2766 	if (kn->kn_status & KN_QUEUED)
2767 		knote_dequeue(kn);
2768 	KQ_UNLOCK_FLUX(kq);
2769 
2770 	if (kn->kn_fop->f_isfd) {
2771 		fdrop(kn->kn_fp, td);
2772 		kn->kn_fp = NULL;
2773 	}
2774 	kqueue_fo_release(kn->kn_kevent.filter);
2775 	kn->kn_fop = NULL;
2776 	knote_free(kn);
2777 }
2778 
2779 static void
2780 knote_enqueue(struct knote *kn)
2781 {
2782 	struct kqueue *kq = kn->kn_kq;
2783 
2784 	KQ_OWNED(kn->kn_kq);
2785 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2786 
2787 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2788 	kn->kn_status |= KN_QUEUED;
2789 	kq->kq_count++;
2790 	kqueue_wakeup(kq);
2791 }
2792 
2793 static void
2794 knote_dequeue(struct knote *kn)
2795 {
2796 	struct kqueue *kq = kn->kn_kq;
2797 
2798 	KQ_OWNED(kn->kn_kq);
2799 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2800 
2801 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2802 	kn->kn_status &= ~KN_QUEUED;
2803 	kq->kq_count--;
2804 }
2805 
2806 static void
2807 knote_init(void)
2808 {
2809 
2810 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2811 	    NULL, NULL, UMA_ALIGN_PTR, 0);
2812 }
2813 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2814 
2815 static struct knote *
2816 knote_alloc(int mflag)
2817 {
2818 
2819 	return (uma_zalloc(knote_zone, mflag | M_ZERO));
2820 }
2821 
2822 static void
2823 knote_free(struct knote *kn)
2824 {
2825 
2826 	uma_zfree(knote_zone, kn);
2827 }
2828 
2829 /*
2830  * Register the kev with the kq specified by fd.
2831  */
2832 int
2833 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
2834 {
2835 	struct kqueue *kq;
2836 	struct file *fp;
2837 	cap_rights_t rights;
2838 	int error;
2839 
2840 	error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
2841 	    &fp);
2842 	if (error != 0)
2843 		return (error);
2844 	if ((error = kqueue_acquire(fp, &kq)) != 0)
2845 		goto noacquire;
2846 
2847 	error = kqueue_register(kq, kev, td, mflag);
2848 	kqueue_release(kq, 0);
2849 
2850 noacquire:
2851 	fdrop(fp, td);
2852 	return (error);
2853 }
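
/*
 * Example caller (sketch, loosely modeled on the AIO code): arranging
 * for an EVFILT_AIO event on a user-supplied kqueue descriptor, where
 * ujob, job and kqfd are placeholders for the caller's own state:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, (uintptr_t)ujob, EVFILT_AIO, EV_ADD | EV_FLAG1,
 *	    0, 0, job);
 *	error = kqfd_register(kqfd, &kev, td, M_WAITOK);
 */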
2854