xref: /freebsd/sys/kern/kern_event.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/malloc.h>
38 #include <sys/unistd.h>
39 #include <sys/file.h>
40 #include <sys/filedesc.h>
41 #include <sys/filio.h>
42 #include <sys/fcntl.h>
43 #include <sys/kthread.h>
44 #include <sys/selinfo.h>
45 #include <sys/queue.h>
46 #include <sys/event.h>
47 #include <sys/eventvar.h>
48 #include <sys/poll.h>
49 #include <sys/protosw.h>
50 #include <sys/sigio.h>
51 #include <sys/signalvar.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/stat.h>
55 #include <sys/sysctl.h>
56 #include <sys/sysproto.h>
57 #include <sys/syscallsubr.h>
58 #include <sys/taskqueue.h>
59 #include <sys/uio.h>
60 
61 #include <vm/uma.h>
62 
63 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
64 
65 /*
66  * This lock is used if multiple kq locks are required.  This possibly
67  * should be made into a per proc lock.
68  */
69 static struct mtx	kq_global;
70 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
71 #define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
72 	if (!haslck)				\
73 		mtx_lock(lck);			\
74 	haslck = 1;				\
75 } while (0)
76 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
77 	if (haslck)				\
78 		mtx_unlock(lck);			\
79 	haslck = 0;				\
80 } while (0)
81 
82 TASKQUEUE_DEFINE_THREAD(kqueue);
83 
84 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
85 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
86 static int	kqueue_aquire(struct file *fp, struct kqueue **kqp);
87 static void	kqueue_release(struct kqueue *kq, int locked);
88 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
89 		    uintptr_t ident, int waitok);
90 static void	kqueue_task(void *arg, int pending);
91 static int	kqueue_scan(struct kqueue *kq, int maxevents,
92 		    struct kevent_copyops *k_ops,
93 		    const struct timespec *timeout,
94 		    struct kevent *keva, struct thread *td);
95 static void 	kqueue_wakeup(struct kqueue *kq);
96 static struct filterops *kqueue_fo_find(int filt);
97 static void	kqueue_fo_release(int filt);
98 
99 static fo_rdwr_t	kqueue_read;
100 static fo_rdwr_t	kqueue_write;
101 static fo_ioctl_t	kqueue_ioctl;
102 static fo_poll_t	kqueue_poll;
103 static fo_kqfilter_t	kqueue_kqfilter;
104 static fo_stat_t	kqueue_stat;
105 static fo_close_t	kqueue_close;
106 
107 static struct fileops kqueueops = {
108 	.fo_read = kqueue_read,
109 	.fo_write = kqueue_write,
110 	.fo_ioctl = kqueue_ioctl,
111 	.fo_poll = kqueue_poll,
112 	.fo_kqfilter = kqueue_kqfilter,
113 	.fo_stat = kqueue_stat,
114 	.fo_close = kqueue_close,
115 };
116 
117 static int 	knote_attach(struct knote *kn, struct kqueue *kq);
118 static void 	knote_drop(struct knote *kn, struct thread *td);
119 static void 	knote_enqueue(struct knote *kn);
120 static void 	knote_dequeue(struct knote *kn);
121 static void 	knote_init(void);
122 static struct 	knote *knote_alloc(int waitok);
123 static void 	knote_free(struct knote *kn);
124 
125 static void	filt_kqdetach(struct knote *kn);
126 static int	filt_kqueue(struct knote *kn, long hint);
127 static int	filt_procattach(struct knote *kn);
128 static void	filt_procdetach(struct knote *kn);
129 static int	filt_proc(struct knote *kn, long hint);
130 static int	filt_fileattach(struct knote *kn);
131 static void	filt_timerexpire(void *knx);
132 static int	filt_timerattach(struct knote *kn);
133 static void	filt_timerdetach(struct knote *kn);
134 static int	filt_timer(struct knote *kn, long hint);
135 
136 static struct filterops file_filtops =
137 	{ 1, filt_fileattach, NULL, NULL };
138 static struct filterops kqread_filtops =
139 	{ 1, NULL, filt_kqdetach, filt_kqueue };
140 /* XXX - move to kern_proc.c?  */
141 static struct filterops proc_filtops =
142 	{ 0, filt_procattach, filt_procdetach, filt_proc };
143 static struct filterops timer_filtops =
144 	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
145 
146 static uma_zone_t	knote_zone;
147 static int 		kq_ncallouts = 0;
148 static int 		kq_calloutmax = (4 * 1024);
149 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
150     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
151 
152 /* XXX - ensure not KN_INFLUX?? */
153 #define KNOTE_ACTIVATE(kn, islock) do { 				\
154 	if ((islock))							\
155 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
156 	else								\
157 		KQ_LOCK((kn)->kn_kq);					\
158 	(kn)->kn_status |= KN_ACTIVE;					\
159 	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
160 		knote_enqueue((kn));					\
161 	if (!(islock))							\
162 		KQ_UNLOCK((kn)->kn_kq);					\
163 } while(0)
164 #define KQ_LOCK(kq) do {						\
165 	mtx_lock(&(kq)->kq_lock);					\
166 } while (0)
167 #define KQ_FLUX_WAKEUP(kq) do {						\
168 	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
169 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
170 		wakeup((kq));						\
171 	}								\
172 } while (0)
173 #define KQ_UNLOCK_FLUX(kq) do {						\
174 	KQ_FLUX_WAKEUP(kq);						\
175 	mtx_unlock(&(kq)->kq_lock);					\
176 } while (0)
177 #define KQ_UNLOCK(kq) do {						\
178 	mtx_unlock(&(kq)->kq_lock);					\
179 } while (0)
180 #define KQ_OWNED(kq) do {						\
181 	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
182 } while (0)
183 #define KQ_NOTOWNED(kq) do {						\
184 	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
185 } while (0)
186 #define KN_LIST_LOCK(kn) do {						\
187 	if (kn->kn_knlist != NULL)					\
188 		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
189 } while (0)
190 #define KN_LIST_UNLOCK(kn) do {						\
191 	if (kn->kn_knlist != NULL) 					\
192 		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
193 } while (0)
194 #define	KNL_ASSERT_LOCK(knl, islocked) do {				\
195 	if (islocked)							\
196 		KNL_ASSERT_LOCKED(knl);				\
197 	else								\
198 		KNL_ASSERT_UNLOCKED(knl);				\
199 } while (0)
200 #ifdef INVARIANTS
201 #define	KNL_ASSERT_LOCKED(knl) do {					\
202 	if (!knl->kl_locked((knl)->kl_lockarg))				\
203 			panic("knlist not locked, but should be");	\
204 } while (0)
205 #define	KNL_ASSERT_UNLOCKED(knl) do {				\
206 	if (knl->kl_locked((knl)->kl_lockarg))				\
207 		panic("knlist locked, but should not be");		\
208 } while (0)
209 #else /* !INVARIANTS */
210 #define	KNL_ASSERT_LOCKED(knl) do {} while(0)
211 #define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
212 #endif /* INVARIANTS */
213 
214 #define	KN_HASHSIZE		64		/* XXX should be tunable */
215 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
216 
217 static int
218 filt_nullattach(struct knote *kn)
219 {
220 
221 	return (ENXIO);
222 };
223 
224 struct filterops null_filtops =
225 	{ 0, filt_nullattach, NULL, NULL };
226 
227 /* XXX - make SYSINIT to add these, and move into respective modules. */
228 extern struct filterops sig_filtops;
229 extern struct filterops fs_filtops;
230 
231 /*
232  * Table for for all system-defined filters.
233  */
234 static struct mtx	filterops_lock;
235 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
236 	MTX_DEF);
237 static struct {
238 	struct filterops *for_fop;
239 	int for_refcnt;
240 } sysfilt_ops[EVFILT_SYSCOUNT] = {
241 	{ &file_filtops },			/* EVFILT_READ */
242 	{ &file_filtops },			/* EVFILT_WRITE */
243 	{ &null_filtops },			/* EVFILT_AIO */
244 	{ &file_filtops },			/* EVFILT_VNODE */
245 	{ &proc_filtops },			/* EVFILT_PROC */
246 	{ &sig_filtops },			/* EVFILT_SIGNAL */
247 	{ &timer_filtops },			/* EVFILT_TIMER */
248 	{ &file_filtops },			/* EVFILT_NETDEV */
249 	{ &fs_filtops },			/* EVFILT_FS */
250 	{ &null_filtops },			/* EVFILT_LIO */
251 };
252 
253 /*
254  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
255  * method.
256  */
257 static int
258 filt_fileattach(struct knote *kn)
259 {
260 
261 	return (fo_kqfilter(kn->kn_fp, kn));
262 }
263 
264 /*ARGSUSED*/
265 static int
266 kqueue_kqfilter(struct file *fp, struct knote *kn)
267 {
268 	struct kqueue *kq = kn->kn_fp->f_data;
269 
270 	if (kn->kn_filter != EVFILT_READ)
271 		return (EINVAL);
272 
273 	kn->kn_status |= KN_KQUEUE;
274 	kn->kn_fop = &kqread_filtops;
275 	knlist_add(&kq->kq_sel.si_note, kn, 0);
276 
277 	return (0);
278 }
279 
280 static void
281 filt_kqdetach(struct knote *kn)
282 {
283 	struct kqueue *kq = kn->kn_fp->f_data;
284 
285 	knlist_remove(&kq->kq_sel.si_note, kn, 0);
286 }
287 
288 /*ARGSUSED*/
289 static int
290 filt_kqueue(struct knote *kn, long hint)
291 {
292 	struct kqueue *kq = kn->kn_fp->f_data;
293 
294 	kn->kn_data = kq->kq_count;
295 	return (kn->kn_data > 0);
296 }
297 
298 /* XXX - move to kern_proc.c?  */
299 static int
300 filt_procattach(struct knote *kn)
301 {
302 	struct proc *p;
303 	int immediate;
304 	int error;
305 
306 	immediate = 0;
307 	p = pfind(kn->kn_id);
308 	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
309 		p = zpfind(kn->kn_id);
310 		immediate = 1;
311 	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
312 		immediate = 1;
313 	}
314 
315 	if (p == NULL)
316 		return (ESRCH);
317 	if ((error = p_cansee(curthread, p)))
318 		return (error);
319 
320 	kn->kn_ptr.p_proc = p;
321 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
322 
323 	/*
324 	 * internal flag indicating registration done by kernel
325 	 */
326 	if (kn->kn_flags & EV_FLAG1) {
327 		kn->kn_data = kn->kn_sdata;		/* ppid */
328 		kn->kn_fflags = NOTE_CHILD;
329 		kn->kn_flags &= ~EV_FLAG1;
330 	}
331 
332 	if (immediate == 0)
333 		knlist_add(&p->p_klist, kn, 1);
334 
335 	/*
336 	 * Immediately activate any exit notes if the target process is a
337 	 * zombie.  This is necessary to handle the case where the target
338 	 * process, e.g. a child, dies before the kevent is registered.
339 	 */
340 	if (immediate && filt_proc(kn, NOTE_EXIT))
341 		KNOTE_ACTIVATE(kn, 0);
342 
343 	PROC_UNLOCK(p);
344 
345 	return (0);
346 }
347 
348 /*
349  * The knote may be attached to a different process, which may exit,
350  * leaving nothing for the knote to be attached to.  So when the process
351  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
352  * it will be deleted when read out.  However, as part of the knote deletion,
353  * this routine is called, so a check is needed to avoid actually performing
354  * a detach, because the original process does not exist any more.
355  */
356 /* XXX - move to kern_proc.c?  */
357 static void
358 filt_procdetach(struct knote *kn)
359 {
360 	struct proc *p;
361 
362 	p = kn->kn_ptr.p_proc;
363 	knlist_remove(&p->p_klist, kn, 0);
364 	kn->kn_ptr.p_proc = NULL;
365 }
366 
367 /* XXX - move to kern_proc.c?  */
368 static int
369 filt_proc(struct knote *kn, long hint)
370 {
371 	struct proc *p = kn->kn_ptr.p_proc;
372 	u_int event;
373 
374 	/*
375 	 * mask off extra data
376 	 */
377 	event = (u_int)hint & NOTE_PCTRLMASK;
378 
379 	/*
380 	 * if the user is interested in this event, record it.
381 	 */
382 	if (kn->kn_sfflags & event)
383 		kn->kn_fflags |= event;
384 
385 	/*
386 	 * process is gone, so flag the event as finished.
387 	 */
388 	if (event == NOTE_EXIT) {
389 		if (!(kn->kn_status & KN_DETACHED))
390 			knlist_remove_inevent(&p->p_klist, kn);
391 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
392 		kn->kn_ptr.p_proc = NULL;
393 		return (1);
394 	}
395 
396 	/*
397 	 * process forked, and user wants to track the new process,
398 	 * so attach a new knote to it, and immediately report an
399 	 * event with the parent's pid.
400 	 */
401 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
402 		struct kevent kev;
403 		int error;
404 
405 		/*
406 		 * register knote with new process.
407 		 */
408 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
409 		kev.filter = kn->kn_filter;
410 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
411 		kev.fflags = kn->kn_sfflags;
412 		kev.data = kn->kn_id;			/* parent */
413 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
414 		error = kqueue_register(kn->kn_kq, &kev, NULL, 0);
415 		if (error)
416 			kn->kn_fflags |= NOTE_TRACKERR;
417 	}
418 
419 	return (kn->kn_fflags != 0);
420 }
421 
422 static int
423 timertoticks(intptr_t data)
424 {
425 	struct timeval tv;
426 	int tticks;
427 
428 	tv.tv_sec = data / 1000;
429 	tv.tv_usec = (data % 1000) * 1000;
430 	tticks = tvtohz(&tv);
431 
432 	return tticks;
433 }
434 
435 /* XXX - move to kern_timeout.c? */
436 static void
437 filt_timerexpire(void *knx)
438 {
439 	struct knote *kn = knx;
440 	struct callout *calloutp;
441 
442 	kn->kn_data++;
443 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
444 
445 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
446 		calloutp = (struct callout *)kn->kn_hook;
447 		callout_reset(calloutp, timertoticks(kn->kn_sdata),
448 		    filt_timerexpire, kn);
449 	}
450 }
451 
452 /*
453  * data contains amount of time to sleep, in milliseconds
454  */
455 /* XXX - move to kern_timeout.c? */
456 static int
457 filt_timerattach(struct knote *kn)
458 {
459 	struct callout *calloutp;
460 
461 	atomic_add_int(&kq_ncallouts, 1);
462 
463 	if (kq_ncallouts >= kq_calloutmax) {
464 		atomic_add_int(&kq_ncallouts, -1);
465 		return (ENOMEM);
466 	}
467 
468 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
469 	kn->kn_status &= ~KN_DETACHED;		/* knlist_add usually sets it */
470 	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
471 	    M_KQUEUE, M_WAITOK);
472 	callout_init(calloutp, CALLOUT_MPSAFE);
473 	kn->kn_hook = calloutp;
474 	callout_reset(calloutp, timertoticks(kn->kn_sdata), filt_timerexpire,
475 	    kn);
476 
477 	return (0);
478 }
479 
480 /* XXX - move to kern_timeout.c? */
481 static void
482 filt_timerdetach(struct knote *kn)
483 {
484 	struct callout *calloutp;
485 
486 	calloutp = (struct callout *)kn->kn_hook;
487 	callout_drain(calloutp);
488 	FREE(calloutp, M_KQUEUE);
489 	atomic_add_int(&kq_ncallouts, -1);
490 	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
491 }
492 
493 /* XXX - move to kern_timeout.c? */
494 static int
495 filt_timer(struct knote *kn, long hint)
496 {
497 
498 	return (kn->kn_data != 0);
499 }
500 
501 /*
502  * MPSAFE
503  */
504 int
505 kqueue(struct thread *td, struct kqueue_args *uap)
506 {
507 	struct filedesc *fdp;
508 	struct kqueue *kq;
509 	struct file *fp;
510 	int fd, error;
511 
512 	fdp = td->td_proc->p_fd;
513 	error = falloc(td, &fp, &fd);
514 	if (error)
515 		goto done2;
516 
517 	/* An extra reference on `nfp' has been held for us by falloc(). */
518 	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
519 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
520 	TAILQ_INIT(&kq->kq_head);
521 	kq->kq_fdp = fdp;
522 	knlist_init(&kq->kq_sel.si_note, &kq->kq_lock, NULL, NULL, NULL);
523 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
524 
525 	FILEDESC_LOCK_FAST(fdp);
526 	SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
527 	FILEDESC_UNLOCK_FAST(fdp);
528 
529 	FILE_LOCK(fp);
530 	fp->f_flag = FREAD | FWRITE;
531 	fp->f_type = DTYPE_KQUEUE;
532 	fp->f_ops = &kqueueops;
533 	fp->f_data = kq;
534 	FILE_UNLOCK(fp);
535 	fdrop(fp, td);
536 
537 	td->td_retval[0] = fd;
538 done2:
539 	return (error);
540 }
541 
542 #ifndef _SYS_SYSPROTO_H_
543 struct kevent_args {
544 	int	fd;
545 	const struct kevent *changelist;
546 	int	nchanges;
547 	struct	kevent *eventlist;
548 	int	nevents;
549 	const struct timespec *timeout;
550 };
551 #endif
552 /*
553  * MPSAFE
554  */
555 int
556 kevent(struct thread *td, struct kevent_args *uap)
557 {
558 	struct timespec ts, *tsp;
559 	struct kevent_copyops k_ops = { uap,
560 					kevent_copyout,
561 					kevent_copyin};
562 	int error;
563 
564 	if (uap->timeout != NULL) {
565 		error = copyin(uap->timeout, &ts, sizeof(ts));
566 		if (error)
567 			return (error);
568 		tsp = &ts;
569 	} else
570 		tsp = NULL;
571 
572 	return (kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
573 	    &k_ops, tsp));
574 }
575 
576 /*
577  * Copy 'count' items into the destination list pointed to by uap->eventlist.
578  */
579 static int
580 kevent_copyout(void *arg, struct kevent *kevp, int count)
581 {
582 	struct kevent_args *uap;
583 	int error;
584 
585 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
586 	uap = (struct kevent_args *)arg;
587 
588 	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
589 	if (error == 0)
590 		uap->eventlist += count;
591 	return (error);
592 }
593 
594 /*
595  * Copy 'count' items from the list pointed to by uap->changelist.
596  */
597 static int
598 kevent_copyin(void *arg, struct kevent *kevp, int count)
599 {
600 	struct kevent_args *uap;
601 	int error;
602 
603 	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
604 	uap = (struct kevent_args *)arg;
605 
606 	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
607 	if (error == 0)
608 		uap->changelist += count;
609 	return (error);
610 }
611 
612 int
613 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
614     struct kevent_copyops *k_ops, const struct timespec *timeout)
615 {
616 	struct kevent keva[KQ_NEVENTS];
617 	struct kevent *kevp, *changes;
618 	struct kqueue *kq;
619 	struct file *fp;
620 	int i, n, nerrors, error;
621 
622 	if ((error = fget(td, fd, &fp)) != 0)
623 		return (error);
624 	if ((error = kqueue_aquire(fp, &kq)) != 0)
625 		goto done_norel;
626 
627 	nerrors = 0;
628 
629 	while (nchanges > 0) {
630 		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
631 		error = k_ops->k_copyin(k_ops->arg, keva, n);
632 		if (error)
633 			goto done;
634 		changes = keva;
635 		for (i = 0; i < n; i++) {
636 			kevp = &changes[i];
637 			if (!kevp->filter)
638 				continue;
639 			kevp->flags &= ~EV_SYSFLAGS;
640 			error = kqueue_register(kq, kevp, td, 1);
641 			if (error) {
642 				if (nevents != 0) {
643 					kevp->flags = EV_ERROR;
644 					kevp->data = error;
645 					(void) k_ops->k_copyout(k_ops->arg,
646 					    kevp, 1);
647 					nevents--;
648 					nerrors++;
649 				} else {
650 					goto done;
651 				}
652 			}
653 		}
654 		nchanges -= n;
655 	}
656 	if (nerrors) {
657 		td->td_retval[0] = nerrors;
658 		error = 0;
659 		goto done;
660 	}
661 
662 	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
663 done:
664 	kqueue_release(kq, 0);
665 done_norel:
666 	if (fp != NULL)
667 		fdrop(fp, td);
668 	return (error);
669 }
670 
671 int
672 kqueue_add_filteropts(int filt, struct filterops *filtops)
673 {
674 	int error;
675 
676 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
677 		printf(
678 "trying to add a filterop that is out of range: %d is beyond %d\n",
679 		    ~filt, EVFILT_SYSCOUNT);
680 		return EINVAL;
681 	}
682 	mtx_lock(&filterops_lock);
683 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
684 	    sysfilt_ops[~filt].for_fop != NULL)
685 		error = EEXIST;
686 	else {
687 		sysfilt_ops[~filt].for_fop = filtops;
688 		sysfilt_ops[~filt].for_refcnt = 0;
689 	}
690 	mtx_unlock(&filterops_lock);
691 
692 	return (0);
693 }
694 
695 int
696 kqueue_del_filteropts(int filt)
697 {
698 	int error;
699 
700 	error = 0;
701 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
702 		return EINVAL;
703 
704 	mtx_lock(&filterops_lock);
705 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
706 	    sysfilt_ops[~filt].for_fop == NULL)
707 		error = EINVAL;
708 	else if (sysfilt_ops[~filt].for_refcnt != 0)
709 		error = EBUSY;
710 	else {
711 		sysfilt_ops[~filt].for_fop = &null_filtops;
712 		sysfilt_ops[~filt].for_refcnt = 0;
713 	}
714 	mtx_unlock(&filterops_lock);
715 
716 	return error;
717 }
718 
719 static struct filterops *
720 kqueue_fo_find(int filt)
721 {
722 
723 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
724 		return NULL;
725 
726 	mtx_lock(&filterops_lock);
727 	sysfilt_ops[~filt].for_refcnt++;
728 	if (sysfilt_ops[~filt].for_fop == NULL)
729 		sysfilt_ops[~filt].for_fop = &null_filtops;
730 	mtx_unlock(&filterops_lock);
731 
732 	return sysfilt_ops[~filt].for_fop;
733 }
734 
735 static void
736 kqueue_fo_release(int filt)
737 {
738 
739 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
740 		return;
741 
742 	mtx_lock(&filterops_lock);
743 	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
744 	    ("filter object refcount not valid on release"));
745 	sysfilt_ops[~filt].for_refcnt--;
746 	mtx_unlock(&filterops_lock);
747 }
748 
749 /*
750  * A ref to kq (obtained via kqueue_aquire) should be held.  waitok will
751  * influence if memory allocation should wait.  Make sure it is 0 if you
752  * hold any mutexes.
753  */
754 int
755 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
756 {
757 	struct filterops *fops;
758 	struct file *fp;
759 	struct knote *kn, *tkn;
760 	int error, filt, event;
761 	int haskqglobal;
762 
763 	fp = NULL;
764 	kn = NULL;
765 	error = 0;
766 	haskqglobal = 0;
767 
768 	filt = kev->filter;
769 	fops = kqueue_fo_find(filt);
770 	if (fops == NULL)
771 		return EINVAL;
772 
773 	tkn = knote_alloc(waitok);		/* prevent waiting with locks */
774 
775 findkn:
776 	if (fops->f_isfd) {
777 		KASSERT(td != NULL, ("td is NULL"));
778 		error = fget(td, kev->ident, &fp);
779 		if (error)
780 			goto done;
781 
782 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
783 		    kev->ident, 0) != 0) {
784 			/* try again */
785 			fdrop(fp, td);
786 			fp = NULL;
787 			error = kqueue_expand(kq, fops, kev->ident, waitok);
788 			if (error)
789 				goto done;
790 			goto findkn;
791 		}
792 
793 		if (fp->f_type == DTYPE_KQUEUE) {
794 			/*
795 			 * if we add some inteligence about what we are doing,
796 			 * we should be able to support events on ourselves.
797 			 * We need to know when we are doing this to prevent
798 			 * getting both the knlist lock and the kq lock since
799 			 * they are the same thing.
800 			 */
801 			if (fp->f_data == kq) {
802 				error = EINVAL;
803 				goto done;
804 			}
805 
806 			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
807 		}
808 
809 		KQ_LOCK(kq);
810 		if (kev->ident < kq->kq_knlistsize) {
811 			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
812 				if (kev->filter == kn->kn_filter)
813 					break;
814 		}
815 	} else {
816 		if ((kev->flags & EV_ADD) == EV_ADD)
817 			kqueue_expand(kq, fops, kev->ident, waitok);
818 
819 		KQ_LOCK(kq);
820 		if (kq->kq_knhashmask != 0) {
821 			struct klist *list;
822 
823 			list = &kq->kq_knhash[
824 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
825 			SLIST_FOREACH(kn, list, kn_link)
826 				if (kev->ident == kn->kn_id &&
827 				    kev->filter == kn->kn_filter)
828 					break;
829 		}
830 	}
831 
832 	/* knote is in the process of changing, wait for it to stablize. */
833 	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
834 		if (fp != NULL) {
835 			fdrop(fp, td);
836 			fp = NULL;
837 		}
838 		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
839 		kq->kq_state |= KQ_FLUXWAIT;
840 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
841 		goto findkn;
842 	}
843 
844 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
845 		KQ_UNLOCK(kq);
846 		error = ENOENT;
847 		goto done;
848 	}
849 
850 	/*
851 	 * kn now contains the matching knote, or NULL if no match
852 	 */
853 	if (kev->flags & EV_ADD) {
854 		if (kn == NULL) {
855 			kn = tkn;
856 			tkn = NULL;
857 			if (kn == NULL) {
858 				KQ_UNLOCK(kq);
859 				error = ENOMEM;
860 				goto done;
861 			}
862 			kn->kn_fp = fp;
863 			kn->kn_kq = kq;
864 			kn->kn_fop = fops;
865 			/*
866 			 * apply reference counts to knote structure, and
867 			 * do not release it at the end of this routine.
868 			 */
869 			fops = NULL;
870 			fp = NULL;
871 
872 			kn->kn_sfflags = kev->fflags;
873 			kn->kn_sdata = kev->data;
874 			kev->fflags = 0;
875 			kev->data = 0;
876 			kn->kn_kevent = *kev;
877 			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
878 			    EV_ENABLE | EV_DISABLE);
879 			kn->kn_status = KN_INFLUX|KN_DETACHED;
880 
881 			error = knote_attach(kn, kq);
882 			KQ_UNLOCK(kq);
883 			if (error != 0) {
884 				tkn = kn;
885 				goto done;
886 			}
887 
888 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
889 				knote_drop(kn, td);
890 				goto done;
891 			}
892 			KN_LIST_LOCK(kn);
893 		} else {
894 			/*
895 			 * The user may change some filter values after the
896 			 * initial EV_ADD, but doing so will not reset any
897 			 * filter which has already been triggered.
898 			 */
899 			kn->kn_status |= KN_INFLUX;
900 			KQ_UNLOCK(kq);
901 			KN_LIST_LOCK(kn);
902 			kn->kn_sfflags = kev->fflags;
903 			kn->kn_sdata = kev->data;
904 			kn->kn_kevent.udata = kev->udata;
905 		}
906 
907 		/*
908 		 * We can get here with kn->kn_knlist == NULL.
909 		 * This can happen when the initial attach event decides that
910 		 * the event is "completed" already.  i.e. filt_procattach
911 		 * is called on a zombie process.  It will call filt_proc
912 		 * which will remove it from the list, and NULL kn_knlist.
913 		 */
914 		event = kn->kn_fop->f_event(kn, 0);
915 		KQ_LOCK(kq);
916 		if (event)
917 			KNOTE_ACTIVATE(kn, 1);
918 		kn->kn_status &= ~KN_INFLUX;
919 		KN_LIST_UNLOCK(kn);
920 	} else if (kev->flags & EV_DELETE) {
921 		kn->kn_status |= KN_INFLUX;
922 		KQ_UNLOCK(kq);
923 		if (!(kn->kn_status & KN_DETACHED))
924 			kn->kn_fop->f_detach(kn);
925 		knote_drop(kn, td);
926 		goto done;
927 	}
928 
929 	if ((kev->flags & EV_DISABLE) &&
930 	    ((kn->kn_status & KN_DISABLED) == 0)) {
931 		kn->kn_status |= KN_DISABLED;
932 	}
933 
934 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
935 		kn->kn_status &= ~KN_DISABLED;
936 		if ((kn->kn_status & KN_ACTIVE) &&
937 		    ((kn->kn_status & KN_QUEUED) == 0))
938 			knote_enqueue(kn);
939 	}
940 	KQ_UNLOCK_FLUX(kq);
941 
942 done:
943 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
944 	if (fp != NULL)
945 		fdrop(fp, td);
946 	if (tkn != NULL)
947 		knote_free(tkn);
948 	if (fops != NULL)
949 		kqueue_fo_release(filt);
950 	return (error);
951 }
952 
953 static int
954 kqueue_aquire(struct file *fp, struct kqueue **kqp)
955 {
956 	int error;
957 	struct kqueue *kq;
958 
959 	error = 0;
960 
961 	FILE_LOCK(fp);
962 	do {
963 		kq = fp->f_data;
964 		if (fp->f_type != DTYPE_KQUEUE || kq == NULL) {
965 			error = EBADF;
966 			break;
967 		}
968 		*kqp = kq;
969 		KQ_LOCK(kq);
970 		if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
971 			KQ_UNLOCK(kq);
972 			error = EBADF;
973 			break;
974 		}
975 		kq->kq_refcnt++;
976 		KQ_UNLOCK(kq);
977 	} while (0);
978 	FILE_UNLOCK(fp);
979 
980 	return error;
981 }
982 
983 static void
984 kqueue_release(struct kqueue *kq, int locked)
985 {
986 	if (locked)
987 		KQ_OWNED(kq);
988 	else
989 		KQ_LOCK(kq);
990 	kq->kq_refcnt--;
991 	if (kq->kq_refcnt == 1)
992 		wakeup(&kq->kq_refcnt);
993 	if (!locked)
994 		KQ_UNLOCK(kq);
995 }
996 
997 static void
998 kqueue_schedtask(struct kqueue *kq)
999 {
1000 
1001 	KQ_OWNED(kq);
1002 	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1003 	    ("scheduling kqueue task while draining"));
1004 
1005 	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1006 		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
1007 		kq->kq_state |= KQ_TASKSCHED;
1008 	}
1009 }
1010 
1011 /*
1012  * Expand the kq to make sure we have storage for fops/ident pair.
1013  *
1014  * Return 0 on success (or no work necessary), return errno on failure.
1015  *
1016  * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
1017  * If kqueue_register is called from a non-fd context, there usually/should
1018  * be no locks held.
1019  */
1020 static int
1021 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1022 	int waitok)
1023 {
1024 	struct klist *list, *tmp_knhash;
1025 	u_long tmp_knhashmask;
1026 	int size;
1027 	int fd;
1028 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
1029 
1030 	KQ_NOTOWNED(kq);
1031 
1032 	if (fops->f_isfd) {
1033 		fd = ident;
1034 		if (kq->kq_knlistsize <= fd) {
1035 			size = kq->kq_knlistsize;
1036 			while (size <= fd)
1037 				size += KQEXTENT;
1038 			MALLOC(list, struct klist *,
1039 			    size * sizeof list, M_KQUEUE, mflag);
1040 			if (list == NULL)
1041 				return ENOMEM;
1042 			KQ_LOCK(kq);
1043 			if (kq->kq_knlistsize > fd) {
1044 				FREE(list, M_KQUEUE);
1045 				list = NULL;
1046 			} else {
1047 				if (kq->kq_knlist != NULL) {
1048 					bcopy(kq->kq_knlist, list,
1049 					    kq->kq_knlistsize * sizeof list);
1050 					FREE(kq->kq_knlist, M_KQUEUE);
1051 					kq->kq_knlist = NULL;
1052 				}
1053 				bzero((caddr_t)list +
1054 				    kq->kq_knlistsize * sizeof list,
1055 				    (size - kq->kq_knlistsize) * sizeof list);
1056 				kq->kq_knlistsize = size;
1057 				kq->kq_knlist = list;
1058 			}
1059 			KQ_UNLOCK(kq);
1060 		}
1061 	} else {
1062 		if (kq->kq_knhashmask == 0) {
1063 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1064 			    &tmp_knhashmask);
1065 			if (tmp_knhash == NULL)
1066 				return ENOMEM;
1067 			KQ_LOCK(kq);
1068 			if (kq->kq_knhashmask == 0) {
1069 				kq->kq_knhash = tmp_knhash;
1070 				kq->kq_knhashmask = tmp_knhashmask;
1071 			} else {
1072 				free(tmp_knhash, M_KQUEUE);
1073 			}
1074 			KQ_UNLOCK(kq);
1075 		}
1076 	}
1077 
1078 	KQ_NOTOWNED(kq);
1079 	return 0;
1080 }
1081 
1082 static void
1083 kqueue_task(void *arg, int pending)
1084 {
1085 	struct kqueue *kq;
1086 	int haskqglobal;
1087 
1088 	haskqglobal = 0;
1089 	kq = arg;
1090 
1091 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1092 	KQ_LOCK(kq);
1093 
1094 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1095 
1096 	kq->kq_state &= ~KQ_TASKSCHED;
1097 	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1098 		wakeup(&kq->kq_state);
1099 	}
1100 	KQ_UNLOCK(kq);
1101 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1102 }
1103 
1104 /*
1105  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1106  * We treat KN_MARKER knotes as if they are INFLUX.
1107  */
1108 static int
1109 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1110     const struct timespec *tsp, struct kevent *keva, struct thread *td)
1111 {
1112 	struct kevent *kevp;
1113 	struct timeval atv, rtv, ttv;
1114 	struct knote *kn, *marker;
1115 	int count, timeout, nkev, error;
1116 	int haskqglobal;
1117 
1118 	count = maxevents;
1119 	nkev = 0;
1120 	error = 0;
1121 	haskqglobal = 0;
1122 
1123 	if (maxevents == 0)
1124 		goto done_nl;
1125 
1126 	if (tsp != NULL) {
1127 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
1128 		if (itimerfix(&atv)) {
1129 			error = EINVAL;
1130 			goto done_nl;
1131 		}
1132 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1133 			timeout = -1;
1134 		else
1135 			timeout = atv.tv_sec > 24 * 60 * 60 ?
1136 			    24 * 60 * 60 * hz : tvtohz(&atv);
1137 		getmicrouptime(&rtv);
1138 		timevaladd(&atv, &rtv);
1139 	} else {
1140 		atv.tv_sec = 0;
1141 		atv.tv_usec = 0;
1142 		timeout = 0;
1143 	}
1144 	marker = knote_alloc(1);
1145 	if (marker == NULL) {
1146 		error = ENOMEM;
1147 		goto done_nl;
1148 	}
1149 	marker->kn_status = KN_MARKER;
1150 	KQ_LOCK(kq);
1151 	goto start;
1152 
1153 retry:
1154 	if (atv.tv_sec || atv.tv_usec) {
1155 		getmicrouptime(&rtv);
1156 		if (timevalcmp(&rtv, &atv, >=))
1157 			goto done;
1158 		ttv = atv;
1159 		timevalsub(&ttv, &rtv);
1160 		timeout = ttv.tv_sec > 24 * 60 * 60 ?
1161 			24 * 60 * 60 * hz : tvtohz(&ttv);
1162 	}
1163 
1164 start:
1165 	kevp = keva;
1166 	if (kq->kq_count == 0) {
1167 		if (timeout < 0) {
1168 			error = EWOULDBLOCK;
1169 		} else {
1170 			kq->kq_state |= KQ_SLEEP;
1171 			error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
1172 			    "kqread", timeout);
1173 		}
1174 		if (error == 0)
1175 			goto retry;
1176 		/* don't restart after signals... */
1177 		if (error == ERESTART)
1178 			error = EINTR;
1179 		else if (error == EWOULDBLOCK)
1180 			error = 0;
1181 		goto done;
1182 	}
1183 
1184 	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1185 	while (count) {
1186 		KQ_OWNED(kq);
1187 		kn = TAILQ_FIRST(&kq->kq_head);
1188 
1189 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1190 		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1191 			kq->kq_state |= KQ_FLUXWAIT;
1192 			error = msleep(kq, &kq->kq_lock, PSOCK,
1193 			    "kqflxwt", 0);
1194 			continue;
1195 		}
1196 
1197 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1198 		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1199 			kn->kn_status &= ~KN_QUEUED;
1200 			kq->kq_count--;
1201 			continue;
1202 		}
1203 		if (kn == marker) {
1204 			KQ_FLUX_WAKEUP(kq);
1205 			if (count == maxevents)
1206 				goto retry;
1207 			goto done;
1208 		}
1209 		KASSERT((kn->kn_status & KN_INFLUX) == 0,
1210 		    ("KN_INFLUX set when not suppose to be"));
1211 
1212 		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1213 			kn->kn_status &= ~KN_QUEUED;
1214 			kn->kn_status |= KN_INFLUX;
1215 			kq->kq_count--;
1216 			KQ_UNLOCK(kq);
1217 			/*
1218 			 * We don't need to lock the list since we've marked
1219 			 * it _INFLUX.
1220 			 */
1221 			*kevp = kn->kn_kevent;
1222 			if (!(kn->kn_status & KN_DETACHED))
1223 				kn->kn_fop->f_detach(kn);
1224 			knote_drop(kn, td);
1225 			KQ_LOCK(kq);
1226 			kn = NULL;
1227 		} else {
1228 			kn->kn_status |= KN_INFLUX;
1229 			KQ_UNLOCK(kq);
1230 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1231 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1232 			KN_LIST_LOCK(kn);
1233 			if (kn->kn_fop->f_event(kn, 0) == 0) {
1234 				KQ_LOCK(kq);
1235 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1236 				kn->kn_status &=
1237 				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
1238 				kq->kq_count--;
1239 				KN_LIST_UNLOCK(kn);
1240 				continue;
1241 			}
1242 			*kevp = kn->kn_kevent;
1243 			KQ_LOCK(kq);
1244 			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1245 			if (kn->kn_flags & EV_CLEAR) {
1246 				kn->kn_data = 0;
1247 				kn->kn_fflags = 0;
1248 				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1249 				kq->kq_count--;
1250 			} else
1251 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1252 
1253 			kn->kn_status &= ~(KN_INFLUX);
1254 			KN_LIST_UNLOCK(kn);
1255 		}
1256 
1257 		/* we are returning a copy to the user */
1258 		kevp++;
1259 		nkev++;
1260 		count--;
1261 
1262 		if (nkev == KQ_NEVENTS) {
1263 			KQ_UNLOCK_FLUX(kq);
1264 			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1265 			nkev = 0;
1266 			kevp = keva;
1267 			KQ_LOCK(kq);
1268 			if (error)
1269 				break;
1270 		}
1271 	}
1272 	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1273 done:
1274 	KQ_OWNED(kq);
1275 	KQ_UNLOCK_FLUX(kq);
1276 	knote_free(marker);
1277 done_nl:
1278 	KQ_NOTOWNED(kq);
1279 	if (nkev != 0)
1280 		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1281 	td->td_retval[0] = maxevents - count;
1282 	return (error);
1283 }
1284 
1285 /*
1286  * XXX
1287  * This could be expanded to call kqueue_scan, if desired.
1288  */
1289 /*ARGSUSED*/
1290 static int
1291 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
1292 	int flags, struct thread *td)
1293 {
1294 	return (ENXIO);
1295 }
1296 
1297 /*ARGSUSED*/
1298 static int
1299 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1300 	 int flags, struct thread *td)
1301 {
1302 	return (ENXIO);
1303 }
1304 
1305 /*ARGSUSED*/
1306 static int
1307 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1308 	struct ucred *active_cred, struct thread *td)
1309 {
1310 	/*
1311 	 * Enabling sigio causes two major problems:
1312 	 * 1) infinite recursion:
1313 	 * Synopsys: kevent is being used to track signals and have FIOASYNC
1314 	 * set.  On receipt of a signal this will cause a kqueue to recurse
1315 	 * into itself over and over.  Sending the sigio causes the kqueue
1316 	 * to become ready, which in turn posts sigio again, forever.
1317 	 * Solution: this can be solved by setting a flag in the kqueue that
1318 	 * we have a SIGIO in progress.
1319 	 * 2) locking problems:
1320 	 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts
1321 	 * us above the proc and pgrp locks.
1322 	 * Solution: Post a signal using an async mechanism, being sure to
1323 	 * record a generation count in the delivery so that we do not deliver
1324 	 * a signal to the wrong process.
1325 	 *
1326 	 * Note, these two mechanisms are somewhat mutually exclusive!
1327 	 */
1328 #if 0
1329 	struct kqueue *kq;
1330 
1331 	kq = fp->f_data;
1332 	switch (cmd) {
1333 	case FIOASYNC:
1334 		if (*(int *)data) {
1335 			kq->kq_state |= KQ_ASYNC;
1336 		} else {
1337 			kq->kq_state &= ~KQ_ASYNC;
1338 		}
1339 		return (0);
1340 
1341 	case FIOSETOWN:
1342 		return (fsetown(*(int *)data, &kq->kq_sigio));
1343 
1344 	case FIOGETOWN:
1345 		*(int *)data = fgetown(&kq->kq_sigio);
1346 		return (0);
1347 	}
1348 #endif
1349 
1350 	return (ENOTTY);
1351 }
1352 
1353 /*ARGSUSED*/
1354 static int
1355 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1356 	struct thread *td)
1357 {
1358 	struct kqueue *kq;
1359 	int revents = 0;
1360 	int error;
1361 
1362 	if ((error = kqueue_aquire(fp, &kq)))
1363 		return POLLERR;
1364 
1365 	KQ_LOCK(kq);
1366 	if (events & (POLLIN | POLLRDNORM)) {
1367 		if (kq->kq_count) {
1368 			revents |= events & (POLLIN | POLLRDNORM);
1369 		} else {
1370 			selrecord(td, &kq->kq_sel);
1371 			kq->kq_state |= KQ_SEL;
1372 		}
1373 	}
1374 	kqueue_release(kq, 1);
1375 	KQ_UNLOCK(kq);
1376 	return (revents);
1377 }
1378 
1379 /*ARGSUSED*/
1380 static int
1381 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1382 	struct thread *td)
1383 {
1384 
1385 	bzero((void *)st, sizeof *st);
1386 	/*
1387 	 * We no longer return kq_count because the unlocked value is useless.
1388 	 * If you spent all this time getting the count, why not spend your
1389 	 * syscall better by calling kevent?
1390 	 *
1391 	 * XXX - This is needed for libc_r.
1392 	 */
1393 	st->st_mode = S_IFIFO;
1394 	return (0);
1395 }
1396 
1397 /*ARGSUSED*/
1398 static int
1399 kqueue_close(struct file *fp, struct thread *td)
1400 {
1401 	struct kqueue *kq = fp->f_data;
1402 	struct filedesc *fdp;
1403 	struct knote *kn;
1404 	int i;
1405 	int error;
1406 
1407 	if ((error = kqueue_aquire(fp, &kq)))
1408 		return error;
1409 
1410 	KQ_LOCK(kq);
1411 
1412 	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1413 	    ("kqueue already closing"));
1414 	kq->kq_state |= KQ_CLOSING;
1415 	if (kq->kq_refcnt > 1)
1416 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1417 
1418 	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1419 	fdp = kq->kq_fdp;
1420 
1421 	KASSERT(knlist_empty(&kq->kq_sel.si_note),
1422 	    ("kqueue's knlist not empty"));
1423 
1424 	for (i = 0; i < kq->kq_knlistsize; i++) {
1425 		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1426 			KASSERT((kn->kn_status & KN_INFLUX) == 0,
1427 			    ("KN_INFLUX set when not suppose to be"));
1428 			kn->kn_status |= KN_INFLUX;
1429 			KQ_UNLOCK(kq);
1430 			if (!(kn->kn_status & KN_DETACHED))
1431 				kn->kn_fop->f_detach(kn);
1432 			knote_drop(kn, td);
1433 			KQ_LOCK(kq);
1434 		}
1435 	}
1436 	if (kq->kq_knhashmask != 0) {
1437 		for (i = 0; i <= kq->kq_knhashmask; i++) {
1438 			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1439 				KASSERT((kn->kn_status & KN_INFLUX) == 0,
1440 				    ("KN_INFLUX set when not suppose to be"));
1441 				kn->kn_status |= KN_INFLUX;
1442 				KQ_UNLOCK(kq);
1443 				if (!(kn->kn_status & KN_DETACHED))
1444 					kn->kn_fop->f_detach(kn);
1445 				knote_drop(kn, td);
1446 				KQ_LOCK(kq);
1447 			}
1448 		}
1449 	}
1450 
1451 	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1452 		kq->kq_state |= KQ_TASKDRAIN;
1453 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1454 	}
1455 
1456 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1457 		kq->kq_state &= ~KQ_SEL;
1458 		selwakeuppri(&kq->kq_sel, PSOCK);
1459 	}
1460 
1461 	KQ_UNLOCK(kq);
1462 
1463 	FILEDESC_LOCK_FAST(fdp);
1464 	SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
1465 	FILEDESC_UNLOCK_FAST(fdp);
1466 
1467 	knlist_destroy(&kq->kq_sel.si_note);
1468 	mtx_destroy(&kq->kq_lock);
1469 	kq->kq_fdp = NULL;
1470 
1471 	if (kq->kq_knhash != NULL)
1472 		free(kq->kq_knhash, M_KQUEUE);
1473 	if (kq->kq_knlist != NULL)
1474 		free(kq->kq_knlist, M_KQUEUE);
1475 
1476 	funsetown(&kq->kq_sigio);
1477 	free(kq, M_KQUEUE);
1478 	fp->f_data = NULL;
1479 
1480 	return (0);
1481 }
1482 
1483 static void
1484 kqueue_wakeup(struct kqueue *kq)
1485 {
1486 	KQ_OWNED(kq);
1487 
1488 	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1489 		kq->kq_state &= ~KQ_SLEEP;
1490 		wakeup(kq);
1491 	}
1492 	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1493 		kq->kq_state &= ~KQ_SEL;
1494 		selwakeuppri(&kq->kq_sel, PSOCK);
1495 	}
1496 	if (!knlist_empty(&kq->kq_sel.si_note))
1497 		kqueue_schedtask(kq);
1498 	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1499 		pgsigio(&kq->kq_sigio, SIGIO, 0);
1500 	}
1501 }
1502 
1503 /*
1504  * Walk down a list of knotes, activating them if their event has triggered.
1505  *
1506  * There is a possibility to optimize in the case of one kq watching another.
1507  * Instead of scheduling a task to wake it up, you could pass enough state
1508  * down the chain to make up the parent kqueue.  Make this code functional
1509  * first.
1510  */
1511 void
1512 knote(struct knlist *list, long hint, int islocked)
1513 {
1514 	struct kqueue *kq;
1515 	struct knote *kn;
1516 
1517 	if (list == NULL)
1518 		return;
1519 
1520 	KNL_ASSERT_LOCK(list, islocked);
1521 
1522 	if (!islocked)
1523 		list->kl_lock(list->kl_lockarg);
1524 
1525 	/*
1526 	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
1527 	 * the kqueue scheduling, but this will introduce four
1528 	 * lock/unlock's for each knote to test.  If we do, continue to use
1529 	 * SLIST_FOREACH, SLIST_FOREACH_SAFE is not safe in our case, it is
1530 	 * only safe if you want to remove the current item, which we are
1531 	 * not doing.
1532 	 */
1533 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
1534 		kq = kn->kn_kq;
1535 		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1536 			KQ_LOCK(kq);
1537 			if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1538 				kn->kn_status |= KN_HASKQLOCK;
1539 				if (kn->kn_fop->f_event(kn, hint))
1540 					KNOTE_ACTIVATE(kn, 1);
1541 				kn->kn_status &= ~KN_HASKQLOCK;
1542 			}
1543 			KQ_UNLOCK(kq);
1544 		}
1545 		kq = NULL;
1546 	}
1547 	if (!islocked)
1548 		list->kl_unlock(list->kl_lockarg);
1549 }
1550 
1551 /*
1552  * add a knote to a knlist
1553  */
1554 void
1555 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
1556 {
1557 	KNL_ASSERT_LOCK(knl, islocked);
1558 	KQ_NOTOWNED(kn->kn_kq);
1559 	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
1560 	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
1561 	if (!islocked)
1562 		knl->kl_lock(knl->kl_lockarg);
1563 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
1564 	if (!islocked)
1565 		knl->kl_unlock(knl->kl_lockarg);
1566 	KQ_LOCK(kn->kn_kq);
1567 	kn->kn_knlist = knl;
1568 	kn->kn_status &= ~KN_DETACHED;
1569 	KQ_UNLOCK(kn->kn_kq);
1570 }
1571 
1572 static void
1573 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
1574 {
1575 	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
1576 	KNL_ASSERT_LOCK(knl, knlislocked);
1577 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
1578 	if (!kqislocked)
1579 		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
1580     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
1581 	if (!knlislocked)
1582 		knl->kl_lock(knl->kl_lockarg);
1583 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
1584 	kn->kn_knlist = NULL;
1585 	if (!knlislocked)
1586 		knl->kl_unlock(knl->kl_lockarg);
1587 	if (!kqislocked)
1588 		KQ_LOCK(kn->kn_kq);
1589 	kn->kn_status |= KN_DETACHED;
1590 	if (!kqislocked)
1591 		KQ_UNLOCK(kn->kn_kq);
1592 }
1593 
1594 /*
1595  * remove all knotes from a specified klist
1596  */
1597 void
1598 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
1599 {
1600 
1601 	knlist_remove_kq(knl, kn, islocked, 0);
1602 }
1603 
1604 /*
1605  * remove knote from a specified klist while in f_event handler.
1606  */
1607 void
1608 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
1609 {
1610 
1611 	knlist_remove_kq(knl, kn, 1,
1612 	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
1613 }
1614 
1615 int
1616 knlist_empty(struct knlist *knl)
1617 {
1618 	KNL_ASSERT_LOCKED(knl);
1619 	return SLIST_EMPTY(&knl->kl_list);
1620 }
1621 
1622 static struct mtx	knlist_lock;
1623 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
1624 	MTX_DEF);
1625 static void knlist_mtx_lock(void *arg);
1626 static void knlist_mtx_unlock(void *arg);
1627 static int knlist_mtx_locked(void *arg);
1628 
1629 static void
1630 knlist_mtx_lock(void *arg)
1631 {
1632 	mtx_lock((struct mtx *)arg);
1633 }
1634 
1635 static void
1636 knlist_mtx_unlock(void *arg)
1637 {
1638 	mtx_unlock((struct mtx *)arg);
1639 }
1640 
1641 static int
1642 knlist_mtx_locked(void *arg)
1643 {
1644 	return (mtx_owned((struct mtx *)arg));
1645 }
1646 
1647 void
1648 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
1649     void (*kl_unlock)(void *), int (*kl_locked)(void *))
1650 {
1651 
1652 	if (lock == NULL)
1653 		knl->kl_lockarg = &knlist_lock;
1654 	else
1655 		knl->kl_lockarg = lock;
1656 
1657 	if (kl_lock == NULL)
1658 		knl->kl_lock = knlist_mtx_lock;
1659 	else
1660 		knl->kl_lock = kl_lock;
1661 	if (kl_unlock == NULL)
1662 		knl->kl_unlock = knlist_mtx_unlock;
1663 	else
1664 		knl->kl_unlock = kl_unlock;
1665 	if (kl_locked == NULL)
1666 		knl->kl_locked = knlist_mtx_locked;
1667 	else
1668 		knl->kl_locked = kl_locked;
1669 
1670 	SLIST_INIT(&knl->kl_list);
1671 }
1672 
1673 void
1674 knlist_destroy(struct knlist *knl)
1675 {
1676 
1677 #ifdef INVARIANTS
1678 	/*
1679 	 * if we run across this error, we need to find the offending
1680 	 * driver and have it call knlist_clear.
1681 	 */
1682 	if (!SLIST_EMPTY(&knl->kl_list))
1683 		printf("WARNING: destroying knlist w/ knotes on it!\n");
1684 #endif
1685 
1686 	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
1687 	SLIST_INIT(&knl->kl_list);
1688 }
1689 
1690 /*
1691  * Even if we are locked, we may need to drop the lock to allow any influx
1692  * knotes time to "settle".
1693  */
1694 void
1695 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
1696 {
1697 	struct knote *kn, *kn2;
1698 	struct kqueue *kq;
1699 
1700 	if (islocked)
1701 		KNL_ASSERT_LOCKED(knl);
1702 	else {
1703 		KNL_ASSERT_UNLOCKED(knl);
1704 again:		/* need to reaquire lock since we have dropped it */
1705 		knl->kl_lock(knl->kl_lockarg);
1706 	}
1707 
1708 	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
1709 		kq = kn->kn_kq;
1710 		KQ_LOCK(kq);
1711 		if ((kn->kn_status & KN_INFLUX)) {
1712 			KQ_UNLOCK(kq);
1713 			continue;
1714 		}
1715 		knlist_remove_kq(knl, kn, 1, 1);
1716 		if (killkn) {
1717 			kn->kn_status |= KN_INFLUX | KN_DETACHED;
1718 			KQ_UNLOCK(kq);
1719 			knote_drop(kn, td);
1720 		} else {
1721 			/* Make sure cleared knotes disappear soon */
1722 			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1723 			KQ_UNLOCK(kq);
1724 		}
1725 		kq = NULL;
1726 	}
1727 
1728 	if (!SLIST_EMPTY(&knl->kl_list)) {
1729 		/* there are still KN_INFLUX remaining */
1730 		kn = SLIST_FIRST(&knl->kl_list);
1731 		kq = kn->kn_kq;
1732 		KQ_LOCK(kq);
1733 		KASSERT(kn->kn_status & KN_INFLUX,
1734 		    ("knote removed w/o list lock"));
1735 		knl->kl_unlock(knl->kl_lockarg);
1736 		kq->kq_state |= KQ_FLUXWAIT;
1737 		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
1738 		kq = NULL;
1739 		goto again;
1740 	}
1741 
1742 	if (islocked)
1743 		KNL_ASSERT_LOCKED(knl);
1744 	else {
1745 		knl->kl_unlock(knl->kl_lockarg);
1746 		KNL_ASSERT_UNLOCKED(knl);
1747 	}
1748 }
1749 
1750 /*
1751  * remove all knotes referencing a specified fd
1752  * must be called with FILEDESC lock.  This prevents a race where a new fd
1753  * comes along and occupies the entry and we attach a knote to the fd.
1754  */
1755 void
1756 knote_fdclose(struct thread *td, int fd)
1757 {
1758 	struct filedesc *fdp = td->td_proc->p_fd;
1759 	struct kqueue *kq;
1760 	struct knote *kn;
1761 	int influx;
1762 
1763 	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
1764 
1765 	/*
1766 	 * We shouldn't have to worry about new kevents appearing on fd
1767 	 * since filedesc is locked.
1768 	 */
1769 	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
1770 		KQ_LOCK(kq);
1771 
1772 again:
1773 		influx = 0;
1774 		while (kq->kq_knlistsize > fd &&
1775 		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
1776 			if (kn->kn_status & KN_INFLUX) {
1777 				/* someone else might be waiting on our knote */
1778 				if (influx)
1779 					wakeup(kq);
1780 				kq->kq_state |= KQ_FLUXWAIT;
1781 				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
1782 				goto again;
1783 			}
1784 			kn->kn_status |= KN_INFLUX;
1785 			KQ_UNLOCK(kq);
1786 			if (!(kn->kn_status & KN_DETACHED))
1787 				kn->kn_fop->f_detach(kn);
1788 			knote_drop(kn, td);
1789 			influx = 1;
1790 			KQ_LOCK(kq);
1791 		}
1792 		KQ_UNLOCK_FLUX(kq);
1793 	}
1794 }
1795 
1796 static int
1797 knote_attach(struct knote *kn, struct kqueue *kq)
1798 {
1799 	struct klist *list;
1800 
1801 	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
1802 	KQ_OWNED(kq);
1803 
1804 	if (kn->kn_fop->f_isfd) {
1805 		if (kn->kn_id >= kq->kq_knlistsize)
1806 			return ENOMEM;
1807 		list = &kq->kq_knlist[kn->kn_id];
1808 	} else {
1809 		if (kq->kq_knhash == NULL)
1810 			return ENOMEM;
1811 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1812 	}
1813 
1814 	SLIST_INSERT_HEAD(list, kn, kn_link);
1815 
1816 	return 0;
1817 }
1818 
1819 /*
1820  * knote must already have been detached using the f_detach method.
1821  * no lock need to be held, it is assumed that the KN_INFLUX flag is set
1822  * to prevent other removal.
1823  */
1824 static void
1825 knote_drop(struct knote *kn, struct thread *td)
1826 {
1827 	struct kqueue *kq;
1828 	struct klist *list;
1829 
1830 	kq = kn->kn_kq;
1831 
1832 	KQ_NOTOWNED(kq);
1833 	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
1834 	    ("knote_drop called without KN_INFLUX set in kn_status"));
1835 
1836 	KQ_LOCK(kq);
1837 	if (kn->kn_fop->f_isfd)
1838 		list = &kq->kq_knlist[kn->kn_id];
1839 	else
1840 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1841 
1842 	if (!SLIST_EMPTY(list))
1843 		SLIST_REMOVE(list, kn, knote, kn_link);
1844 	if (kn->kn_status & KN_QUEUED)
1845 		knote_dequeue(kn);
1846 	KQ_UNLOCK_FLUX(kq);
1847 
1848 	if (kn->kn_fop->f_isfd) {
1849 		fdrop(kn->kn_fp, td);
1850 		kn->kn_fp = NULL;
1851 	}
1852 	kqueue_fo_release(kn->kn_kevent.filter);
1853 	kn->kn_fop = NULL;
1854 	knote_free(kn);
1855 }
1856 
1857 static void
1858 knote_enqueue(struct knote *kn)
1859 {
1860 	struct kqueue *kq = kn->kn_kq;
1861 
1862 	KQ_OWNED(kn->kn_kq);
1863 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1864 
1865 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1866 	kn->kn_status |= KN_QUEUED;
1867 	kq->kq_count++;
1868 	kqueue_wakeup(kq);
1869 }
1870 
1871 static void
1872 knote_dequeue(struct knote *kn)
1873 {
1874 	struct kqueue *kq = kn->kn_kq;
1875 
1876 	KQ_OWNED(kn->kn_kq);
1877 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
1878 
1879 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1880 	kn->kn_status &= ~KN_QUEUED;
1881 	kq->kq_count--;
1882 }
1883 
1884 static void
1885 knote_init(void)
1886 {
1887 
1888 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
1889 	    NULL, NULL, UMA_ALIGN_PTR, 0);
1890 }
1891 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
1892 
1893 static struct knote *
1894 knote_alloc(int waitok)
1895 {
1896 	return ((struct knote *)uma_zalloc(knote_zone,
1897 	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
1898 }
1899 
1900 static void
1901 knote_free(struct knote *kn)
1902 {
1903 	if (kn != NULL)
1904 		uma_zfree(knote_zone, kn);
1905 }
1906