xref: /freebsd/sys/kern/kern_intr.c (revision 9bd55acf5e74cd2ad39e993b4d0fa9d7eb40abf4)
19454b2d8SWarner Losh /*-
2425f9fdaSStefan Eßer  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3425f9fdaSStefan Eßer  * All rights reserved.
4425f9fdaSStefan Eßer  *
5425f9fdaSStefan Eßer  * Redistribution and use in source and binary forms, with or without
6425f9fdaSStefan Eßer  * modification, are permitted provided that the following conditions
7425f9fdaSStefan Eßer  * are met:
8425f9fdaSStefan Eßer  * 1. Redistributions of source code must retain the above copyright
9425f9fdaSStefan Eßer  *    notice unmodified, this list of conditions, and the following
10425f9fdaSStefan Eßer  *    disclaimer.
11425f9fdaSStefan Eßer  * 2. Redistributions in binary form must reproduce the above copyright
12425f9fdaSStefan Eßer  *    notice, this list of conditions and the following disclaimer in the
13425f9fdaSStefan Eßer  *    documentation and/or other materials provided with the distribution.
14425f9fdaSStefan Eßer  *
15425f9fdaSStefan Eßer  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16425f9fdaSStefan Eßer  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17425f9fdaSStefan Eßer  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18425f9fdaSStefan Eßer  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19425f9fdaSStefan Eßer  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20425f9fdaSStefan Eßer  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21425f9fdaSStefan Eßer  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22425f9fdaSStefan Eßer  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23425f9fdaSStefan Eßer  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24425f9fdaSStefan Eßer  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25425f9fdaSStefan Eßer  */
26425f9fdaSStefan Eßer 
27677b542eSDavid E. O'Brien #include <sys/cdefs.h>
28677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
293900ddb2SDoug Rabson 
308b201c42SJohn Baldwin #include "opt_ddb.h"
318b201c42SJohn Baldwin 
321c5bb3eaSPeter Wemm #include <sys/param.h>
339a94c9c5SJohn Baldwin #include <sys/bus.h>
34c11110eaSAlfred Perlstein #include <sys/conf.h>
359b33b154SJeff Roberson #include <sys/cpuset.h>
369a94c9c5SJohn Baldwin #include <sys/rtprio.h>
37425f9fdaSStefan Eßer #include <sys/systm.h>
3868352337SDoug Rabson #include <sys/interrupt.h>
391931cf94SJohn Baldwin #include <sys/kernel.h>
401931cf94SJohn Baldwin #include <sys/kthread.h>
411931cf94SJohn Baldwin #include <sys/ktr.h>
4205b2c96fSBruce Evans #include <sys/limits.h>
43f34fa851SJohn Baldwin #include <sys/lock.h>
441931cf94SJohn Baldwin #include <sys/malloc.h>
4535e0e5b3SJohn Baldwin #include <sys/mutex.h>
461931cf94SJohn Baldwin #include <sys/proc.h>
473e5da754SJohn Baldwin #include <sys/random.h>
48b4151f71SJohn Baldwin #include <sys/resourcevar.h>
4963710c4dSJohn Baldwin #include <sys/sched.h>
50eaf86d16SJohn Baldwin #include <sys/smp.h>
51d279178dSThomas Moestl #include <sys/sysctl.h>
526205924aSKip Macy #include <sys/syslog.h>
531931cf94SJohn Baldwin #include <sys/unistd.h>
541931cf94SJohn Baldwin #include <sys/vmmeter.h>
551931cf94SJohn Baldwin #include <machine/atomic.h>
561931cf94SJohn Baldwin #include <machine/cpu.h>
578088699fSJohn Baldwin #include <machine/md_var.h>
58b4151f71SJohn Baldwin #include <machine/stdarg.h>
598b201c42SJohn Baldwin #ifdef DDB
608b201c42SJohn Baldwin #include <ddb/ddb.h>
618b201c42SJohn Baldwin #include <ddb/db_sym.h>
628b201c42SJohn Baldwin #endif
63425f9fdaSStefan Eßer 
64e0f66ef8SJohn Baldwin /*
65e0f66ef8SJohn Baldwin  * Describe an interrupt thread.  There is one of these per interrupt event.
66e0f66ef8SJohn Baldwin  */
67e0f66ef8SJohn Baldwin struct intr_thread {
68e0f66ef8SJohn Baldwin 	struct intr_event *it_event;
69e0f66ef8SJohn Baldwin 	struct thread *it_thread;	/* Kernel thread. */
70e0f66ef8SJohn Baldwin 	int	it_flags;		/* (j) IT_* flags. */
71e0f66ef8SJohn Baldwin 	int	it_need;		/* Needs service. */
723e5da754SJohn Baldwin };
733e5da754SJohn Baldwin 
74e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */
75e0f66ef8SJohn Baldwin #define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
76e0f66ef8SJohn Baldwin 
77e0f66ef8SJohn Baldwin struct	intr_entropy {
78e0f66ef8SJohn Baldwin 	struct	thread *td;
79e0f66ef8SJohn Baldwin 	uintptr_t event;
80e0f66ef8SJohn Baldwin };
81e0f66ef8SJohn Baldwin 
82e0f66ef8SJohn Baldwin struct	intr_event *clk_intr_event;
83e0f66ef8SJohn Baldwin struct	intr_event *tty_intr_event;
847b1fe905SBruce Evans void	*vm_ih;
857ab24ea3SJulian Elischer struct proc *intrproc;
861931cf94SJohn Baldwin 
87b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
88b4151f71SJohn Baldwin 
890ae62c18SNate Lawson static int intr_storm_threshold = 1000;
907870c3c6SJohn Baldwin TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
917870c3c6SJohn Baldwin SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
927870c3c6SJohn Baldwin     &intr_storm_threshold, 0,
937b1fe905SBruce Evans     "Number of consecutive interrupts before storm protection is enabled");
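/*
 * A tuning sketch (example values only, not taken from this file): the
 * threshold above can be set as a loader tunable or changed at run time,
 * and a value of 0 disables storm detection entirely.
 *
 *	hw.intr_storm_threshold="2000"		(in /boot/loader.conf)
 *	sysctl hw.intr_storm_threshold=0	(from the command line)
 */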
94e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list =
95e0f66ef8SJohn Baldwin     TAILQ_HEAD_INITIALIZER(event_list);
969b33b154SJeff Roberson static struct mtx event_lock;
979b33b154SJeff Roberson MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
987b1fe905SBruce Evans 
99e0f66ef8SJohn Baldwin static void	intr_event_update(struct intr_event *ie);
100bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1011ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie,
1021ee1b687SJohn Baldwin 		    struct intr_thread *ithd);
1031ee1b687SJohn Baldwin static int	intr_filter_loop(struct intr_event *ie,
1041ee1b687SJohn Baldwin 		    struct trapframe *frame, struct intr_thread **ithd);
105bafe5a31SPaolo Pisati static struct intr_thread *ithread_create(const char *name,
106bafe5a31SPaolo Pisati 			      struct intr_handler *ih);
107bafe5a31SPaolo Pisati #else
1081ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie);
109e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name);
110bafe5a31SPaolo Pisati #endif
111e0f66ef8SJohn Baldwin static void	ithread_destroy(struct intr_thread *ithread);
112bafe5a31SPaolo Pisati static void	ithread_execute_handlers(struct proc *p,
113bafe5a31SPaolo Pisati 		    struct intr_event *ie);
114bafe5a31SPaolo Pisati #ifdef INTR_FILTER
115bafe5a31SPaolo Pisati static void	priv_ithread_execute_handler(struct proc *p,
116bafe5a31SPaolo Pisati 		    struct intr_handler *ih);
117bafe5a31SPaolo Pisati #endif
1187b1fe905SBruce Evans static void	ithread_loop(void *);
119e0f66ef8SJohn Baldwin static void	ithread_update(struct intr_thread *ithd);
1207b1fe905SBruce Evans static void	start_softintr(void *);
1217870c3c6SJohn Baldwin 
122bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */
123b4151f71SJohn Baldwin u_char
124e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags)
1259a94c9c5SJohn Baldwin {
126b4151f71SJohn Baldwin 	u_char pri;
1279a94c9c5SJohn Baldwin 
128b4151f71SJohn Baldwin 	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
1295a280d9cSPeter Wemm 	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
1309a94c9c5SJohn Baldwin 	switch (flags) {
131b4151f71SJohn Baldwin 	case INTR_TYPE_TTY:
1329a94c9c5SJohn Baldwin 		pri = PI_TTYLOW;
1339a94c9c5SJohn Baldwin 		break;
1349a94c9c5SJohn Baldwin 	case INTR_TYPE_BIO:
1359a94c9c5SJohn Baldwin 		/*
1369a94c9c5SJohn Baldwin 		 * XXX We need to refine this.  BSD/OS distinguishes
1379a94c9c5SJohn Baldwin 		 * between tape and disk priorities.
1389a94c9c5SJohn Baldwin 		 */
1399a94c9c5SJohn Baldwin 		pri = PI_DISK;
1409a94c9c5SJohn Baldwin 		break;
1419a94c9c5SJohn Baldwin 	case INTR_TYPE_NET:
1429a94c9c5SJohn Baldwin 		pri = PI_NET;
1439a94c9c5SJohn Baldwin 		break;
1449a94c9c5SJohn Baldwin 	case INTR_TYPE_CAM:
1459a94c9c5SJohn Baldwin 		pri = PI_DISK;          /* XXX or PI_CAM? */
1469a94c9c5SJohn Baldwin 		break;
1475a280d9cSPeter Wemm 	case INTR_TYPE_AV:		/* Audio/video */
1485a280d9cSPeter Wemm 		pri = PI_AV;
1495a280d9cSPeter Wemm 		break;
150b4151f71SJohn Baldwin 	case INTR_TYPE_CLK:
151b4151f71SJohn Baldwin 		pri = PI_REALTIME;
152b4151f71SJohn Baldwin 		break;
1539a94c9c5SJohn Baldwin 	case INTR_TYPE_MISC:
1549a94c9c5SJohn Baldwin 		pri = PI_DULL;          /* don't care */
1559a94c9c5SJohn Baldwin 		break;
1569a94c9c5SJohn Baldwin 	default:
157b4151f71SJohn Baldwin 		/* We didn't specify an interrupt type. */
158e0f66ef8SJohn Baldwin 		panic("intr_priority: no interrupt type in flags");
1599a94c9c5SJohn Baldwin 	}
1609a94c9c5SJohn Baldwin 
1619a94c9c5SJohn Baldwin 	return pri;
1629a94c9c5SJohn Baldwin }
1639a94c9c5SJohn Baldwin 
164b4151f71SJohn Baldwin /*
165e0f66ef8SJohn Baldwin  * Update an ithread based on the associated intr_event.
166b4151f71SJohn Baldwin  */
167b4151f71SJohn Baldwin static void
168e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd)
169b4151f71SJohn Baldwin {
170e0f66ef8SJohn Baldwin 	struct intr_event *ie;
171b40ce416SJulian Elischer 	struct thread *td;
172e0f66ef8SJohn Baldwin 	u_char pri;
1738088699fSJohn Baldwin 
174e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
175e0f66ef8SJohn Baldwin 	td = ithd->it_thread;
176b4151f71SJohn Baldwin 
177e0f66ef8SJohn Baldwin 	/* Determine the overall priority of this event. */
178e0f66ef8SJohn Baldwin 	if (TAILQ_EMPTY(&ie->ie_handlers))
179e0f66ef8SJohn Baldwin 		pri = PRI_MAX_ITHD;
180e0f66ef8SJohn Baldwin 	else
181e0f66ef8SJohn Baldwin 		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
182e80fb434SRobert Drehmel 
183e0f66ef8SJohn Baldwin 	/* Update name and priority. */
1847ab24ea3SJulian Elischer 	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
185982d11f8SJeff Roberson 	thread_lock(td);
186e0f66ef8SJohn Baldwin 	sched_prio(td, pri);
187982d11f8SJeff Roberson 	thread_unlock(td);
188b4151f71SJohn Baldwin }
189e0f66ef8SJohn Baldwin 
190e0f66ef8SJohn Baldwin /*
191e0f66ef8SJohn Baldwin  * Regenerate the full name of an interrupt event and update its priority.
192e0f66ef8SJohn Baldwin  */
193e0f66ef8SJohn Baldwin static void
194e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie)
195e0f66ef8SJohn Baldwin {
196e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
197e0f66ef8SJohn Baldwin 	char *last;
198e0f66ef8SJohn Baldwin 	int missed, space;
199e0f66ef8SJohn Baldwin 
200e0f66ef8SJohn Baldwin 	/* Start off with no entropy and just the name of the event. */
201e0f66ef8SJohn Baldwin 	mtx_assert(&ie->ie_lock, MA_OWNED);
202e0f66ef8SJohn Baldwin 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
203e0f66ef8SJohn Baldwin 	ie->ie_flags &= ~IE_ENTROPY;
2040811d60aSJohn Baldwin 	missed = 0;
205e0f66ef8SJohn Baldwin 	space = 1;
206e0f66ef8SJohn Baldwin 
207e0f66ef8SJohn Baldwin 	/* Run through all the handlers updating values. */
208e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
209e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
210e0f66ef8SJohn Baldwin 		    sizeof(ie->ie_fullname)) {
211e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " ");
212e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, ih->ih_name);
213e0f66ef8SJohn Baldwin 			space = 0;
2140811d60aSJohn Baldwin 		} else
2150811d60aSJohn Baldwin 			missed++;
2160811d60aSJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY)
217e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ENTROPY;
2180811d60aSJohn Baldwin 	}
219e0f66ef8SJohn Baldwin 
220e0f66ef8SJohn Baldwin 	/*
221e0f66ef8SJohn Baldwin 	 * If the handler names were too long, add +'s to indicate missing
222e0f66ef8SJohn Baldwin 	 * names. If we run out of room and still have +'s to add, change
223e0f66ef8SJohn Baldwin 	 * the last character from a + to a *.
224e0f66ef8SJohn Baldwin 	 */
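	/*
	 * An illustrative example (hypothetical names, not from this file):
	 * for an event named "irq10:" with handlers "em0" and "uhci0"
	 * appended and two more handlers whose names no longer fit, the
	 * full name becomes "irq10: em0 uhci0++".  If the buffer itself
	 * fills while misses remain, the last character is overwritten
	 * with '+' and then, on a further miss, with '*'.
	 */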
225e0f66ef8SJohn Baldwin 	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
2260811d60aSJohn Baldwin 	while (missed-- > 0) {
227e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
228e0f66ef8SJohn Baldwin 			if (*last == '+') {
229e0f66ef8SJohn Baldwin 				*last = '*';
230e0f66ef8SJohn Baldwin 				break;
231b4151f71SJohn Baldwin 			} else
232e0f66ef8SJohn Baldwin 				*last = '+';
233e0f66ef8SJohn Baldwin 		} else if (space) {
234e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " +");
235e0f66ef8SJohn Baldwin 			space = 0;
236e0f66ef8SJohn Baldwin 		} else
237e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, "+");
238b4151f71SJohn Baldwin 	}
239e0f66ef8SJohn Baldwin 
240e0f66ef8SJohn Baldwin 	/*
241e0f66ef8SJohn Baldwin 	 * If this event has an ithread, update its priority and
242e0f66ef8SJohn Baldwin 	 * name.
243e0f66ef8SJohn Baldwin 	 */
244e0f66ef8SJohn Baldwin 	if (ie->ie_thread != NULL)
245e0f66ef8SJohn Baldwin 		ithread_update(ie->ie_thread);
246e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
247b4151f71SJohn Baldwin }
248b4151f71SJohn Baldwin 
249b4151f71SJohn Baldwin int
2509b33b154SJeff Roberson intr_event_create(struct intr_event **event, void *source, int flags, int irq,
2511ee1b687SJohn Baldwin     void (*pre_ithread)(void *), void (*post_ithread)(void *),
2521ee1b687SJohn Baldwin     void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
2531ee1b687SJohn Baldwin     const char *fmt, ...)
254bafe5a31SPaolo Pisati {
255bafe5a31SPaolo Pisati 	struct intr_event *ie;
256bafe5a31SPaolo Pisati 	va_list ap;
257bafe5a31SPaolo Pisati 
258bafe5a31SPaolo Pisati 	/* The only valid flag during creation is IE_SOFT. */
259bafe5a31SPaolo Pisati 	if ((flags & ~IE_SOFT) != 0)
260bafe5a31SPaolo Pisati 		return (EINVAL);
261bafe5a31SPaolo Pisati 	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
262bafe5a31SPaolo Pisati 	ie->ie_source = source;
2631ee1b687SJohn Baldwin 	ie->ie_pre_ithread = pre_ithread;
2641ee1b687SJohn Baldwin 	ie->ie_post_ithread = post_ithread;
2651ee1b687SJohn Baldwin 	ie->ie_post_filter = post_filter;
2666d2d1c04SJohn Baldwin 	ie->ie_assign_cpu = assign_cpu;
267bafe5a31SPaolo Pisati 	ie->ie_flags = flags;
2689b33b154SJeff Roberson 	ie->ie_irq = irq;
269eaf86d16SJohn Baldwin 	ie->ie_cpu = NOCPU;
270bafe5a31SPaolo Pisati 	TAILQ_INIT(&ie->ie_handlers);
271bafe5a31SPaolo Pisati 	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
272bafe5a31SPaolo Pisati 
273bafe5a31SPaolo Pisati 	va_start(ap, fmt);
274bafe5a31SPaolo Pisati 	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
275bafe5a31SPaolo Pisati 	va_end(ap);
276bafe5a31SPaolo Pisati 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
2779b33b154SJeff Roberson 	mtx_lock(&event_lock);
278bafe5a31SPaolo Pisati 	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
2799b33b154SJeff Roberson 	mtx_unlock(&event_lock);
280bafe5a31SPaolo Pisati 	if (event != NULL)
281bafe5a31SPaolo Pisati 		*event = ie;
282bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
283bafe5a31SPaolo Pisati 	return (0);
284bafe5a31SPaolo Pisati }
285b4151f71SJohn Baldwin 
286eaf86d16SJohn Baldwin /*
287eaf86d16SJohn Baldwin  * Bind an interrupt event to the specified CPU.  Note that not all
288eaf86d16SJohn Baldwin  * platforms support binding an interrupt to a CPU.  For those
289eaf86d16SJohn Baldwin  * platforms this request will fail.  For supported platforms, any
290eaf86d16SJohn Baldwin  * associated ithreads as well as the primary interrupt context will
291eaf86d16SJohn Baldwin  * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
292eaf86d16SJohn Baldwin  * the interrupt event.
293eaf86d16SJohn Baldwin  */
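/*
 * A minimal usage sketch (hypothetical CPU number, not from this file):
 * bind an event to CPU 2, and later restore the default affinity.
 *
 *	error = intr_event_bind(ie, 2);
 *	...
 *	error = intr_event_bind(ie, NOCPU);
 */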
294eaf86d16SJohn Baldwin int
295eaf86d16SJohn Baldwin intr_event_bind(struct intr_event *ie, u_char cpu)
296eaf86d16SJohn Baldwin {
2979b33b154SJeff Roberson 	cpuset_t mask;
2989b33b154SJeff Roberson 	lwpid_t id;
299eaf86d16SJohn Baldwin 	int error;
300eaf86d16SJohn Baldwin 
301eaf86d16SJohn Baldwin 	/* Need a CPU to bind to. */
302eaf86d16SJohn Baldwin 	if (cpu != NOCPU && CPU_ABSENT(cpu))
303eaf86d16SJohn Baldwin 		return (EINVAL);
304eaf86d16SJohn Baldwin 
305eaf86d16SJohn Baldwin 	if (ie->ie_assign_cpu == NULL)
306eaf86d16SJohn Baldwin 		return (EOPNOTSUPP);
3079b33b154SJeff Roberson 	/*
3089b33b154SJeff Roberson 	 * If we have any ithreads try to set their mask first since this
3099b33b154SJeff Roberson 	 * can fail.
3109b33b154SJeff Roberson 	 */
311eaf86d16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
3129b33b154SJeff Roberson 	if (ie->ie_thread != NULL) {
3139b33b154SJeff Roberson 		CPU_ZERO(&mask);
3149b33b154SJeff Roberson 		if (cpu == NOCPU)
3159b33b154SJeff Roberson 			CPU_COPY(cpuset_root, &mask);
3169b33b154SJeff Roberson 		else
3179b33b154SJeff Roberson 			CPU_SET(cpu, &mask);
3189b33b154SJeff Roberson 		id = ie->ie_thread->it_thread->td_tid;
319eaf86d16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
3209b33b154SJeff Roberson 		error = cpuset_setthread(id, &mask);
3219b33b154SJeff Roberson 		if (error)
3229b33b154SJeff Roberson 			return (error);
3239b33b154SJeff Roberson 	} else
324eaf86d16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
325eaf86d16SJohn Baldwin 	error = ie->ie_assign_cpu(ie->ie_source, cpu);
326eaf86d16SJohn Baldwin 	if (error)
327eaf86d16SJohn Baldwin 		return (error);
328eaf86d16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
329eaf86d16SJohn Baldwin 	ie->ie_cpu = cpu;
3309b33b154SJeff Roberson 	mtx_unlock(&ie->ie_lock);
3319b33b154SJeff Roberson 
3329b33b154SJeff Roberson 	return (error);
3339b33b154SJeff Roberson }
3349b33b154SJeff Roberson 
3359b33b154SJeff Roberson static struct intr_event *
3369b33b154SJeff Roberson intr_lookup(int irq)
3379b33b154SJeff Roberson {
3389b33b154SJeff Roberson 	struct intr_event *ie;
3399b33b154SJeff Roberson 
3409b33b154SJeff Roberson 	mtx_lock(&event_lock);
3419b33b154SJeff Roberson 	TAILQ_FOREACH(ie, &event_list, ie_list)
3429b33b154SJeff Roberson 		if (ie->ie_irq == irq &&
3439b33b154SJeff Roberson 		    (ie->ie_flags & IE_SOFT) == 0 &&
3449b33b154SJeff Roberson 		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
3459b33b154SJeff Roberson 			break;
3469b33b154SJeff Roberson 	mtx_unlock(&event_lock);
3479b33b154SJeff Roberson 	return (ie);
3489b33b154SJeff Roberson }
3499b33b154SJeff Roberson 
3509b33b154SJeff Roberson int
3519b33b154SJeff Roberson intr_setaffinity(int irq, void *m)
3529b33b154SJeff Roberson {
3539b33b154SJeff Roberson 	struct intr_event *ie;
3549b33b154SJeff Roberson 	cpuset_t *mask;
3559b33b154SJeff Roberson 	u_char cpu;
3569b33b154SJeff Roberson 	int n;
3579b33b154SJeff Roberson 
3589b33b154SJeff Roberson 	mask = m;
3599b33b154SJeff Roberson 	cpu = NOCPU;
3609b33b154SJeff Roberson 	/*
3619b33b154SJeff Roberson 	 * If we're setting all cpus we can unbind.  Otherwise make sure
3629b33b154SJeff Roberson 	 * only one cpu is in the set.
3639b33b154SJeff Roberson 	 */
3649b33b154SJeff Roberson 	if (CPU_CMP(cpuset_root, mask)) {
3659b33b154SJeff Roberson 		for (n = 0; n < CPU_SETSIZE; n++) {
3669b33b154SJeff Roberson 			if (!CPU_ISSET(n, mask))
3679b33b154SJeff Roberson 				continue;
3689b33b154SJeff Roberson 			if (cpu != NOCPU)
3699b33b154SJeff Roberson 				return (EINVAL);
3709b33b154SJeff Roberson 			cpu = (u_char)n;
3719b33b154SJeff Roberson 		}
3729b33b154SJeff Roberson 	}
3739b33b154SJeff Roberson 	ie = intr_lookup(irq);
3749b33b154SJeff Roberson 	if (ie == NULL)
3759b33b154SJeff Roberson 		return (ESRCH);
3769bd55acfSJohn Baldwin 	return (intr_event_bind(ie, cpu));
3779b33b154SJeff Roberson }
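
/*
 * A brief usage sketch for intr_setaffinity() (hypothetical IRQ number,
 * not from this file): route IRQ 16 to CPU 3 by passing a cpuset with a
 * single CPU set.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(3, &mask);
 *	error = intr_setaffinity(16, &mask);
 */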
3789b33b154SJeff Roberson 
3799b33b154SJeff Roberson int
3809b33b154SJeff Roberson intr_getaffinity(int irq, void *m)
3819b33b154SJeff Roberson {
3829b33b154SJeff Roberson 	struct intr_event *ie;
3839b33b154SJeff Roberson 	cpuset_t *mask;
3849b33b154SJeff Roberson 
3859b33b154SJeff Roberson 	mask = m;
3869b33b154SJeff Roberson 	ie = intr_lookup(irq);
3879b33b154SJeff Roberson 	if (ie == NULL)
3889b33b154SJeff Roberson 		return (ESRCH);
3899b33b154SJeff Roberson 	CPU_ZERO(mask);
3909b33b154SJeff Roberson 	mtx_lock(&ie->ie_lock);
3919b33b154SJeff Roberson 	if (ie->ie_cpu == NOCPU)
3929b33b154SJeff Roberson 		CPU_COPY(cpuset_root, mask);
3939b33b154SJeff Roberson 	else
3949b33b154SJeff Roberson 		CPU_SET(ie->ie_cpu, mask);
395eaf86d16SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
396eaf86d16SJohn Baldwin 	return (0);
397eaf86d16SJohn Baldwin }
398eaf86d16SJohn Baldwin 
399b4151f71SJohn Baldwin int
400e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie)
401b4151f71SJohn Baldwin {
402b4151f71SJohn Baldwin 
4039b33b154SJeff Roberson 	mtx_lock(&event_lock);
404e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
405e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
406e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
4079b33b154SJeff Roberson 		mtx_unlock(&event_lock);
408e0f66ef8SJohn Baldwin 		return (EBUSY);
4094d29cb2dSJohn Baldwin 	}
410e0f66ef8SJohn Baldwin 	TAILQ_REMOVE(&event_list, ie, ie_list);
4119477358dSJohn Baldwin #ifndef notyet
4129477358dSJohn Baldwin 	if (ie->ie_thread != NULL) {
4139477358dSJohn Baldwin 		ithread_destroy(ie->ie_thread);
4149477358dSJohn Baldwin 		ie->ie_thread = NULL;
4159477358dSJohn Baldwin 	}
4169477358dSJohn Baldwin #endif
417e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
4189b33b154SJeff Roberson 	mtx_unlock(&event_lock);
419e0f66ef8SJohn Baldwin 	mtx_destroy(&ie->ie_lock);
420e0f66ef8SJohn Baldwin 	free(ie, M_ITHREAD);
421e0f66ef8SJohn Baldwin 	return (0);
422e0f66ef8SJohn Baldwin }
423e0f66ef8SJohn Baldwin 
424bafe5a31SPaolo Pisati #ifndef INTR_FILTER
425e0f66ef8SJohn Baldwin static struct intr_thread *
426e0f66ef8SJohn Baldwin ithread_create(const char *name)
427e0f66ef8SJohn Baldwin {
428e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
429e0f66ef8SJohn Baldwin 	struct thread *td;
430e0f66ef8SJohn Baldwin 	int error;
431e0f66ef8SJohn Baldwin 
432e0f66ef8SJohn Baldwin 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
433e0f66ef8SJohn Baldwin 
4347ab24ea3SJulian Elischer 	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
4357ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
4369ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
437e0f66ef8SJohn Baldwin 	if (error)
4383745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
439982d11f8SJeff Roberson 	thread_lock(td);
440ad1e7d28SJulian Elischer 	sched_class(td, PRI_ITHD);
441e0f66ef8SJohn Baldwin 	TD_SET_IWAIT(td);
442982d11f8SJeff Roberson 	thread_unlock(td);
443e0f66ef8SJohn Baldwin 	td->td_pflags |= TDP_ITHREAD;
444e0f66ef8SJohn Baldwin 	ithd->it_thread = td;
445e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
446e0f66ef8SJohn Baldwin 	return (ithd);
447e0f66ef8SJohn Baldwin }
448bafe5a31SPaolo Pisati #else
449bafe5a31SPaolo Pisati static struct intr_thread *
450bafe5a31SPaolo Pisati ithread_create(const char *name, struct intr_handler *ih)
451bafe5a31SPaolo Pisati {
452bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
453bafe5a31SPaolo Pisati 	struct thread *td;
454bafe5a31SPaolo Pisati 	int error;
455bafe5a31SPaolo Pisati 
456bafe5a31SPaolo Pisati 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
457bafe5a31SPaolo Pisati 
458539976ffSJulian Elischer 	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
4597ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
4609ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
461bafe5a31SPaolo Pisati 	if (error)
4623745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
463982d11f8SJeff Roberson 	thread_lock(td);
464bafe5a31SPaolo Pisati 	sched_class(td, PRI_ITHD);
465bafe5a31SPaolo Pisati 	TD_SET_IWAIT(td);
466982d11f8SJeff Roberson 	thread_unlock(td);
467bafe5a31SPaolo Pisati 	td->td_pflags |= TDP_ITHREAD;
468bafe5a31SPaolo Pisati 	ithd->it_thread = td;
469bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
470bafe5a31SPaolo Pisati 	return (ithd);
471bafe5a31SPaolo Pisati }
472bafe5a31SPaolo Pisati #endif
473e0f66ef8SJohn Baldwin 
474e0f66ef8SJohn Baldwin static void
475e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread)
476e0f66ef8SJohn Baldwin {
477e0f66ef8SJohn Baldwin 	struct thread *td;
478e0f66ef8SJohn Baldwin 
479bb141be1SScott Long 	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
480e0f66ef8SJohn Baldwin 	td = ithread->it_thread;
481982d11f8SJeff Roberson 	thread_lock(td);
482e0f66ef8SJohn Baldwin 	ithread->it_flags |= IT_DEAD;
48371fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
48471fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
485f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
486b4151f71SJohn Baldwin 	}
487982d11f8SJeff Roberson 	thread_unlock(td);
488b4151f71SJohn Baldwin }
489b4151f71SJohn Baldwin 
490bafe5a31SPaolo Pisati #ifndef INTR_FILTER
491b4151f71SJohn Baldwin int
492e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name,
493ef544f63SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
494ef544f63SPaolo Pisati     enum intr_type flags, void **cookiep)
495b4151f71SJohn Baldwin {
496e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *temp_ih;
497e0f66ef8SJohn Baldwin 	struct intr_thread *it;
498b4151f71SJohn Baldwin 
499ef544f63SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
500b4151f71SJohn Baldwin 		return (EINVAL);
501b4151f71SJohn Baldwin 
502e0f66ef8SJohn Baldwin 	/* Allocate and populate an interrupt handler structure. */
503e0f66ef8SJohn Baldwin 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
504ef544f63SPaolo Pisati 	ih->ih_filter = filter;
505b4151f71SJohn Baldwin 	ih->ih_handler = handler;
506b4151f71SJohn Baldwin 	ih->ih_argument = arg;
507b4151f71SJohn Baldwin 	ih->ih_name = name;
508e0f66ef8SJohn Baldwin 	ih->ih_event = ie;
509b4151f71SJohn Baldwin 	ih->ih_pri = pri;
510ef544f63SPaolo Pisati 	if (flags & INTR_EXCL)
511b4151f71SJohn Baldwin 		ih->ih_flags = IH_EXCLUSIVE;
512b4151f71SJohn Baldwin 	if (flags & INTR_MPSAFE)
513b4151f71SJohn Baldwin 		ih->ih_flags |= IH_MPSAFE;
514b4151f71SJohn Baldwin 	if (flags & INTR_ENTROPY)
515b4151f71SJohn Baldwin 		ih->ih_flags |= IH_ENTROPY;
516b4151f71SJohn Baldwin 
517e0f66ef8SJohn Baldwin 	/* We can only have one exclusive handler in an event. */
518e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
519e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
520e0f66ef8SJohn Baldwin 		if ((flags & INTR_EXCL) ||
521e0f66ef8SJohn Baldwin 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
522e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
523b4151f71SJohn Baldwin 			free(ih, M_ITHREAD);
524b4151f71SJohn Baldwin 			return (EINVAL);
525b4151f71SJohn Baldwin 		}
526e0f66ef8SJohn Baldwin 	}
527e0f66ef8SJohn Baldwin 
528e0f66ef8SJohn Baldwin 	/* Add the new handler to the event in priority order. */
529e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
530e0f66ef8SJohn Baldwin 		if (temp_ih->ih_pri > ih->ih_pri)
531e0f66ef8SJohn Baldwin 			break;
532e0f66ef8SJohn Baldwin 	}
533e0f66ef8SJohn Baldwin 	if (temp_ih == NULL)
534e0f66ef8SJohn Baldwin 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
535e0f66ef8SJohn Baldwin 	else
536e0f66ef8SJohn Baldwin 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
537e0f66ef8SJohn Baldwin 	intr_event_update(ie);
538e0f66ef8SJohn Baldwin 
539e0f66ef8SJohn Baldwin 	/* Create a thread if we need one. */
540ef544f63SPaolo Pisati 	while (ie->ie_thread == NULL && handler != NULL) {
541e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD)
5420f180a7cSJohn Baldwin 			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
543e0f66ef8SJohn Baldwin 		else {
544e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ADDING_THREAD;
545e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
546e0f66ef8SJohn Baldwin 			it = ithread_create("intr: newborn");
547e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
548e0f66ef8SJohn Baldwin 			ie->ie_flags &= ~IE_ADDING_THREAD;
549e0f66ef8SJohn Baldwin 			ie->ie_thread = it;
550e0f66ef8SJohn Baldwin 			it->it_event = ie;
551e0f66ef8SJohn Baldwin 			ithread_update(it);
552e0f66ef8SJohn Baldwin 			wakeup(ie);
553e0f66ef8SJohn Baldwin 		}
554e0f66ef8SJohn Baldwin 	}
555e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
556e0f66ef8SJohn Baldwin 	    ie->ie_name);
557e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
558e0f66ef8SJohn Baldwin 
559e0f66ef8SJohn Baldwin 	if (cookiep != NULL)
560e0f66ef8SJohn Baldwin 		*cookiep = ih;
561e0f66ef8SJohn Baldwin 	return (0);
562e0f66ef8SJohn Baldwin }
563bafe5a31SPaolo Pisati #else
564bafe5a31SPaolo Pisati int
565bafe5a31SPaolo Pisati intr_event_add_handler(struct intr_event *ie, const char *name,
566bafe5a31SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
567bafe5a31SPaolo Pisati     enum intr_type flags, void **cookiep)
568bafe5a31SPaolo Pisati {
569bafe5a31SPaolo Pisati 	struct intr_handler *ih, *temp_ih;
570bafe5a31SPaolo Pisati 	struct intr_thread *it;
571bafe5a31SPaolo Pisati 
572bafe5a31SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
573bafe5a31SPaolo Pisati 		return (EINVAL);
574bafe5a31SPaolo Pisati 
575bafe5a31SPaolo Pisati 	/* Allocate and populate an interrupt handler structure. */
576bafe5a31SPaolo Pisati 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
577bafe5a31SPaolo Pisati 	ih->ih_filter = filter;
578bafe5a31SPaolo Pisati 	ih->ih_handler = handler;
579bafe5a31SPaolo Pisati 	ih->ih_argument = arg;
580bafe5a31SPaolo Pisati 	ih->ih_name = name;
581bafe5a31SPaolo Pisati 	ih->ih_event = ie;
582bafe5a31SPaolo Pisati 	ih->ih_pri = pri;
583bafe5a31SPaolo Pisati 	if (flags & INTR_EXCL)
584bafe5a31SPaolo Pisati 		ih->ih_flags = IH_EXCLUSIVE;
585bafe5a31SPaolo Pisati 	if (flags & INTR_MPSAFE)
586bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_MPSAFE;
587bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
588bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_ENTROPY;
589bafe5a31SPaolo Pisati 
590bafe5a31SPaolo Pisati 	/* We can only have one exclusive handler in an event. */
591bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
592bafe5a31SPaolo Pisati 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
593bafe5a31SPaolo Pisati 		if ((flags & INTR_EXCL) ||
594bafe5a31SPaolo Pisati 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
595bafe5a31SPaolo Pisati 			mtx_unlock(&ie->ie_lock);
596bafe5a31SPaolo Pisati 			free(ih, M_ITHREAD);
597bafe5a31SPaolo Pisati 			return (EINVAL);
598bafe5a31SPaolo Pisati 		}
599bafe5a31SPaolo Pisati 	}
600bafe5a31SPaolo Pisati 
601bafe5a31SPaolo Pisati 	/* Add the new handler to the event in priority order. */
602bafe5a31SPaolo Pisati 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
603bafe5a31SPaolo Pisati 		if (temp_ih->ih_pri > ih->ih_pri)
604bafe5a31SPaolo Pisati 			break;
605bafe5a31SPaolo Pisati 	}
606bafe5a31SPaolo Pisati 	if (temp_ih == NULL)
607bafe5a31SPaolo Pisati 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
608bafe5a31SPaolo Pisati 	else
609bafe5a31SPaolo Pisati 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
610bafe5a31SPaolo Pisati 	intr_event_update(ie);
611bafe5a31SPaolo Pisati 
612bafe5a31SPaolo Pisati 	/* For filtered handlers, create a private ithread to run on. */
613bafe5a31SPaolo Pisati 	if (filter != NULL && handler != NULL) {
614bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
615bafe5a31SPaolo Pisati 		it = ithread_create("intr: newborn", ih);
616bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
617bafe5a31SPaolo Pisati 		it->it_event = ie;
618bafe5a31SPaolo Pisati 		ih->ih_thread = it;
619bafe5a31SPaolo Pisati 		ithread_update(it); // XXX - do we really need this?!?!?
620bafe5a31SPaolo Pisati 	} else { /* Create the global per-event thread if we need one. */
621bafe5a31SPaolo Pisati 		while (ie->ie_thread == NULL && handler != NULL) {
622bafe5a31SPaolo Pisati 			if (ie->ie_flags & IE_ADDING_THREAD)
623bafe5a31SPaolo Pisati 				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
624bafe5a31SPaolo Pisati 			else {
625bafe5a31SPaolo Pisati 				ie->ie_flags |= IE_ADDING_THREAD;
626bafe5a31SPaolo Pisati 				mtx_unlock(&ie->ie_lock);
627bafe5a31SPaolo Pisati 				it = ithread_create("intr: newborn", ih);
628bafe5a31SPaolo Pisati 				mtx_lock(&ie->ie_lock);
629bafe5a31SPaolo Pisati 				ie->ie_flags &= ~IE_ADDING_THREAD;
630bafe5a31SPaolo Pisati 				ie->ie_thread = it;
631bafe5a31SPaolo Pisati 				it->it_event = ie;
632bafe5a31SPaolo Pisati 				ithread_update(it);
633bafe5a31SPaolo Pisati 				wakeup(ie);
634bafe5a31SPaolo Pisati 			}
635bafe5a31SPaolo Pisati 		}
636bafe5a31SPaolo Pisati 	}
637bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
638bafe5a31SPaolo Pisati 	    ie->ie_name);
639bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
640bafe5a31SPaolo Pisati 
641bafe5a31SPaolo Pisati 	if (cookiep != NULL)
642bafe5a31SPaolo Pisati 		*cookiep = ih;
643bafe5a31SPaolo Pisati 	return (0);
644bafe5a31SPaolo Pisati }
645bafe5a31SPaolo Pisati #endif
646b4151f71SJohn Baldwin 
647c3045318SJohn Baldwin /*
648c3045318SJohn Baldwin  * Return the ie_source field from the intr_event an intr_handler is
649c3045318SJohn Baldwin  * associated with.
650c3045318SJohn Baldwin  */
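/*
 * A brief usage sketch (hypothetical "foo" interrupt controller, not part
 * of this file): a driver that passed its per-interrupt source structure
 * as the "source" argument to intr_event_create() can recover it from a
 * handler cookie.
 *
 *	struct foo_intsrc *isrc;
 *
 *	isrc = intr_handler_source(cookie);
 */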
651c3045318SJohn Baldwin void *
652c3045318SJohn Baldwin intr_handler_source(void *cookie)
653c3045318SJohn Baldwin {
654c3045318SJohn Baldwin 	struct intr_handler *ih;
655c3045318SJohn Baldwin 	struct intr_event *ie;
656c3045318SJohn Baldwin 
657c3045318SJohn Baldwin 	ih = (struct intr_handler *)cookie;
658c3045318SJohn Baldwin 	if (ih == NULL)
659c3045318SJohn Baldwin 		return (NULL);
660c3045318SJohn Baldwin 	ie = ih->ih_event;
661c3045318SJohn Baldwin 	KASSERT(ie != NULL,
662c3045318SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
663c3045318SJohn Baldwin 	    ih->ih_name));
664c3045318SJohn Baldwin 	return (ie->ie_source);
665c3045318SJohn Baldwin }
666c3045318SJohn Baldwin 
667bafe5a31SPaolo Pisati #ifndef INTR_FILTER
668b4151f71SJohn Baldwin int
669e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie)
670b4151f71SJohn Baldwin {
671e0f66ef8SJohn Baldwin 	struct intr_handler *handler = (struct intr_handler *)cookie;
672e0f66ef8SJohn Baldwin 	struct intr_event *ie;
673b4151f71SJohn Baldwin #ifdef INVARIANTS
674e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
675e0f66ef8SJohn Baldwin #endif
676e0f66ef8SJohn Baldwin #ifdef notyet
677e0f66ef8SJohn Baldwin 	int dead;
678b4151f71SJohn Baldwin #endif
679b4151f71SJohn Baldwin 
6803e5da754SJohn Baldwin 	if (handler == NULL)
681b4151f71SJohn Baldwin 		return (EINVAL);
682e0f66ef8SJohn Baldwin 	ie = handler->ih_event;
683e0f66ef8SJohn Baldwin 	KASSERT(ie != NULL,
684e0f66ef8SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
6853e5da754SJohn Baldwin 	    handler->ih_name));
686e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
68791f91617SDavid E. O'Brien 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
688e0f66ef8SJohn Baldwin 	    ie->ie_name);
689b4151f71SJohn Baldwin #ifdef INVARIANTS
690e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
6913e5da754SJohn Baldwin 		if (ih == handler)
6923e5da754SJohn Baldwin 			goto ok;
693e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
694e0f66ef8SJohn Baldwin 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
695e0f66ef8SJohn Baldwin 	    ih->ih_name, ie->ie_name);
6963e5da754SJohn Baldwin ok:
697b4151f71SJohn Baldwin #endif
698de271f01SJohn Baldwin 	/*
699e0f66ef8SJohn Baldwin 	 * If there is no ithread, then just remove the handler and return.
700e0f66ef8SJohn Baldwin 	 * XXX: Note that an INTR_FAST handler might be running on another
701e0f66ef8SJohn Baldwin 	 * CPU!
702e0f66ef8SJohn Baldwin 	 */
703e0f66ef8SJohn Baldwin 	if (ie->ie_thread == NULL) {
704e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
705e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
706e0f66ef8SJohn Baldwin 		free(handler, M_ITHREAD);
707e0f66ef8SJohn Baldwin 		return (0);
708e0f66ef8SJohn Baldwin 	}
709e0f66ef8SJohn Baldwin 
710e0f66ef8SJohn Baldwin 	/*
711de271f01SJohn Baldwin 	 * If the interrupt thread is already running, then just mark this
712de271f01SJohn Baldwin 	 * handler as being dead and let the ithread do the actual removal.
713288e351bSDon Lewis 	 *
714288e351bSDon Lewis 	 * During a cold boot while cold is set, msleep() does not sleep,
715288e351bSDon Lewis 	 * so we have to remove the handler here rather than letting the
716288e351bSDon Lewis 	 * thread do it.
717de271f01SJohn Baldwin 	 */
718982d11f8SJeff Roberson 	thread_lock(ie->ie_thread->it_thread);
719e0f66ef8SJohn Baldwin 	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
720de271f01SJohn Baldwin 		handler->ih_flags |= IH_DEAD;
721de271f01SJohn Baldwin 
722de271f01SJohn Baldwin 		/*
723de271f01SJohn Baldwin 		 * Ensure that the thread will process the handler list
724de271f01SJohn Baldwin 		 * again and remove this handler if it has already passed
725de271f01SJohn Baldwin 		 * it on the list.
726de271f01SJohn Baldwin 		 */
727e0f66ef8SJohn Baldwin 		ie->ie_thread->it_need = 1;
7284d29cb2dSJohn Baldwin 	} else
729e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
730982d11f8SJeff Roberson 	thread_unlock(ie->ie_thread->it_thread);
731e0f66ef8SJohn Baldwin 	while (handler->ih_flags & IH_DEAD)
7320f180a7cSJohn Baldwin 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
733e0f66ef8SJohn Baldwin 	intr_event_update(ie);
734e0f66ef8SJohn Baldwin #ifdef notyet
735e0f66ef8SJohn Baldwin 	/*
736e0f66ef8SJohn Baldwin 	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
737e0f66ef8SJohn Baldwin 	 * this could lead to races of stale data when servicing an
738e0f66ef8SJohn Baldwin 	 * interrupt.
739e0f66ef8SJohn Baldwin 	 */
740e0f66ef8SJohn Baldwin 	dead = 1;
741e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
742e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_FAST)) {
743e0f66ef8SJohn Baldwin 			dead = 0;
744e0f66ef8SJohn Baldwin 			break;
745e0f66ef8SJohn Baldwin 		}
746e0f66ef8SJohn Baldwin 	}
747e0f66ef8SJohn Baldwin 	if (dead) {
748e0f66ef8SJohn Baldwin 		ithread_destroy(ie->ie_thread);
749e0f66ef8SJohn Baldwin 		ie->ie_thread = NULL;
750e0f66ef8SJohn Baldwin 	}
751e0f66ef8SJohn Baldwin #endif
752e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
753b4151f71SJohn Baldwin 	free(handler, M_ITHREAD);
754b4151f71SJohn Baldwin 	return (0);
755b4151f71SJohn Baldwin }
756b4151f71SJohn Baldwin 
7571ee1b687SJohn Baldwin static int
758e0f66ef8SJohn Baldwin intr_event_schedule_thread(struct intr_event *ie)
7593e5da754SJohn Baldwin {
760e0f66ef8SJohn Baldwin 	struct intr_entropy entropy;
761e0f66ef8SJohn Baldwin 	struct intr_thread *it;
762b40ce416SJulian Elischer 	struct thread *td;
76304774f23SJulian Elischer 	struct thread *ctd;
7643e5da754SJohn Baldwin 	struct proc *p;
7653e5da754SJohn Baldwin 
7663e5da754SJohn Baldwin 	/*
7673e5da754SJohn Baldwin 	 * If no ithread or no handlers, then we have a stray interrupt.
7683e5da754SJohn Baldwin 	 */
769e0f66ef8SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
770e0f66ef8SJohn Baldwin 	    ie->ie_thread == NULL)
7713e5da754SJohn Baldwin 		return (EINVAL);
7723e5da754SJohn Baldwin 
77304774f23SJulian Elischer 	ctd = curthread;
774e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
775e0f66ef8SJohn Baldwin 	td = it->it_thread;
7766f40c417SRobert Watson 	p = td->td_proc;
777e0f66ef8SJohn Baldwin 
7783e5da754SJohn Baldwin 	/*
7793e5da754SJohn Baldwin 	 * If any of the handlers for this ithread claim to be good
7803e5da754SJohn Baldwin 	 * sources of entropy, then gather some.
7813e5da754SJohn Baldwin 	 */
782e0f66ef8SJohn Baldwin 	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
7836f40c417SRobert Watson 		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
7847ab24ea3SJulian Elischer 		    p->p_pid, td->td_name);
785e0f66ef8SJohn Baldwin 		entropy.event = (uintptr_t)ie;
786e0f66ef8SJohn Baldwin 		entropy.td = ctd;
7873e5da754SJohn Baldwin 		random_harvest(&entropy, sizeof(entropy), 2, 0,
7883e5da754SJohn Baldwin 		    RANDOM_INTERRUPT);
7893e5da754SJohn Baldwin 	}
7903e5da754SJohn Baldwin 
791e0f66ef8SJohn Baldwin 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
7923e5da754SJohn Baldwin 
7933e5da754SJohn Baldwin 	/*
7943e5da754SJohn Baldwin 	 * Set it_need to tell the thread to keep running if it is already
795982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
796982d11f8SJeff Roberson 	 * put it on the runqueue.
7973e5da754SJohn Baldwin 	 */
798e0f66ef8SJohn Baldwin 	it->it_need = 1;
799982d11f8SJeff Roberson 	thread_lock(td);
80071fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
801e0f66ef8SJohn Baldwin 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
8027ab24ea3SJulian Elischer 		    td->td_name);
80371fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
804f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
8053e5da754SJohn Baldwin 	} else {
806e0f66ef8SJohn Baldwin 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
8077ab24ea3SJulian Elischer 		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
8083e5da754SJohn Baldwin 	}
809982d11f8SJeff Roberson 	thread_unlock(td);
8103e5da754SJohn Baldwin 
8113e5da754SJohn Baldwin 	return (0);
8123e5da754SJohn Baldwin }
813bafe5a31SPaolo Pisati #else
814bafe5a31SPaolo Pisati int
815bafe5a31SPaolo Pisati intr_event_remove_handler(void *cookie)
816bafe5a31SPaolo Pisati {
817bafe5a31SPaolo Pisati 	struct intr_handler *handler = (struct intr_handler *)cookie;
818bafe5a31SPaolo Pisati 	struct intr_event *ie;
819bafe5a31SPaolo Pisati 	struct intr_thread *it;
820bafe5a31SPaolo Pisati #ifdef INVARIANTS
821bafe5a31SPaolo Pisati 	struct intr_handler *ih;
822bafe5a31SPaolo Pisati #endif
823bafe5a31SPaolo Pisati #ifdef notyet
824bafe5a31SPaolo Pisati 	int dead;
825bafe5a31SPaolo Pisati #endif
826bafe5a31SPaolo Pisati 
827bafe5a31SPaolo Pisati 	if (handler == NULL)
828bafe5a31SPaolo Pisati 		return (EINVAL);
829bafe5a31SPaolo Pisati 	ie = handler->ih_event;
830bafe5a31SPaolo Pisati 	KASSERT(ie != NULL,
831bafe5a31SPaolo Pisati 	    ("interrupt handler \"%s\" has a NULL interrupt event",
832bafe5a31SPaolo Pisati 	    handler->ih_name));
833bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
834bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
835bafe5a31SPaolo Pisati 	    ie->ie_name);
836bafe5a31SPaolo Pisati #ifdef INVARIANTS
837bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
838bafe5a31SPaolo Pisati 		if (ih == handler)
839bafe5a31SPaolo Pisati 			goto ok;
840bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
841bafe5a31SPaolo Pisati 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
842bafe5a31SPaolo Pisati 	    ih->ih_name, ie->ie_name);
843bafe5a31SPaolo Pisati ok:
844bafe5a31SPaolo Pisati #endif
845bafe5a31SPaolo Pisati 	/*
846bafe5a31SPaolo Pisati 	 * If there are no ithreads (per event and per handler), then
847bafe5a31SPaolo Pisati 	 * just remove the handler and return.
848bafe5a31SPaolo Pisati 	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
849bafe5a31SPaolo Pisati 	 */
850bafe5a31SPaolo Pisati 	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
851bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
852bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
853bafe5a31SPaolo Pisati 		free(handler, M_ITHREAD);
854bafe5a31SPaolo Pisati 		return (0);
855bafe5a31SPaolo Pisati 	}
856bafe5a31SPaolo Pisati 
857bafe5a31SPaolo Pisati 	/* Private or global ithread? */
858bafe5a31SPaolo Pisati 	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
859bafe5a31SPaolo Pisati 	/*
860bafe5a31SPaolo Pisati 	 * If the interrupt thread is already running, then just mark this
861bafe5a31SPaolo Pisati 	 * handler as being dead and let the ithread do the actual removal.
862bafe5a31SPaolo Pisati 	 *
863bafe5a31SPaolo Pisati 	 * During a cold boot while cold is set, msleep() does not sleep,
864bafe5a31SPaolo Pisati 	 * so we have to remove the handler here rather than letting the
865bafe5a31SPaolo Pisati 	 * thread do it.
866bafe5a31SPaolo Pisati 	 */
867982d11f8SJeff Roberson 	thread_lock(it->it_thread);
868bafe5a31SPaolo Pisati 	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
869bafe5a31SPaolo Pisati 		handler->ih_flags |= IH_DEAD;
870bafe5a31SPaolo Pisati 
871bafe5a31SPaolo Pisati 		/*
872bafe5a31SPaolo Pisati 		 * Ensure that the thread will process the handler list
873bafe5a31SPaolo Pisati 		 * again and remove this handler if it has already passed
874bafe5a31SPaolo Pisati 		 * it on the list.
875bafe5a31SPaolo Pisati 		 */
876bafe5a31SPaolo Pisati 		it->it_need = 1;
877bafe5a31SPaolo Pisati 	} else
878bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
879982d11f8SJeff Roberson 	thread_unlock(it->it_thread);
880bafe5a31SPaolo Pisati 	while (handler->ih_flags & IH_DEAD)
881bafe5a31SPaolo Pisati 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
882bafe5a31SPaolo Pisati 	/*
883bafe5a31SPaolo Pisati 	 * At this point, the handler has been disconnected from the event,
884bafe5a31SPaolo Pisati 	 * so we can kill the private ithread if any.
885bafe5a31SPaolo Pisati 	 */
886bafe5a31SPaolo Pisati 	if (handler->ih_thread) {
887bafe5a31SPaolo Pisati 		ithread_destroy(handler->ih_thread);
888bafe5a31SPaolo Pisati 		handler->ih_thread = NULL;
889bafe5a31SPaolo Pisati 	}
890bafe5a31SPaolo Pisati 	intr_event_update(ie);
891bafe5a31SPaolo Pisati #ifdef notyet
892bafe5a31SPaolo Pisati 	/*
893bafe5a31SPaolo Pisati 	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
894bafe5a31SPaolo Pisati 	 * this could lead to races of stale data when servicing an
895bafe5a31SPaolo Pisati 	 * interrupt.
896bafe5a31SPaolo Pisati 	 */
897bafe5a31SPaolo Pisati 	dead = 1;
898bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
899bafe5a31SPaolo Pisati 		if (handler != NULL) {
900bafe5a31SPaolo Pisati 			dead = 0;
901bafe5a31SPaolo Pisati 			break;
902bafe5a31SPaolo Pisati 		}
903bafe5a31SPaolo Pisati 	}
904bafe5a31SPaolo Pisati 	if (dead) {
905bafe5a31SPaolo Pisati 		ithread_destroy(ie->ie_thread);
906bafe5a31SPaolo Pisati 		ie->ie_thread = NULL;
907bafe5a31SPaolo Pisati 	}
908bafe5a31SPaolo Pisati #endif
909bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
910bafe5a31SPaolo Pisati 	free(handler, M_ITHREAD);
911bafe5a31SPaolo Pisati 	return (0);
912bafe5a31SPaolo Pisati }
913bafe5a31SPaolo Pisati 
9141ee1b687SJohn Baldwin static int
915bafe5a31SPaolo Pisati intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
916bafe5a31SPaolo Pisati {
917bafe5a31SPaolo Pisati 	struct intr_entropy entropy;
918bafe5a31SPaolo Pisati 	struct thread *td;
919bafe5a31SPaolo Pisati 	struct thread *ctd;
920bafe5a31SPaolo Pisati 	struct proc *p;
921bafe5a31SPaolo Pisati 
922bafe5a31SPaolo Pisati 	/*
923bafe5a31SPaolo Pisati 	 * If no ithread or no handlers, then we have a stray interrupt.
924bafe5a31SPaolo Pisati 	 */
925bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
926bafe5a31SPaolo Pisati 		return (EINVAL);
927bafe5a31SPaolo Pisati 
928bafe5a31SPaolo Pisati 	ctd = curthread;
929bafe5a31SPaolo Pisati 	td = it->it_thread;
930bafe5a31SPaolo Pisati 	p = td->td_proc;
931bafe5a31SPaolo Pisati 
932bafe5a31SPaolo Pisati 	/*
933bafe5a31SPaolo Pisati 	 * If any of the handlers for this ithread claim to be good
934bafe5a31SPaolo Pisati 	 * sources of entropy, then gather some.
935bafe5a31SPaolo Pisati 	 */
936bafe5a31SPaolo Pisati 	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
937bafe5a31SPaolo Pisati 		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
9387ab24ea3SJulian Elischer 		    p->p_pid, td->td_name);
939bafe5a31SPaolo Pisati 		entropy.event = (uintptr_t)ie;
940bafe5a31SPaolo Pisati 		entropy.td = ctd;
941bafe5a31SPaolo Pisati 		random_harvest(&entropy, sizeof(entropy), 2, 0,
942bafe5a31SPaolo Pisati 		    RANDOM_INTERRUPT);
943bafe5a31SPaolo Pisati 	}
944bafe5a31SPaolo Pisati 
945bafe5a31SPaolo Pisati 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
946bafe5a31SPaolo Pisati 
947bafe5a31SPaolo Pisati 	/*
948bafe5a31SPaolo Pisati 	 * Set it_need to tell the thread to keep running if it is already
949982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
950982d11f8SJeff Roberson 	 * put it on the runqueue.
951bafe5a31SPaolo Pisati 	 */
952bafe5a31SPaolo Pisati 	it->it_need = 1;
953982d11f8SJeff Roberson 	thread_lock(td);
954bafe5a31SPaolo Pisati 	if (TD_AWAITING_INTR(td)) {
955bafe5a31SPaolo Pisati 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
9563c1ffc32SJulian Elischer 		    td->td_name);
957bafe5a31SPaolo Pisati 		TD_CLR_IWAIT(td);
958bafe5a31SPaolo Pisati 		sched_add(td, SRQ_INTR);
959bafe5a31SPaolo Pisati 	} else {
960bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
9617ab24ea3SJulian Elischer 		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
962bafe5a31SPaolo Pisati 	}
963982d11f8SJeff Roberson 	thread_unlock(td);
964bafe5a31SPaolo Pisati 
965bafe5a31SPaolo Pisati 	return (0);
966bafe5a31SPaolo Pisati }
967bafe5a31SPaolo Pisati #endif
9683e5da754SJohn Baldwin 
969fe486a37SJohn Baldwin /*
970e84bcd84SRobert Watson  * Allow interrupt event binding for software interrupt handlers -- a no-op,
971e84bcd84SRobert Watson  * since interrupts are generated in software rather than being directed by
972e84bcd84SRobert Watson  * a PIC.
973e84bcd84SRobert Watson  */
974e84bcd84SRobert Watson static int
975e84bcd84SRobert Watson swi_assign_cpu(void *arg, u_char cpu)
976e84bcd84SRobert Watson {
977e84bcd84SRobert Watson 
978e84bcd84SRobert Watson 	return (0);
979e84bcd84SRobert Watson }
980e84bcd84SRobert Watson 
981e84bcd84SRobert Watson /*
982fe486a37SJohn Baldwin  * Add a software interrupt handler to a specified event.  If a given event
983fe486a37SJohn Baldwin  * is not specified, then a new event is created.
984fe486a37SJohn Baldwin  */
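/*
 * A minimal usage sketch (hypothetical "foo" subsystem, not part of this
 * file): register a soft interrupt handler once, then schedule it later
 * through the returned cookie.
 *
 *	static void foo_swi(void *arg);
 *	static void *foo_ih;
 *	...
 *	error = swi_add(NULL, "foo", foo_swi, NULL, SWI_TQ, INTR_MPSAFE,
 *	    &foo_ih);
 *	...
 *	swi_sched(foo_ih, 0);
 */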
9853e5da754SJohn Baldwin int
986e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
987b4151f71SJohn Baldwin 	    void *arg, int pri, enum intr_type flags, void **cookiep)
9888088699fSJohn Baldwin {
989e0f66ef8SJohn Baldwin 	struct intr_event *ie;
990b4151f71SJohn Baldwin 	int error;
9918088699fSJohn Baldwin 
992bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
9933e5da754SJohn Baldwin 		return (EINVAL);
9943e5da754SJohn Baldwin 
995e0f66ef8SJohn Baldwin 	ie = (eventp != NULL) ? *eventp : NULL;
9968088699fSJohn Baldwin 
997e0f66ef8SJohn Baldwin 	if (ie != NULL) {
998e0f66ef8SJohn Baldwin 		if (!(ie->ie_flags & IE_SOFT))
9993e5da754SJohn Baldwin 			return (EINVAL);
10003e5da754SJohn Baldwin 	} else {
10019b33b154SJeff Roberson 		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1002e84bcd84SRobert Watson 		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
10038088699fSJohn Baldwin 		if (error)
1004b4151f71SJohn Baldwin 			return (error);
1005e0f66ef8SJohn Baldwin 		if (eventp != NULL)
1006e0f66ef8SJohn Baldwin 			*eventp = ie;
10078088699fSJohn Baldwin 	}
10088d809d50SJeff Roberson 	error = intr_event_add_handler(ie, name, NULL, handler, arg,
10098d809d50SJeff Roberson 	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
10108d809d50SJeff Roberson 	if (error)
10118d809d50SJeff Roberson 		return (error);
10128d809d50SJeff Roberson 	if (pri == SWI_CLOCK) {
10138d809d50SJeff Roberson 		struct proc *p;
10148d809d50SJeff Roberson 		p = ie->ie_thread->it_thread->td_proc;
10158d809d50SJeff Roberson 		PROC_LOCK(p);
10168d809d50SJeff Roberson 		p->p_flag |= P_NOLOAD;
10178d809d50SJeff Roberson 		PROC_UNLOCK(p);
10188d809d50SJeff Roberson 	}
10198d809d50SJeff Roberson 	return (0);
10208088699fSJohn Baldwin }
10218088699fSJohn Baldwin 
10221931cf94SJohn Baldwin /*
1023e0f66ef8SJohn Baldwin  * Schedule a software interrupt thread.
10241931cf94SJohn Baldwin  */
10251931cf94SJohn Baldwin void
1026b4151f71SJohn Baldwin swi_sched(void *cookie, int flags)
10271931cf94SJohn Baldwin {
1028e0f66ef8SJohn Baldwin 	struct intr_handler *ih = (struct intr_handler *)cookie;
1029e0f66ef8SJohn Baldwin 	struct intr_event *ie = ih->ih_event;
10303e5da754SJohn Baldwin 	int error;
10318088699fSJohn Baldwin 
1032e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1033e0f66ef8SJohn Baldwin 	    ih->ih_need);
10341931cf94SJohn Baldwin 
10351931cf94SJohn Baldwin 	/*
10363e5da754SJohn Baldwin 	 * Set ih_need for this handler so that if the ithread is already
10373e5da754SJohn Baldwin 	 * running it will execute this handler on the next pass.  Otherwise,
10383e5da754SJohn Baldwin 	 * it will execute it the next time it runs.
10391931cf94SJohn Baldwin 	 */
1040b4151f71SJohn Baldwin 	atomic_store_rel_int(&ih->ih_need, 1);
10411ca2c018SBruce Evans 
1042b4151f71SJohn Baldwin 	if (!(flags & SWI_DELAY)) {
104367596082SAttilio Rao 		PCPU_INC(cnt.v_soft);
1044bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1045bafe5a31SPaolo Pisati 		error = intr_event_schedule_thread(ie, ie->ie_thread);
1046bafe5a31SPaolo Pisati #else
1047e0f66ef8SJohn Baldwin 		error = intr_event_schedule_thread(ie);
1048bafe5a31SPaolo Pisati #endif
10493e5da754SJohn Baldwin 		KASSERT(error == 0, ("stray software interrupt"));
10508088699fSJohn Baldwin 	}
10518088699fSJohn Baldwin }
10528088699fSJohn Baldwin 
1053fe486a37SJohn Baldwin /*
1054fe486a37SJohn Baldwin  * Remove a software interrupt handler.  Currently this code does not
1055fe486a37SJohn Baldwin  * remove the associated interrupt event if it becomes empty.  Calling code
1056fe486a37SJohn Baldwin  * may do so manually via intr_event_destroy(), but that's not really
1057fe486a37SJohn Baldwin  * an optimal interface.
1058fe486a37SJohn Baldwin  */
1059fe486a37SJohn Baldwin int
1060fe486a37SJohn Baldwin swi_remove(void *cookie)
1061fe486a37SJohn Baldwin {
1062fe486a37SJohn Baldwin 
1063fe486a37SJohn Baldwin 	return (intr_event_remove_handler(cookie));
1064fe486a37SJohn Baldwin }
1065fe486a37SJohn Baldwin 
1066bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1067bafe5a31SPaolo Pisati static void
1068bafe5a31SPaolo Pisati priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
1069bafe5a31SPaolo Pisati {
1070bafe5a31SPaolo Pisati 	struct intr_event *ie;
1071bafe5a31SPaolo Pisati 
1072bafe5a31SPaolo Pisati 	ie = ih->ih_event;
1073bafe5a31SPaolo Pisati 	/*
1074bafe5a31SPaolo Pisati 	 * If this handler is marked for death, remove it from
1075bafe5a31SPaolo Pisati 	 * the list of handlers and wake up the sleeper.
1076bafe5a31SPaolo Pisati 	 */
1077bafe5a31SPaolo Pisati 	if (ih->ih_flags & IH_DEAD) {
1078bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
1079bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1080bafe5a31SPaolo Pisati 		ih->ih_flags &= ~IH_DEAD;
1081bafe5a31SPaolo Pisati 		wakeup(ih);
1082bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
1083bafe5a31SPaolo Pisati 		return;
1084bafe5a31SPaolo Pisati 	}
1085bafe5a31SPaolo Pisati 
1086bafe5a31SPaolo Pisati 	/* Execute this handler. */
1087bafe5a31SPaolo Pisati 	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1088bafe5a31SPaolo Pisati 	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
1089bafe5a31SPaolo Pisati 	     ih->ih_name, ih->ih_flags);
1090bafe5a31SPaolo Pisati 
1091bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1092bafe5a31SPaolo Pisati 		mtx_lock(&Giant);
1093bafe5a31SPaolo Pisati 	ih->ih_handler(ih->ih_argument);
1094bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1095bafe5a31SPaolo Pisati 		mtx_unlock(&Giant);
1096bafe5a31SPaolo Pisati }
1097bafe5a31SPaolo Pisati #endif
1098bafe5a31SPaolo Pisati 
109937e9511fSJohn Baldwin /*
110037e9511fSJohn Baldwin  * This is a public function for use by drivers that mux interrupt
110137e9511fSJohn Baldwin  * handlers for child devices from their interrupt handler.
110237e9511fSJohn Baldwin  */
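/*
 * A rough sketch of the intended use (hypothetical "foobridge" driver and
 * softc, not part of this file): a parent device that demultiplexes one
 * hardware interrupt among child devices keeps a child intr_event per
 * source and runs its handlers from the parent's own interrupt handler;
 * curproc is passed only for the KTR trace below.
 *
 *	static void
 *	foobridge_intr(void *arg)
 *	{
 *		struct foobridge_softc *sc = arg;
 *
 *		intr_event_execute_handlers(curproc, sc->sc_child_ie);
 *	}
 */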
110337e9511fSJohn Baldwin void
110437e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1105e0f66ef8SJohn Baldwin {
1106e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *ihn;
1107e0f66ef8SJohn Baldwin 
1108e0f66ef8SJohn Baldwin 	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1109e0f66ef8SJohn Baldwin 		/*
1110e0f66ef8SJohn Baldwin 		 * If this handler is marked for death, remove it from
1111e0f66ef8SJohn Baldwin 		 * the list of handlers and wake up the sleeper.
1112e0f66ef8SJohn Baldwin 		 */
1113e0f66ef8SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
1114e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
1115e0f66ef8SJohn Baldwin 			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1116e0f66ef8SJohn Baldwin 			ih->ih_flags &= ~IH_DEAD;
1117e0f66ef8SJohn Baldwin 			wakeup(ih);
1118e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
1119e0f66ef8SJohn Baldwin 			continue;
1120e0f66ef8SJohn Baldwin 		}
1121e0f66ef8SJohn Baldwin 
1122f2d619c8SPaolo Pisati 		/* Skip filter-only handlers. */
1123f2d619c8SPaolo Pisati 		if (ih->ih_handler == NULL)
1124f2d619c8SPaolo Pisati 			continue;
1125f2d619c8SPaolo Pisati 
1126e0f66ef8SJohn Baldwin 		/*
1127e0f66ef8SJohn Baldwin 		 * For software interrupt threads, we only execute
1128e0f66ef8SJohn Baldwin 		 * handlers that have their need flag set.  Hardware
1129e0f66ef8SJohn Baldwin 		 * interrupt threads always invoke all of their handlers.
1130e0f66ef8SJohn Baldwin 		 */
1131e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_SOFT) {
1132e0f66ef8SJohn Baldwin 			if (!ih->ih_need)
1133e0f66ef8SJohn Baldwin 				continue;
1134e0f66ef8SJohn Baldwin 			else
1135e0f66ef8SJohn Baldwin 				atomic_store_rel_int(&ih->ih_need, 0);
1136e0f66ef8SJohn Baldwin 		}
1137e0f66ef8SJohn Baldwin 
1138e0f66ef8SJohn Baldwin 		/* Execute this handler. */
1139e0f66ef8SJohn Baldwin 		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1140bafe5a31SPaolo Pisati 		    __func__, p->p_pid, (void *)ih->ih_handler,
1141bafe5a31SPaolo Pisati 		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1142e0f66ef8SJohn Baldwin 
1143e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1144e0f66ef8SJohn Baldwin 			mtx_lock(&Giant);
1145e0f66ef8SJohn Baldwin 		ih->ih_handler(ih->ih_argument);
1146e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1147e0f66ef8SJohn Baldwin 			mtx_unlock(&Giant);
1148e0f66ef8SJohn Baldwin 	}
114937e9511fSJohn Baldwin }
115037e9511fSJohn Baldwin 
115137e9511fSJohn Baldwin static void
115237e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie)
115337e9511fSJohn Baldwin {
115437e9511fSJohn Baldwin 
115537e9511fSJohn Baldwin 	/* Interrupt handlers should not sleep. */
115637e9511fSJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
115737e9511fSJohn Baldwin 		THREAD_NO_SLEEPING();
115837e9511fSJohn Baldwin 	intr_event_execute_handlers(p, ie);
1159e0f66ef8SJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
1160e0f66ef8SJohn Baldwin 		THREAD_SLEEPING_OK();
1161e0f66ef8SJohn Baldwin 
1162e0f66ef8SJohn Baldwin 	/*
1163e0f66ef8SJohn Baldwin 	 * Interrupt storm handling:
1164e0f66ef8SJohn Baldwin 	 *
1165e0f66ef8SJohn Baldwin 	 * If this interrupt source is currently storming, then throttle
1166e0f66ef8SJohn Baldwin 	 * it to only fire the handler once per clock tick.
1167e0f66ef8SJohn Baldwin 	 *
1168e0f66ef8SJohn Baldwin 	 * If this interrupt source is not currently storming, but the
1169e0f66ef8SJohn Baldwin 	 * number of back to back interrupts exceeds the storm threshold,
1170e0f66ef8SJohn Baldwin 	 * then enter storming mode.
1171e0f66ef8SJohn Baldwin 	 */
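	/*
	 * (Assumed from the definition earlier in this file:
	 * intr_storm_threshold is the hw.intr_storm_threshold tunable, and a
	 * value of 0 disables storm detection, hence the first check below.)
	 */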
1172e41bcf3cSJohn Baldwin 	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1173e41bcf3cSJohn Baldwin 	    !(ie->ie_flags & IE_SOFT)) {
11740ae62c18SNate Lawson 		/* Report the message only once every second. */
11750ae62c18SNate Lawson 		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1176e0f66ef8SJohn Baldwin 			printf(
11770ae62c18SNate Lawson 	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1178e0f66ef8SJohn Baldwin 			    ie->ie_name);
1179e0f66ef8SJohn Baldwin 		}
1180e41bcf3cSJohn Baldwin 		pause("istorm", 1);
1181e0f66ef8SJohn Baldwin 	} else
1182e0f66ef8SJohn Baldwin 		ie->ie_count++;
1183e0f66ef8SJohn Baldwin 
1184e0f66ef8SJohn Baldwin 	/*
1185e0f66ef8SJohn Baldwin 	 * Now that all the handlers have had a chance to run, reenable
1186e0f66ef8SJohn Baldwin 	 * the interrupt source.
1187e0f66ef8SJohn Baldwin 	 */
11881ee1b687SJohn Baldwin 	if (ie->ie_post_ithread != NULL)
11891ee1b687SJohn Baldwin 		ie->ie_post_ithread(ie->ie_source);
1190e0f66ef8SJohn Baldwin }
1191e0f66ef8SJohn Baldwin 
1192bafe5a31SPaolo Pisati #ifndef INTR_FILTER
11938088699fSJohn Baldwin /*
1194b4151f71SJohn Baldwin  * This is the main code for interrupt threads.
11958088699fSJohn Baldwin  */
119637c84183SPoul-Henning Kamp static void
1197b4151f71SJohn Baldwin ithread_loop(void *arg)
11988088699fSJohn Baldwin {
1199e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
1200e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1201b40ce416SJulian Elischer 	struct thread *td;
1202b4151f71SJohn Baldwin 	struct proc *p;
12038088699fSJohn Baldwin 
1204b40ce416SJulian Elischer 	td = curthread;
1205b40ce416SJulian Elischer 	p = td->td_proc;
1206e0f66ef8SJohn Baldwin 	ithd = (struct intr_thread *)arg;
1207e0f66ef8SJohn Baldwin 	KASSERT(ithd->it_thread == td,
120891f91617SDavid E. O'Brien 	    ("%s: ithread and proc linkage out of sync", __func__));
1209e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
1210e0f66ef8SJohn Baldwin 	ie->ie_count = 0;
12118088699fSJohn Baldwin 
12128088699fSJohn Baldwin 	/*
12138088699fSJohn Baldwin 	 * As long as we have interrupts outstanding, go through the
12148088699fSJohn Baldwin 	 * list of handlers, giving each one a go at it.
12158088699fSJohn Baldwin 	 */
12168088699fSJohn Baldwin 	for (;;) {
1217b4151f71SJohn Baldwin 		/*
1218b4151f71SJohn Baldwin 		 * If we are an orphaned thread, then just die.
1219b4151f71SJohn Baldwin 		 */
1220b4151f71SJohn Baldwin 		if (ithd->it_flags & IT_DEAD) {
1221e0f66ef8SJohn Baldwin 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
12227ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1223b4151f71SJohn Baldwin 			free(ithd, M_ITHREAD);
1224ca9a0ddfSJulian Elischer 			kthread_exit();
1225b4151f71SJohn Baldwin 		}
1226b4151f71SJohn Baldwin 
1227e0f66ef8SJohn Baldwin 		/*
1228e0f66ef8SJohn Baldwin 		 * Service interrupts.  If another interrupt arrives while
1229e0f66ef8SJohn Baldwin 		 * we are running, it will set it_need to note that we
1230e0f66ef8SJohn Baldwin 		 * should make another pass.
1231e0f66ef8SJohn Baldwin 		 */
1232b4151f71SJohn Baldwin 		while (ithd->it_need) {
12338088699fSJohn Baldwin 			/*
1234e0f66ef8SJohn Baldwin 			 * This might need a full read and write barrier
1235e0f66ef8SJohn Baldwin 			 * to make sure that this write posts before any
1236e0f66ef8SJohn Baldwin 			 * of the memory or device accesses in the
1237e0f66ef8SJohn Baldwin 			 * handlers.
12388088699fSJohn Baldwin 			 */
1239b4151f71SJohn Baldwin 			atomic_store_rel_int(&ithd->it_need, 0);
1240e0f66ef8SJohn Baldwin 			ithread_execute_handlers(p, ie);
12418088699fSJohn Baldwin 		}
12427870c3c6SJohn Baldwin 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
12437870c3c6SJohn Baldwin 		mtx_assert(&Giant, MA_NOTOWNED);
12448088699fSJohn Baldwin 
12458088699fSJohn Baldwin 		/*
12468088699fSJohn Baldwin 		 * Processed all our interrupts.  Now get the sched
12478088699fSJohn Baldwin 		 * lock.  This may take a while and it_need may get
12488088699fSJohn Baldwin 		 * set again, so we have to check it again.
12498088699fSJohn Baldwin 		 */
1250982d11f8SJeff Roberson 		thread_lock(td);
1251e0f66ef8SJohn Baldwin 		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
12527870c3c6SJohn Baldwin 			TD_SET_IWAIT(td);
1253e0f66ef8SJohn Baldwin 			ie->ie_count = 0;
12548df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
12558088699fSJohn Baldwin 		}
1256982d11f8SJeff Roberson 		thread_unlock(td);
12578088699fSJohn Baldwin 	}
12581931cf94SJohn Baldwin }
12591ee1b687SJohn Baldwin 
12601ee1b687SJohn Baldwin /*
12611ee1b687SJohn Baldwin  * Main interrupt handling body.
12621ee1b687SJohn Baldwin  *
12631ee1b687SJohn Baldwin  * Input:
12641ee1b687SJohn Baldwin  * o ie:                        the event connected to this interrupt.
12651ee1b687SJohn Baldwin  * o frame:                     some archs (e.g. i386) pass a frame to some
12661ee1b687SJohn Baldwin  *                              handlers as their main argument.
12671ee1b687SJohn Baldwin  * Return value:
12681ee1b687SJohn Baldwin  * o 0:                         everything ok.
12691ee1b687SJohn Baldwin  * o EINVAL:                    stray interrupt.
12701ee1b687SJohn Baldwin  */
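/*
 * Caller sketch (not from this file): machine-dependent interrupt glue
 * typically looks up the intr_event for the vector and does something like
 *
 *	if (intr_event_handle(ie, frame) != 0)
 *		(count and/or log it as a stray interrupt);
 *
 * so the EINVAL return is how strays are reported upward.
 */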
12711ee1b687SJohn Baldwin int
12721ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame)
12731ee1b687SJohn Baldwin {
12741ee1b687SJohn Baldwin 	struct intr_handler *ih;
12751ee1b687SJohn Baldwin 	struct thread *td;
12761ee1b687SJohn Baldwin 	int error, ret, thread;
12771ee1b687SJohn Baldwin 
12781ee1b687SJohn Baldwin 	td = curthread;
12791ee1b687SJohn Baldwin 
12801ee1b687SJohn Baldwin 	/* An interrupt with no event or handlers is a stray interrupt. */
12811ee1b687SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
12821ee1b687SJohn Baldwin 		return (EINVAL);
12831ee1b687SJohn Baldwin 
12841ee1b687SJohn Baldwin 	/*
12851ee1b687SJohn Baldwin 	 * Execute fast interrupt handlers directly.
12861ee1b687SJohn Baldwin 	 * To support clock handlers, if a handler registers
12871ee1b687SJohn Baldwin 	 * with a NULL argument, then we pass it a pointer to
12881ee1b687SJohn Baldwin 	 * a trapframe as its argument.
12891ee1b687SJohn Baldwin 	 */
12901ee1b687SJohn Baldwin 	td->td_intr_nesting_level++;
12911ee1b687SJohn Baldwin 	thread = 0;
12921ee1b687SJohn Baldwin 	ret = 0;
12931ee1b687SJohn Baldwin 	critical_enter();
12941ee1b687SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
12951ee1b687SJohn Baldwin 		if (ih->ih_filter == NULL) {
12961ee1b687SJohn Baldwin 			thread = 1;
12971ee1b687SJohn Baldwin 			continue;
12981ee1b687SJohn Baldwin 		}
12991ee1b687SJohn Baldwin 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
13001ee1b687SJohn Baldwin 		    ih->ih_filter, ih->ih_argument == NULL ? frame :
13011ee1b687SJohn Baldwin 		    ih->ih_argument, ih->ih_name);
13021ee1b687SJohn Baldwin 		if (ih->ih_argument == NULL)
13031ee1b687SJohn Baldwin 			ret = ih->ih_filter(frame);
13041ee1b687SJohn Baldwin 		else
13051ee1b687SJohn Baldwin 			ret = ih->ih_filter(ih->ih_argument);
13061ee1b687SJohn Baldwin 		/*
13071ee1b687SJohn Baldwin 		 * Wrapper handler special handling:
13081ee1b687SJohn Baldwin 		 *
13091ee1b687SJohn Baldwin 		 * in some particular cases (like pccard and pccbb),
13101ee1b687SJohn Baldwin 		 * the _real_ device handler is wrapped in a couple of
13111ee1b687SJohn Baldwin 		 * functions - a filter wrapper and an ithread wrapper.
13121ee1b687SJohn Baldwin 		 * In this case (and just in this case), the filter wrapper
13131ee1b687SJohn Baldwin 		 * could ask the system to schedule the ithread and mask
13141ee1b687SJohn Baldwin 		 * the interrupt source if the wrapped handler is composed
13151ee1b687SJohn Baldwin 		 * of just an ithread handler.
13161ee1b687SJohn Baldwin 		 *
13171ee1b687SJohn Baldwin 		 * TODO: write a generic wrapper to avoid people rolling
13181ee1b687SJohn Baldwin 		 * their own
13191ee1b687SJohn Baldwin 		 */
13201ee1b687SJohn Baldwin 		if (!thread) {
13211ee1b687SJohn Baldwin 			if (ret == FILTER_SCHEDULE_THREAD)
13221ee1b687SJohn Baldwin 				thread = 1;
13231ee1b687SJohn Baldwin 		}
13241ee1b687SJohn Baldwin 	}
13251ee1b687SJohn Baldwin 
13261ee1b687SJohn Baldwin 	if (thread) {
13271ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
13281ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
13291ee1b687SJohn Baldwin 	} else {
13301ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
13311ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
13321ee1b687SJohn Baldwin 	}
13331ee1b687SJohn Baldwin 
13341ee1b687SJohn Baldwin 	/* Schedule the ithread if needed. */
13351ee1b687SJohn Baldwin 	if (thread) {
13361ee1b687SJohn Baldwin 		error = intr_event_schedule_thread(ie);
13376205924aSKip Macy #ifndef XEN
13381ee1b687SJohn Baldwin 		KASSERT(error == 0, ("bad stray interrupt"));
13396205924aSKip Macy #else
13406205924aSKip Macy 		if (error != 0)
13416205924aSKip Macy 			log(LOG_WARNING, "bad stray interrupt");
13426205924aSKip Macy #endif
13431ee1b687SJohn Baldwin 	}
13441ee1b687SJohn Baldwin 	critical_exit();
13451ee1b687SJohn Baldwin 	td->td_intr_nesting_level--;
13461ee1b687SJohn Baldwin 	return (0);
13471ee1b687SJohn Baldwin }
1348bafe5a31SPaolo Pisati #else
1349bafe5a31SPaolo Pisati /*
1350bafe5a31SPaolo Pisati  * This is the main code for interrupt threads.
1351bafe5a31SPaolo Pisati  */
1352bafe5a31SPaolo Pisati static void
1353bafe5a31SPaolo Pisati ithread_loop(void *arg)
1354bafe5a31SPaolo Pisati {
1355bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
1356bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1357bafe5a31SPaolo Pisati 	struct intr_event *ie;
1358bafe5a31SPaolo Pisati 	struct thread *td;
1359bafe5a31SPaolo Pisati 	struct proc *p;
1360bafe5a31SPaolo Pisati 	int priv;
1361bafe5a31SPaolo Pisati 
1362bafe5a31SPaolo Pisati 	td = curthread;
1363bafe5a31SPaolo Pisati 	p = td->td_proc;
1364bafe5a31SPaolo Pisati 	ih = (struct intr_handler *)arg;
1365bafe5a31SPaolo Pisati 	priv = (ih->ih_thread != NULL) ? 1 : 0;
1366bafe5a31SPaolo Pisati 	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
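	/*
	 * priv != 0 means this kthread backs a private, per-handler ithread
	 * (ih->ih_thread), so only that handler is executed from this loop;
	 * otherwise we are the event's shared ithread and run every handler.
	 */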
1367bafe5a31SPaolo Pisati 	KASSERT(ithd->it_thread == td,
1368bafe5a31SPaolo Pisati 	    ("%s: ithread and proc linkage out of sync", __func__));
1369bafe5a31SPaolo Pisati 	ie = ithd->it_event;
1370bafe5a31SPaolo Pisati 	ie->ie_count = 0;
1371bafe5a31SPaolo Pisati 
1372bafe5a31SPaolo Pisati 	/*
1373bafe5a31SPaolo Pisati 	 * As long as we have interrupts outstanding, go through the
1374bafe5a31SPaolo Pisati 	 * list of handlers, giving each one a go at it.
1375bafe5a31SPaolo Pisati 	 */
1376bafe5a31SPaolo Pisati 	for (;;) {
1377bafe5a31SPaolo Pisati 		/*
1378bafe5a31SPaolo Pisati 		 * If we are an orphaned thread, then just die.
1379bafe5a31SPaolo Pisati 		 */
1380bafe5a31SPaolo Pisati 		if (ithd->it_flags & IT_DEAD) {
1381bafe5a31SPaolo Pisati 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
13827ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1383bafe5a31SPaolo Pisati 			free(ithd, M_ITHREAD);
1384ca9a0ddfSJulian Elischer 			kthread_exit();
1385bafe5a31SPaolo Pisati 		}
1386bafe5a31SPaolo Pisati 
1387bafe5a31SPaolo Pisati 		/*
1388bafe5a31SPaolo Pisati 		 * Service interrupts.  If another interrupt arrives while
1389bafe5a31SPaolo Pisati 		 * we are running, it will set it_need to note that we
1390bafe5a31SPaolo Pisati 		 * should make another pass.
1391bafe5a31SPaolo Pisati 		 */
1392bafe5a31SPaolo Pisati 		while (ithd->it_need) {
1393bafe5a31SPaolo Pisati 			/*
1394bafe5a31SPaolo Pisati 			 * This might need a full read and write barrier
1395bafe5a31SPaolo Pisati 			 * to make sure that this write posts before any
1396bafe5a31SPaolo Pisati 			 * of the memory or device accesses in the
1397bafe5a31SPaolo Pisati 			 * handlers.
1398bafe5a31SPaolo Pisati 			 */
1399bafe5a31SPaolo Pisati 			atomic_store_rel_int(&ithd->it_need, 0);
1400bafe5a31SPaolo Pisati 			if (priv)
1401bafe5a31SPaolo Pisati 				priv_ithread_execute_handler(p, ih);
1402bafe5a31SPaolo Pisati 			else
1403bafe5a31SPaolo Pisati 				ithread_execute_handlers(p, ie);
1404bafe5a31SPaolo Pisati 		}
1405bafe5a31SPaolo Pisati 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1406bafe5a31SPaolo Pisati 		mtx_assert(&Giant, MA_NOTOWNED);
1407bafe5a31SPaolo Pisati 
1408bafe5a31SPaolo Pisati 		/*
1409bafe5a31SPaolo Pisati 		 * Processed all our interrupts.  Now get the sched
1410bafe5a31SPaolo Pisati 		 * lock.  This may take a while and it_need may get
1411bafe5a31SPaolo Pisati 		 * set again, so we have to check it again.
1412bafe5a31SPaolo Pisati 		 */
1413982d11f8SJeff Roberson 		thread_lock(td);
1414bafe5a31SPaolo Pisati 		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1415bafe5a31SPaolo Pisati 			TD_SET_IWAIT(td);
1416bafe5a31SPaolo Pisati 			ie->ie_count = 0;
14178df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
1418bafe5a31SPaolo Pisati 		}
1419982d11f8SJeff Roberson 		thread_unlock(td);
1420bafe5a31SPaolo Pisati 	}
1421bafe5a31SPaolo Pisati }
1422bafe5a31SPaolo Pisati 
1423bafe5a31SPaolo Pisati /*
1424bafe5a31SPaolo Pisati  * Main loop for interrupt filter.
1425bafe5a31SPaolo Pisati  *
1426bafe5a31SPaolo Pisati  * Some architectures (i386, amd64 and arm) require the optional frame
1427bafe5a31SPaolo Pisati  * parameter, and use it as the main argument for fast handler execution
1428bafe5a31SPaolo Pisati  * when ih_argument == NULL.
1429bafe5a31SPaolo Pisati  *
1430bafe5a31SPaolo Pisati  * Return value:
1431bafe5a31SPaolo Pisati  * o FILTER_STRAY:              No filter recognized the event, and no
1432bafe5a31SPaolo Pisati  *                              filter-less handler is registered on this
1433bafe5a31SPaolo Pisati  *                              line.
1434bafe5a31SPaolo Pisati  * o FILTER_HANDLED:            A filter claimed the event and served it.
1435bafe5a31SPaolo Pisati  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
1436bafe5a31SPaolo Pisati  *                              least one filter-less handler on this line.
1437bafe5a31SPaolo Pisati  * o FILTER_HANDLED |
1438bafe5a31SPaolo Pisati  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
1439bafe5a31SPaolo Pisati  *                              scheduling the per-handler ithread.
1440bafe5a31SPaolo Pisati  *
1441bafe5a31SPaolo Pisati  * If an ithread has to be scheduled, *ithd will point to the struct
1442bafe5a31SPaolo Pisati  * intr_thread containing the thread to schedule.
1444bafe5a31SPaolo Pisati  */
1445bafe5a31SPaolo Pisati 
14461ee1b687SJohn Baldwin static int
1447bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1448bafe5a31SPaolo Pisati 		 struct intr_thread **ithd)
1449bafe5a31SPaolo Pisati {
1450bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1451bafe5a31SPaolo Pisati 	void *arg;
1452bafe5a31SPaolo Pisati 	int ret, thread_only;
1453bafe5a31SPaolo Pisati 
1454bafe5a31SPaolo Pisati 	ret = 0;
1455bafe5a31SPaolo Pisati 	thread_only = 0;
1456bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1457bafe5a31SPaolo Pisati 		/*
1458bafe5a31SPaolo Pisati 		 * Execute fast interrupt handlers directly.
1459bafe5a31SPaolo Pisati 		 * To support clock handlers, if a handler registers
1460bafe5a31SPaolo Pisati 		 * with a NULL argument, then we pass it a pointer to
1461bafe5a31SPaolo Pisati 		 * a trapframe as its argument.
1462bafe5a31SPaolo Pisati 		 */
1463bafe5a31SPaolo Pisati 		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1464bafe5a31SPaolo Pisati 
1465bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1466bafe5a31SPaolo Pisati 		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1467bafe5a31SPaolo Pisati 
1468bafe5a31SPaolo Pisati 		if (ih->ih_filter != NULL)
1469bafe5a31SPaolo Pisati 			ret = ih->ih_filter(arg);
1470bafe5a31SPaolo Pisati 		else {
1471bafe5a31SPaolo Pisati 			thread_only = 1;
1472bafe5a31SPaolo Pisati 			continue;
1473bafe5a31SPaolo Pisati 		}
1474bafe5a31SPaolo Pisati 
1475bafe5a31SPaolo Pisati 		if (ret & FILTER_STRAY)
1476bafe5a31SPaolo Pisati 			continue;
1477bafe5a31SPaolo Pisati 		else {
1478bafe5a31SPaolo Pisati 			*ithd = ih->ih_thread;
1479bafe5a31SPaolo Pisati 			return (ret);
1480bafe5a31SPaolo Pisati 		}
1481bafe5a31SPaolo Pisati 	}
1482bafe5a31SPaolo Pisati 
1483bafe5a31SPaolo Pisati 	/*
1484bafe5a31SPaolo Pisati 	 * No filters handled the interrupt and we have at least
1485bafe5a31SPaolo Pisati 	 * one handler without a filter.  In this case, we schedule
1486bafe5a31SPaolo Pisati 	 * all of the filter-less handlers to run in the ithread.
1487bafe5a31SPaolo Pisati 	 */
1488bafe5a31SPaolo Pisati 	if (thread_only) {
1489bafe5a31SPaolo Pisati 		*ithd = ie->ie_thread;
1490bafe5a31SPaolo Pisati 		return (FILTER_SCHEDULE_THREAD);
1491bafe5a31SPaolo Pisati 	}
1492bafe5a31SPaolo Pisati 	return (FILTER_STRAY);
1493bafe5a31SPaolo Pisati }
1494bafe5a31SPaolo Pisati 
1495bafe5a31SPaolo Pisati /*
1496bafe5a31SPaolo Pisati  * Main interrupt handling body.
1497bafe5a31SPaolo Pisati  *
1498bafe5a31SPaolo Pisati  * Input:
1499bafe5a31SPaolo Pisati  * o ie:                        the event connected to this interrupt.
1500bafe5a31SPaolo Pisati  * o frame:                     some archs (e.g. i386) pass a frame to some
1501bafe5a31SPaolo Pisati  *                              handlers as their main argument.
1502bafe5a31SPaolo Pisati  * Return value:
1503bafe5a31SPaolo Pisati  * o 0:                         everything ok.
1504bafe5a31SPaolo Pisati  * o EINVAL:                    stray interrupt.
1505bafe5a31SPaolo Pisati  */
1506bafe5a31SPaolo Pisati int
1507bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1508bafe5a31SPaolo Pisati {
1509bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
1510bafe5a31SPaolo Pisati 	struct thread *td;
1511bafe5a31SPaolo Pisati 	int thread;
1512bafe5a31SPaolo Pisati 
1513bafe5a31SPaolo Pisati 	ithd = NULL;
1514bafe5a31SPaolo Pisati 	td = curthread;
1515bafe5a31SPaolo Pisati 
1516bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1517bafe5a31SPaolo Pisati 		return (EINVAL);
1518bafe5a31SPaolo Pisati 
1519bafe5a31SPaolo Pisati 	td->td_intr_nesting_level++;
1520bafe5a31SPaolo Pisati 	thread = 0;
1521bafe5a31SPaolo Pisati 	critical_enter();
1522bafe5a31SPaolo Pisati 	thread = intr_filter_loop(ie, frame, &ithd);
1523bafe5a31SPaolo Pisati 	if (thread & FILTER_HANDLED) {
15241ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
15251ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
1526bafe5a31SPaolo Pisati 	} else {
15271ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
15281ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
1529bafe5a31SPaolo Pisati 	}
1530bafe5a31SPaolo Pisati 	critical_exit();
1531bafe5a31SPaolo Pisati 
1532bafe5a31SPaolo Pisati 	/* Interrupt storm logic */
1533bafe5a31SPaolo Pisati 	if (thread & FILTER_STRAY) {
1534bafe5a31SPaolo Pisati 		ie->ie_count++;
1535bafe5a31SPaolo Pisati 		if (ie->ie_count < intr_storm_threshold)
1536bafe5a31SPaolo Pisati 			printf("Interrupt stray detection not present\n");
1537bafe5a31SPaolo Pisati 	}
1538bafe5a31SPaolo Pisati 
1539bafe5a31SPaolo Pisati 	/* Schedule an ithread if needed. */
1540bafe5a31SPaolo Pisati 	if (thread & FILTER_SCHEDULE_THREAD) {
1541bafe5a31SPaolo Pisati 		if (intr_event_schedule_thread(ie, ithd) != 0)
1542bafe5a31SPaolo Pisati 			panic("%s: impossible stray interrupt", __func__);
1543bafe5a31SPaolo Pisati 	}
1544bafe5a31SPaolo Pisati 	td->td_intr_nesting_level--;
1545bafe5a31SPaolo Pisati 	return (0);
1546bafe5a31SPaolo Pisati }
1547bafe5a31SPaolo Pisati #endif
15481931cf94SJohn Baldwin 
15498b201c42SJohn Baldwin #ifdef DDB
15508b201c42SJohn Baldwin /*
15518b201c42SJohn Baldwin  * Dump details about an interrupt handler
15528b201c42SJohn Baldwin  */
15538b201c42SJohn Baldwin static void
1554e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih)
15558b201c42SJohn Baldwin {
15568b201c42SJohn Baldwin 	int comma;
15578b201c42SJohn Baldwin 
15588b201c42SJohn Baldwin 	db_printf("\t%-10s ", ih->ih_name);
15598b201c42SJohn Baldwin 	switch (ih->ih_pri) {
15608b201c42SJohn Baldwin 	case PI_REALTIME:
15618b201c42SJohn Baldwin 		db_printf("CLK ");
15628b201c42SJohn Baldwin 		break;
15638b201c42SJohn Baldwin 	case PI_AV:
15648b201c42SJohn Baldwin 		db_printf("AV  ");
15658b201c42SJohn Baldwin 		break;
15668b201c42SJohn Baldwin 	case PI_TTYHIGH:
15678b201c42SJohn Baldwin 	case PI_TTYLOW:
15688b201c42SJohn Baldwin 		db_printf("TTY ");
15698b201c42SJohn Baldwin 		break;
15708b201c42SJohn Baldwin 	case PI_TAPE:
15718b201c42SJohn Baldwin 		db_printf("TAPE");
15728b201c42SJohn Baldwin 		break;
15738b201c42SJohn Baldwin 	case PI_NET:
15748b201c42SJohn Baldwin 		db_printf("NET ");
15758b201c42SJohn Baldwin 		break;
15768b201c42SJohn Baldwin 	case PI_DISK:
15778b201c42SJohn Baldwin 	case PI_DISKLOW:
15788b201c42SJohn Baldwin 		db_printf("DISK");
15798b201c42SJohn Baldwin 		break;
15808b201c42SJohn Baldwin 	case PI_DULL:
15818b201c42SJohn Baldwin 		db_printf("DULL");
15828b201c42SJohn Baldwin 		break;
15838b201c42SJohn Baldwin 	default:
15848b201c42SJohn Baldwin 		if (ih->ih_pri >= PI_SOFT)
15858b201c42SJohn Baldwin 			db_printf("SWI ");
15868b201c42SJohn Baldwin 		else
15878b201c42SJohn Baldwin 			db_printf("%4u", ih->ih_pri);
15888b201c42SJohn Baldwin 		break;
15898b201c42SJohn Baldwin 	}
15908b201c42SJohn Baldwin 	db_printf(" ");
15918b201c42SJohn Baldwin 	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
15928b201c42SJohn Baldwin 	db_printf("(%p)", ih->ih_argument);
15938b201c42SJohn Baldwin 	if (ih->ih_need ||
1594ef544f63SPaolo Pisati 	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
15958b201c42SJohn Baldwin 	    IH_MPSAFE)) != 0) {
15968b201c42SJohn Baldwin 		db_printf(" {");
15978b201c42SJohn Baldwin 		comma = 0;
15988b201c42SJohn Baldwin 		if (ih->ih_flags & IH_EXCLUSIVE) {
15998b201c42SJohn Baldwin 			if (comma)
16008b201c42SJohn Baldwin 				db_printf(", ");
16018b201c42SJohn Baldwin 			db_printf("EXCL");
16028b201c42SJohn Baldwin 			comma = 1;
16038b201c42SJohn Baldwin 		}
16048b201c42SJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY) {
16058b201c42SJohn Baldwin 			if (comma)
16068b201c42SJohn Baldwin 				db_printf(", ");
16078b201c42SJohn Baldwin 			db_printf("ENTROPY");
16088b201c42SJohn Baldwin 			comma = 1;
16098b201c42SJohn Baldwin 		}
16108b201c42SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
16118b201c42SJohn Baldwin 			if (comma)
16128b201c42SJohn Baldwin 				db_printf(", ");
16138b201c42SJohn Baldwin 			db_printf("DEAD");
16148b201c42SJohn Baldwin 			comma = 1;
16158b201c42SJohn Baldwin 		}
16168b201c42SJohn Baldwin 		if (ih->ih_flags & IH_MPSAFE) {
16178b201c42SJohn Baldwin 			if (comma)
16188b201c42SJohn Baldwin 				db_printf(", ");
16198b201c42SJohn Baldwin 			db_printf("MPSAFE");
16208b201c42SJohn Baldwin 			comma = 1;
16218b201c42SJohn Baldwin 		}
16228b201c42SJohn Baldwin 		if (ih->ih_need) {
16238b201c42SJohn Baldwin 			if (comma)
16248b201c42SJohn Baldwin 				db_printf(", ");
16258b201c42SJohn Baldwin 			db_printf("NEED");
16268b201c42SJohn Baldwin 		}
16278b201c42SJohn Baldwin 		db_printf("}");
16288b201c42SJohn Baldwin 	}
16298b201c42SJohn Baldwin 	db_printf("\n");
16308b201c42SJohn Baldwin }
16318b201c42SJohn Baldwin 
16328b201c42SJohn Baldwin /*
1633e0f66ef8SJohn Baldwin  * Dump details about an event.
16348b201c42SJohn Baldwin  */
16358b201c42SJohn Baldwin void
1636e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers)
16378b201c42SJohn Baldwin {
1638e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
1639e0f66ef8SJohn Baldwin 	struct intr_thread *it;
16408b201c42SJohn Baldwin 	int comma;
16418b201c42SJohn Baldwin 
1642e0f66ef8SJohn Baldwin 	db_printf("%s ", ie->ie_fullname);
1643e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
1644e0f66ef8SJohn Baldwin 	if (it != NULL)
1645e0f66ef8SJohn Baldwin 		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1646e0f66ef8SJohn Baldwin 	else
1647e0f66ef8SJohn Baldwin 		db_printf("(no thread)");
1648e0f66ef8SJohn Baldwin 	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1649e0f66ef8SJohn Baldwin 	    (it != NULL && it->it_need)) {
16508b201c42SJohn Baldwin 		db_printf(" {");
16518b201c42SJohn Baldwin 		comma = 0;
1652e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_SOFT) {
16538b201c42SJohn Baldwin 			db_printf("SOFT");
16548b201c42SJohn Baldwin 			comma = 1;
16558b201c42SJohn Baldwin 		}
1656e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ENTROPY) {
16578b201c42SJohn Baldwin 			if (comma)
16588b201c42SJohn Baldwin 				db_printf(", ");
16598b201c42SJohn Baldwin 			db_printf("ENTROPY");
16608b201c42SJohn Baldwin 			comma = 1;
16618b201c42SJohn Baldwin 		}
1662e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD) {
16638b201c42SJohn Baldwin 			if (comma)
16648b201c42SJohn Baldwin 				db_printf(", ");
1665e0f66ef8SJohn Baldwin 			db_printf("ADDING_THREAD");
16668b201c42SJohn Baldwin 			comma = 1;
16678b201c42SJohn Baldwin 		}
1668e0f66ef8SJohn Baldwin 		if (it != NULL && it->it_need) {
16698b201c42SJohn Baldwin 			if (comma)
16708b201c42SJohn Baldwin 				db_printf(", ");
16718b201c42SJohn Baldwin 			db_printf("NEED");
16728b201c42SJohn Baldwin 		}
16738b201c42SJohn Baldwin 		db_printf("}");
16748b201c42SJohn Baldwin 	}
16758b201c42SJohn Baldwin 	db_printf("\n");
16768b201c42SJohn Baldwin 
16778b201c42SJohn Baldwin 	if (handlers)
1678e0f66ef8SJohn Baldwin 		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
16798b201c42SJohn Baldwin 		    db_dump_intrhand(ih);
16808b201c42SJohn Baldwin }
1681e0f66ef8SJohn Baldwin 
1682e0f66ef8SJohn Baldwin /*
1683e0f66ef8SJohn Baldwin  * Dump data about interrupt handlers
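 * ('a' modifier also lists events with no handlers; 'v' dumps each handler)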
1684e0f66ef8SJohn Baldwin  */
1685e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr)
1686e0f66ef8SJohn Baldwin {
1687e0f66ef8SJohn Baldwin 	struct intr_event *ie;
168819e9205aSJohn Baldwin 	int all, verbose;
1689e0f66ef8SJohn Baldwin 
1690e0f66ef8SJohn Baldwin 	verbose = index(modif, 'v') != NULL;
1691e0f66ef8SJohn Baldwin 	all = index(modif, 'a') != NULL;
1692e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ie, &event_list, ie_list) {
1693e0f66ef8SJohn Baldwin 		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1694e0f66ef8SJohn Baldwin 			continue;
1695e0f66ef8SJohn Baldwin 		db_dump_intr_event(ie, verbose);
169619e9205aSJohn Baldwin 		if (db_pager_quit)
169719e9205aSJohn Baldwin 			break;
1698e0f66ef8SJohn Baldwin 	}
1699e0f66ef8SJohn Baldwin }
17008b201c42SJohn Baldwin #endif /* DDB */
17018b201c42SJohn Baldwin 
1702b4151f71SJohn Baldwin /*
17038088699fSJohn Baldwin  * Start standard software interrupt threads
17041931cf94SJohn Baldwin  */
17051931cf94SJohn Baldwin static void
1706b4151f71SJohn Baldwin start_softintr(void *dummy)
17071931cf94SJohn Baldwin {
1708b4151f71SJohn Baldwin 
17098d809d50SJeff Roberson 	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
17108d809d50SJeff Roberson 		panic("died while creating vm swi ithread");
17111931cf94SJohn Baldwin }
1712237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1713237fdd78SRobert Watson     NULL);
17141931cf94SJohn Baldwin 
1715d279178dSThomas Moestl /*
1716d279178dSThomas Moestl  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1717d279178dSThomas Moestl  * The data for this is machine dependent, and the declarations are in
1718d279178dSThomas Moestl  * machine dependent code.  The layout of intrnames and intrcnt, however,
1719d279178dSThomas Moestl  * is machine independent.
1720d279178dSThomas Moestl  *
1721d279178dSThomas Moestl  * We do not know the length of intrcnt and intrnames at compile time, so
1722d279178dSThomas Moestl  * calculate things at run time.
1723d279178dSThomas Moestl  */
1724d279178dSThomas Moestl static int
1725d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1726d279178dSThomas Moestl {
1727d279178dSThomas Moestl 	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
1728d279178dSThomas Moestl 	   req));
1729d279178dSThomas Moestl }
1730d279178dSThomas Moestl 
1731d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
1732d279178dSThomas Moestl     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
1733d279178dSThomas Moestl 
1734d279178dSThomas Moestl static int
1735d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1736d279178dSThomas Moestl {
1737d279178dSThomas Moestl 	return (sysctl_handle_opaque(oidp, intrcnt,
1738d279178dSThomas Moestl 	    (char *)eintrcnt - (char *)intrcnt, req));
1739d279178dSThomas Moestl }
1740d279178dSThomas Moestl 
1741d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
1742d279178dSThomas Moestl     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
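
/*
 * (Layout note: intrnames is a packed sequence of NUL-terminated names and
 * intrcnt holds one u_long counter per name, so consumers such as systat(1)
 * walk the two arrays in parallel; see the DDB command below.)
 */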
17438b201c42SJohn Baldwin 
17448b201c42SJohn Baldwin #ifdef DDB
17458b201c42SJohn Baldwin /*
17468b201c42SJohn Baldwin  * DDB command to dump the interrupt statistics.
17478b201c42SJohn Baldwin  */
17488b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
17498b201c42SJohn Baldwin {
17508b201c42SJohn Baldwin 	u_long *i;
17518b201c42SJohn Baldwin 	char *cp;
17528b201c42SJohn Baldwin 
17538b201c42SJohn Baldwin 	cp = intrnames;
175419e9205aSJohn Baldwin 	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
17558b201c42SJohn Baldwin 		if (*cp == '\0')
17568b201c42SJohn Baldwin 			break;
17578b201c42SJohn Baldwin 		if (*i != 0)
17588b201c42SJohn Baldwin 			db_printf("%s\t%lu\n", cp, *i);
17598b201c42SJohn Baldwin 		cp += strlen(cp) + 1;
17608b201c42SJohn Baldwin 	}
17618b201c42SJohn Baldwin }
17628b201c42SJohn Baldwin #endif
1763