xref: /freebsd/sys/kern/kern_intr.c (revision e84bcd849429ed2719d1edc500c38acf7df70699)
19454b2d8SWarner Losh /*-
2425f9fdaSStefan Eßer  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3425f9fdaSStefan Eßer  * All rights reserved.
4425f9fdaSStefan Eßer  *
5425f9fdaSStefan Eßer  * Redistribution and use in source and binary forms, with or without
6425f9fdaSStefan Eßer  * modification, are permitted provided that the following conditions
7425f9fdaSStefan Eßer  * are met:
8425f9fdaSStefan Eßer  * 1. Redistributions of source code must retain the above copyright
9425f9fdaSStefan Eßer  *    notice unmodified, this list of conditions, and the following
10425f9fdaSStefan Eßer  *    disclaimer.
11425f9fdaSStefan Eßer  * 2. Redistributions in binary form must reproduce the above copyright
12425f9fdaSStefan Eßer  *    notice, this list of conditions and the following disclaimer in the
13425f9fdaSStefan Eßer  *    documentation and/or other materials provided with the distribution.
14425f9fdaSStefan Eßer  *
15425f9fdaSStefan Eßer  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16425f9fdaSStefan Eßer  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17425f9fdaSStefan Eßer  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18425f9fdaSStefan Eßer  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19425f9fdaSStefan Eßer  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20425f9fdaSStefan Eßer  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21425f9fdaSStefan Eßer  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22425f9fdaSStefan Eßer  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23425f9fdaSStefan Eßer  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24425f9fdaSStefan Eßer  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25425f9fdaSStefan Eßer  */
26425f9fdaSStefan Eßer 
27677b542eSDavid E. O'Brien #include <sys/cdefs.h>
28677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
293900ddb2SDoug Rabson 
308b201c42SJohn Baldwin #include "opt_ddb.h"
318b201c42SJohn Baldwin 
321c5bb3eaSPeter Wemm #include <sys/param.h>
339a94c9c5SJohn Baldwin #include <sys/bus.h>
34c11110eaSAlfred Perlstein #include <sys/conf.h>
359b33b154SJeff Roberson #include <sys/cpuset.h>
369a94c9c5SJohn Baldwin #include <sys/rtprio.h>
37425f9fdaSStefan Eßer #include <sys/systm.h>
3868352337SDoug Rabson #include <sys/interrupt.h>
391931cf94SJohn Baldwin #include <sys/kernel.h>
401931cf94SJohn Baldwin #include <sys/kthread.h>
411931cf94SJohn Baldwin #include <sys/ktr.h>
4205b2c96fSBruce Evans #include <sys/limits.h>
43f34fa851SJohn Baldwin #include <sys/lock.h>
441931cf94SJohn Baldwin #include <sys/malloc.h>
4535e0e5b3SJohn Baldwin #include <sys/mutex.h>
461931cf94SJohn Baldwin #include <sys/proc.h>
473e5da754SJohn Baldwin #include <sys/random.h>
48b4151f71SJohn Baldwin #include <sys/resourcevar.h>
4963710c4dSJohn Baldwin #include <sys/sched.h>
50eaf86d16SJohn Baldwin #include <sys/smp.h>
51d279178dSThomas Moestl #include <sys/sysctl.h>
526205924aSKip Macy #include <sys/syslog.h>
531931cf94SJohn Baldwin #include <sys/unistd.h>
541931cf94SJohn Baldwin #include <sys/vmmeter.h>
551931cf94SJohn Baldwin #include <machine/atomic.h>
561931cf94SJohn Baldwin #include <machine/cpu.h>
578088699fSJohn Baldwin #include <machine/md_var.h>
58b4151f71SJohn Baldwin #include <machine/stdarg.h>
598b201c42SJohn Baldwin #ifdef DDB
608b201c42SJohn Baldwin #include <ddb/ddb.h>
618b201c42SJohn Baldwin #include <ddb/db_sym.h>
628b201c42SJohn Baldwin #endif
63425f9fdaSStefan Eßer 
64e0f66ef8SJohn Baldwin /*
65e0f66ef8SJohn Baldwin  * Describe an interrupt thread.  There is one of these per interrupt event.
66e0f66ef8SJohn Baldwin  */
67e0f66ef8SJohn Baldwin struct intr_thread {
68e0f66ef8SJohn Baldwin 	struct intr_event *it_event;
69e0f66ef8SJohn Baldwin 	struct thread *it_thread;	/* Kernel thread. */
70e0f66ef8SJohn Baldwin 	int	it_flags;		/* (j) IT_* flags. */
71e0f66ef8SJohn Baldwin 	int	it_need;		/* Needs service. */
723e5da754SJohn Baldwin };
733e5da754SJohn Baldwin 
74e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */
75e0f66ef8SJohn Baldwin #define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
76e0f66ef8SJohn Baldwin 
77e0f66ef8SJohn Baldwin struct	intr_entropy {
78e0f66ef8SJohn Baldwin 	struct	thread *td;
79e0f66ef8SJohn Baldwin 	uintptr_t event;
80e0f66ef8SJohn Baldwin };
81e0f66ef8SJohn Baldwin 
82e0f66ef8SJohn Baldwin struct	intr_event *clk_intr_event;
83e0f66ef8SJohn Baldwin struct	intr_event *tty_intr_event;
847b1fe905SBruce Evans void	*vm_ih;
857ab24ea3SJulian Elischer struct proc *intrproc;
861931cf94SJohn Baldwin 
87b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
88b4151f71SJohn Baldwin 
890ae62c18SNate Lawson static int intr_storm_threshold = 1000;
907870c3c6SJohn Baldwin TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
917870c3c6SJohn Baldwin SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
927870c3c6SJohn Baldwin     &intr_storm_threshold, 0,
937b1fe905SBruce Evans     "Number of consecutive interrupts before storm protection is enabled");
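/*
 * Note: hw.intr_storm_threshold is both a loader tunable and a read/write
 * sysctl, so it can be adjusted at boot or at runtime, and setting it to 0
 * disables storm detection entirely (see the check in
 * ithread_execute_handlers() below).  For example, from userland:
 *
 *	sysctl hw.intr_storm_threshold=500
 */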
94e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list =
95e0f66ef8SJohn Baldwin     TAILQ_HEAD_INITIALIZER(event_list);
969b33b154SJeff Roberson static struct mtx event_lock;
979b33b154SJeff Roberson MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
987b1fe905SBruce Evans 
99e0f66ef8SJohn Baldwin static void	intr_event_update(struct intr_event *ie);
100bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1011ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie,
1021ee1b687SJohn Baldwin 		    struct intr_thread *ithd);
1031ee1b687SJohn Baldwin static int	intr_filter_loop(struct intr_event *ie,
1041ee1b687SJohn Baldwin 		    struct trapframe *frame, struct intr_thread **ithd);
105bafe5a31SPaolo Pisati static struct intr_thread *ithread_create(const char *name,
106bafe5a31SPaolo Pisati 			      struct intr_handler *ih);
107bafe5a31SPaolo Pisati #else
1081ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie);
109e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name);
110bafe5a31SPaolo Pisati #endif
111e0f66ef8SJohn Baldwin static void	ithread_destroy(struct intr_thread *ithread);
112bafe5a31SPaolo Pisati static void	ithread_execute_handlers(struct proc *p,
113bafe5a31SPaolo Pisati 		    struct intr_event *ie);
114bafe5a31SPaolo Pisati #ifdef INTR_FILTER
115bafe5a31SPaolo Pisati static void	priv_ithread_execute_handler(struct proc *p,
116bafe5a31SPaolo Pisati 		    struct intr_handler *ih);
117bafe5a31SPaolo Pisati #endif
1187b1fe905SBruce Evans static void	ithread_loop(void *);
119e0f66ef8SJohn Baldwin static void	ithread_update(struct intr_thread *ithd);
1207b1fe905SBruce Evans static void	start_softintr(void *);
1217870c3c6SJohn Baldwin 
122bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */
123b4151f71SJohn Baldwin u_char
124e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags)
1259a94c9c5SJohn Baldwin {
126b4151f71SJohn Baldwin 	u_char pri;
1279a94c9c5SJohn Baldwin 
128b4151f71SJohn Baldwin 	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
1295a280d9cSPeter Wemm 	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
1309a94c9c5SJohn Baldwin 	switch (flags) {
131b4151f71SJohn Baldwin 	case INTR_TYPE_TTY:
1329a94c9c5SJohn Baldwin 		pri = PI_TTYLOW;
1339a94c9c5SJohn Baldwin 		break;
1349a94c9c5SJohn Baldwin 	case INTR_TYPE_BIO:
1359a94c9c5SJohn Baldwin 		/*
1369a94c9c5SJohn Baldwin 		 * XXX We need to refine this.  BSD/OS distinguishes
1379a94c9c5SJohn Baldwin 		 * between tape and disk priorities.
1389a94c9c5SJohn Baldwin 		 */
1399a94c9c5SJohn Baldwin 		pri = PI_DISK;
1409a94c9c5SJohn Baldwin 		break;
1419a94c9c5SJohn Baldwin 	case INTR_TYPE_NET:
1429a94c9c5SJohn Baldwin 		pri = PI_NET;
1439a94c9c5SJohn Baldwin 		break;
1449a94c9c5SJohn Baldwin 	case INTR_TYPE_CAM:
1459a94c9c5SJohn Baldwin 		pri = PI_DISK;          /* XXX or PI_CAM? */
1469a94c9c5SJohn Baldwin 		break;
1475a280d9cSPeter Wemm 	case INTR_TYPE_AV:		/* Audio/video */
1485a280d9cSPeter Wemm 		pri = PI_AV;
1495a280d9cSPeter Wemm 		break;
150b4151f71SJohn Baldwin 	case INTR_TYPE_CLK:
151b4151f71SJohn Baldwin 		pri = PI_REALTIME;
152b4151f71SJohn Baldwin 		break;
1539a94c9c5SJohn Baldwin 	case INTR_TYPE_MISC:
1549a94c9c5SJohn Baldwin 		pri = PI_DULL;          /* don't care */
1559a94c9c5SJohn Baldwin 		break;
1569a94c9c5SJohn Baldwin 	default:
157b4151f71SJohn Baldwin 		/* We didn't specify an interrupt level. */
158e0f66ef8SJohn Baldwin 		panic("intr_priority: no interrupt type in flags");
1599a94c9c5SJohn Baldwin 	}
1609a94c9c5SJohn Baldwin 
1619a94c9c5SJohn Baldwin 	return pri;
1629a94c9c5SJohn Baldwin }
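/*
 * Illustration of the mapping above, as a caller might use it (the results
 * follow directly from the switch):
 *
 *	pri = intr_priority(INTR_TYPE_NET);	returns PI_NET
 *	pri = intr_priority(INTR_TYPE_BIO);	returns PI_DISK
 *
 * Flags that do not reduce to exactly one of the cases above fall into the
 * default case and panic, so callers must supply a single INTR_TYPE_* class.
 */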
1639a94c9c5SJohn Baldwin 
164b4151f71SJohn Baldwin /*
165e0f66ef8SJohn Baldwin  * Update an ithread based on the associated intr_event.
166b4151f71SJohn Baldwin  */
167b4151f71SJohn Baldwin static void
168e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd)
169b4151f71SJohn Baldwin {
170e0f66ef8SJohn Baldwin 	struct intr_event *ie;
171b40ce416SJulian Elischer 	struct thread *td;
172e0f66ef8SJohn Baldwin 	u_char pri;
1738088699fSJohn Baldwin 
174e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
175e0f66ef8SJohn Baldwin 	td = ithd->it_thread;
176b4151f71SJohn Baldwin 
177e0f66ef8SJohn Baldwin 	/* Determine the overall priority of this event. */
178e0f66ef8SJohn Baldwin 	if (TAILQ_EMPTY(&ie->ie_handlers))
179e0f66ef8SJohn Baldwin 		pri = PRI_MAX_ITHD;
180e0f66ef8SJohn Baldwin 	else
181e0f66ef8SJohn Baldwin 		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
182e80fb434SRobert Drehmel 
183e0f66ef8SJohn Baldwin 	/* Update name and priority. */
1847ab24ea3SJulian Elischer 	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
185982d11f8SJeff Roberson 	thread_lock(td);
186e0f66ef8SJohn Baldwin 	sched_prio(td, pri);
187982d11f8SJeff Roberson 	thread_unlock(td);
188b4151f71SJohn Baldwin }
189e0f66ef8SJohn Baldwin 
190e0f66ef8SJohn Baldwin /*
191e0f66ef8SJohn Baldwin  * Regenerate the full name of an interrupt event and update its priority.
192e0f66ef8SJohn Baldwin  */
193e0f66ef8SJohn Baldwin static void
194e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie)
195e0f66ef8SJohn Baldwin {
196e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
197e0f66ef8SJohn Baldwin 	char *last;
198e0f66ef8SJohn Baldwin 	int missed, space;
199e0f66ef8SJohn Baldwin 
200e0f66ef8SJohn Baldwin 	/* Start off with no entropy and just the name of the event. */
201e0f66ef8SJohn Baldwin 	mtx_assert(&ie->ie_lock, MA_OWNED);
202e0f66ef8SJohn Baldwin 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
203e0f66ef8SJohn Baldwin 	ie->ie_flags &= ~IE_ENTROPY;
2040811d60aSJohn Baldwin 	missed = 0;
205e0f66ef8SJohn Baldwin 	space = 1;
206e0f66ef8SJohn Baldwin 
207e0f66ef8SJohn Baldwin 	/* Run through all the handlers updating values. */
208e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
209e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
210e0f66ef8SJohn Baldwin 		    sizeof(ie->ie_fullname)) {
211e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " ");
212e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, ih->ih_name);
213e0f66ef8SJohn Baldwin 			space = 0;
2140811d60aSJohn Baldwin 		} else
2150811d60aSJohn Baldwin 			missed++;
2160811d60aSJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY)
217e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ENTROPY;
2180811d60aSJohn Baldwin 	}
219e0f66ef8SJohn Baldwin 
220e0f66ef8SJohn Baldwin 	/*
221e0f66ef8SJohn Baldwin 	 * If the handler names were too long, add +'s to indicate missing
222e0f66ef8SJohn Baldwin 	 * names. If we run out of room and still have +'s to add, change
223e0f66ef8SJohn Baldwin 	 * the last character from a + to a *.
224e0f66ef8SJohn Baldwin 	 */
225e0f66ef8SJohn Baldwin 	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
2260811d60aSJohn Baldwin 	while (missed-- > 0) {
227e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
228e0f66ef8SJohn Baldwin 			if (*last == '+') {
229e0f66ef8SJohn Baldwin 				*last = '*';
230e0f66ef8SJohn Baldwin 				break;
231b4151f71SJohn Baldwin 			} else
232e0f66ef8SJohn Baldwin 				*last = '+';
233e0f66ef8SJohn Baldwin 		} else if (space) {
234e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " +");
235e0f66ef8SJohn Baldwin 			space = 0;
236e0f66ef8SJohn Baldwin 		} else
237e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, "+");
238b4151f71SJohn Baldwin 	}
239e0f66ef8SJohn Baldwin 
240e0f66ef8SJohn Baldwin 	/*
241e0f66ef8SJohn Baldwin 	 * If this event has an ithread, update its priority and
242e0f66ef8SJohn Baldwin 	 * name.
243e0f66ef8SJohn Baldwin 	 */
244e0f66ef8SJohn Baldwin 	if (ie->ie_thread != NULL)
245e0f66ef8SJohn Baldwin 		ithread_update(ie->ie_thread);
246e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
247b4151f71SJohn Baldwin }
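/*
 * Example of the truncation scheme above, using hypothetical handler names:
 * if ie_fullname cannot hold every name, an event might read
 * "irq10: em0 uhci0 +" (one name dropped), and once even the '+' markers no
 * longer fit, the final character becomes '*', e.g. "irq10: em0 uhci0*".
 */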
248b4151f71SJohn Baldwin 
249b4151f71SJohn Baldwin int
2509b33b154SJeff Roberson intr_event_create(struct intr_event **event, void *source, int flags, int irq,
2511ee1b687SJohn Baldwin     void (*pre_ithread)(void *), void (*post_ithread)(void *),
2521ee1b687SJohn Baldwin     void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
2531ee1b687SJohn Baldwin     const char *fmt, ...)
254bafe5a31SPaolo Pisati {
255bafe5a31SPaolo Pisati 	struct intr_event *ie;
256bafe5a31SPaolo Pisati 	va_list ap;
257bafe5a31SPaolo Pisati 
258bafe5a31SPaolo Pisati 	/* The only valid flag during creation is IE_SOFT. */
259bafe5a31SPaolo Pisati 	if ((flags & ~IE_SOFT) != 0)
260bafe5a31SPaolo Pisati 		return (EINVAL);
261bafe5a31SPaolo Pisati 	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
262bafe5a31SPaolo Pisati 	ie->ie_source = source;
2631ee1b687SJohn Baldwin 	ie->ie_pre_ithread = pre_ithread;
2641ee1b687SJohn Baldwin 	ie->ie_post_ithread = post_ithread;
2651ee1b687SJohn Baldwin 	ie->ie_post_filter = post_filter;
2666d2d1c04SJohn Baldwin 	ie->ie_assign_cpu = assign_cpu;
267bafe5a31SPaolo Pisati 	ie->ie_flags = flags;
2689b33b154SJeff Roberson 	ie->ie_irq = irq;
269eaf86d16SJohn Baldwin 	ie->ie_cpu = NOCPU;
270bafe5a31SPaolo Pisati 	TAILQ_INIT(&ie->ie_handlers);
271bafe5a31SPaolo Pisati 	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
272bafe5a31SPaolo Pisati 
273bafe5a31SPaolo Pisati 	va_start(ap, fmt);
274bafe5a31SPaolo Pisati 	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
275bafe5a31SPaolo Pisati 	va_end(ap);
276bafe5a31SPaolo Pisati 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
2779b33b154SJeff Roberson 	mtx_lock(&event_lock);
278bafe5a31SPaolo Pisati 	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
2799b33b154SJeff Roberson 	mtx_unlock(&event_lock);
280bafe5a31SPaolo Pisati 	if (event != NULL)
281bafe5a31SPaolo Pisati 		*event = ie;
282bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
283bafe5a31SPaolo Pisati 	return (0);
284bafe5a31SPaolo Pisati }
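/*
 * A minimal sketch of how this is called.  Hardware interrupt code supplies
 * machine-dependent pre_ithread/post_ithread/post_filter callbacks; the
 * software interrupt path below (see swi_add()) is the simplest caller:
 *
 *	struct intr_event *ie;
 *	int error;
 *
 *	error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 *	    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
 */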
285b4151f71SJohn Baldwin 
286eaf86d16SJohn Baldwin /*
287eaf86d16SJohn Baldwin  * Bind an interrupt event to the specified CPU.  Note that not all
288eaf86d16SJohn Baldwin  * platforms support binding an interrupt to a CPU.  For those
289eaf86d16SJohn Baldwin  * platforms this request will fail.  For supported platforms, any
290eaf86d16SJohn Baldwin  * associated ithreads as well as the primary interrupt context will
291eaf86d16SJohn Baldwin  * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
292eaf86d16SJohn Baldwin  * the interrupt event.
293eaf86d16SJohn Baldwin  */
294eaf86d16SJohn Baldwin int
295eaf86d16SJohn Baldwin intr_event_bind(struct intr_event *ie, u_char cpu)
296eaf86d16SJohn Baldwin {
2979b33b154SJeff Roberson 	cpuset_t mask;
2989b33b154SJeff Roberson 	lwpid_t id;
299eaf86d16SJohn Baldwin 	int error;
300eaf86d16SJohn Baldwin 
301eaf86d16SJohn Baldwin 	/* Need a CPU to bind to. */
302eaf86d16SJohn Baldwin 	if (cpu != NOCPU && CPU_ABSENT(cpu))
303eaf86d16SJohn Baldwin 		return (EINVAL);
304eaf86d16SJohn Baldwin 
305eaf86d16SJohn Baldwin 	if (ie->ie_assign_cpu == NULL)
306eaf86d16SJohn Baldwin 		return (EOPNOTSUPP);
3079b33b154SJeff Roberson 	/*
3089b33b154SJeff Roberson 	 * If we have any ithreads, try to set their mask first since this
3099b33b154SJeff Roberson 	 * can fail.
3109b33b154SJeff Roberson 	 */
311eaf86d16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
3129b33b154SJeff Roberson 	if (ie->ie_thread != NULL) {
3139b33b154SJeff Roberson 		CPU_ZERO(&mask);
3149b33b154SJeff Roberson 		if (cpu == NOCPU)
3159b33b154SJeff Roberson 			CPU_COPY(cpuset_root, &mask);
3169b33b154SJeff Roberson 		else
3179b33b154SJeff Roberson 			CPU_SET(cpu, &mask);
3189b33b154SJeff Roberson 		id = ie->ie_thread->it_thread->td_tid;
319eaf86d16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
3209b33b154SJeff Roberson 		error = cpuset_setthread(id, &mask);
3219b33b154SJeff Roberson 		if (error)
3229b33b154SJeff Roberson 			return (error);
3239b33b154SJeff Roberson 	} else
324eaf86d16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
325eaf86d16SJohn Baldwin 	error = ie->ie_assign_cpu(ie->ie_source, cpu);
326eaf86d16SJohn Baldwin 	if (error)
327eaf86d16SJohn Baldwin 		return (error);
328eaf86d16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
329eaf86d16SJohn Baldwin 	ie->ie_cpu = cpu;
3309b33b154SJeff Roberson 	mtx_unlock(&ie->ie_lock);
3319b33b154SJeff Roberson 
3329b33b154SJeff Roberson 	return (error);
3339b33b154SJeff Roberson }
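/*
 * Usage sketch for the binding above (the CPU number is illustrative):
 *
 *	error = intr_event_bind(ie, 2);		bind the event to CPU 2
 *	error = intr_event_bind(ie, NOCPU);	unbind it again
 */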
3349b33b154SJeff Roberson 
3359b33b154SJeff Roberson static struct intr_event *
3369b33b154SJeff Roberson intr_lookup(int irq)
3379b33b154SJeff Roberson {
3389b33b154SJeff Roberson 	struct intr_event *ie;
3399b33b154SJeff Roberson 
3409b33b154SJeff Roberson 	mtx_lock(&event_lock);
3419b33b154SJeff Roberson 	TAILQ_FOREACH(ie, &event_list, ie_list)
3429b33b154SJeff Roberson 		if (ie->ie_irq == irq &&
3439b33b154SJeff Roberson 		    (ie->ie_flags & IE_SOFT) == 0 &&
3449b33b154SJeff Roberson 		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
3459b33b154SJeff Roberson 			break;
3469b33b154SJeff Roberson 	mtx_unlock(&event_lock);
3479b33b154SJeff Roberson 	return (ie);
3489b33b154SJeff Roberson }
3499b33b154SJeff Roberson 
3509b33b154SJeff Roberson int
3519b33b154SJeff Roberson intr_setaffinity(int irq, void *m)
3529b33b154SJeff Roberson {
3539b33b154SJeff Roberson 	struct intr_event *ie;
3549b33b154SJeff Roberson 	cpuset_t *mask;
3559b33b154SJeff Roberson 	u_char cpu;
3569b33b154SJeff Roberson 	int n;
3579b33b154SJeff Roberson 
3589b33b154SJeff Roberson 	mask = m;
3599b33b154SJeff Roberson 	cpu = NOCPU;
3609b33b154SJeff Roberson 	/*
3619b33b154SJeff Roberson 	 * If we're setting all cpus we can unbind.  Otherwise make sure
3629b33b154SJeff Roberson 	 * only one cpu is in the set.
3639b33b154SJeff Roberson 	 */
3649b33b154SJeff Roberson 	if (CPU_CMP(cpuset_root, mask)) {
3659b33b154SJeff Roberson 		for (n = 0; n < CPU_SETSIZE; n++) {
3669b33b154SJeff Roberson 			if (!CPU_ISSET(n, mask))
3679b33b154SJeff Roberson 				continue;
3689b33b154SJeff Roberson 			if (cpu != NOCPU)
3699b33b154SJeff Roberson 				return (EINVAL);
3709b33b154SJeff Roberson 			cpu = (u_char)n;
3719b33b154SJeff Roberson 		}
3729b33b154SJeff Roberson 	}
3739b33b154SJeff Roberson 	ie = intr_lookup(irq);
3749b33b154SJeff Roberson 	if (ie == NULL)
3759b33b154SJeff Roberson 		return (ESRCH);
3769b33b154SJeff Roberson 	intr_event_bind(ie, cpu);
37704a58b9dSBjoern A. Zeeb 	return (0);
3789b33b154SJeff Roberson }
3799b33b154SJeff Roberson 
3809b33b154SJeff Roberson int
3819b33b154SJeff Roberson intr_getaffinity(int irq, void *m)
3829b33b154SJeff Roberson {
3839b33b154SJeff Roberson 	struct intr_event *ie;
3849b33b154SJeff Roberson 	cpuset_t *mask;
3859b33b154SJeff Roberson 
3869b33b154SJeff Roberson 	mask = m;
3879b33b154SJeff Roberson 	ie = intr_lookup(irq);
3889b33b154SJeff Roberson 	if (ie == NULL)
3899b33b154SJeff Roberson 		return (ESRCH);
3909b33b154SJeff Roberson 	CPU_ZERO(mask);
3919b33b154SJeff Roberson 	mtx_lock(&ie->ie_lock);
3929b33b154SJeff Roberson 	if (ie->ie_cpu == NOCPU)
3939b33b154SJeff Roberson 		CPU_COPY(cpuset_root, mask);
3949b33b154SJeff Roberson 	else
3959b33b154SJeff Roberson 		CPU_SET(ie->ie_cpu, mask);
396eaf86d16SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
397eaf86d16SJohn Baldwin 	return (0);
398eaf86d16SJohn Baldwin }
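/*
 * Example of driving the two functions above with a cpuset (irq and the CPU
 * number are illustrative):
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	error = intr_setaffinity(irq, &mask);	exactly one CPU: bind
 *	error = intr_getaffinity(irq, &mask);	read back the current binding
 */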
399eaf86d16SJohn Baldwin 
400b4151f71SJohn Baldwin int
401e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie)
402b4151f71SJohn Baldwin {
403b4151f71SJohn Baldwin 
4049b33b154SJeff Roberson 	mtx_lock(&event_lock);
405e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
406e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
407e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
4089b33b154SJeff Roberson 		mtx_unlock(&event_lock);
409e0f66ef8SJohn Baldwin 		return (EBUSY);
4104d29cb2dSJohn Baldwin 	}
411e0f66ef8SJohn Baldwin 	TAILQ_REMOVE(&event_list, ie, ie_list);
4129477358dSJohn Baldwin #ifndef notyet
4139477358dSJohn Baldwin 	if (ie->ie_thread != NULL) {
4149477358dSJohn Baldwin 		ithread_destroy(ie->ie_thread);
4159477358dSJohn Baldwin 		ie->ie_thread = NULL;
4169477358dSJohn Baldwin 	}
4179477358dSJohn Baldwin #endif
418e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
4199b33b154SJeff Roberson 	mtx_unlock(&event_lock);
420e0f66ef8SJohn Baldwin 	mtx_destroy(&ie->ie_lock);
421e0f66ef8SJohn Baldwin 	free(ie, M_ITHREAD);
422e0f66ef8SJohn Baldwin 	return (0);
423e0f66ef8SJohn Baldwin }
424e0f66ef8SJohn Baldwin 
425bafe5a31SPaolo Pisati #ifndef INTR_FILTER
426e0f66ef8SJohn Baldwin static struct intr_thread *
427e0f66ef8SJohn Baldwin ithread_create(const char *name)
428e0f66ef8SJohn Baldwin {
429e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
430e0f66ef8SJohn Baldwin 	struct thread *td;
431e0f66ef8SJohn Baldwin 	int error;
432e0f66ef8SJohn Baldwin 
433e0f66ef8SJohn Baldwin 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
434e0f66ef8SJohn Baldwin 
4357ab24ea3SJulian Elischer 	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
4367ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
4379ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
438e0f66ef8SJohn Baldwin 	if (error)
4393745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
440982d11f8SJeff Roberson 	thread_lock(td);
441ad1e7d28SJulian Elischer 	sched_class(td, PRI_ITHD);
442e0f66ef8SJohn Baldwin 	TD_SET_IWAIT(td);
443982d11f8SJeff Roberson 	thread_unlock(td);
444e0f66ef8SJohn Baldwin 	td->td_pflags |= TDP_ITHREAD;
445e0f66ef8SJohn Baldwin 	ithd->it_thread = td;
446e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
447e0f66ef8SJohn Baldwin 	return (ithd);
448e0f66ef8SJohn Baldwin }
449bafe5a31SPaolo Pisati #else
450bafe5a31SPaolo Pisati static struct intr_thread *
451bafe5a31SPaolo Pisati ithread_create(const char *name, struct intr_handler *ih)
452bafe5a31SPaolo Pisati {
453bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
454bafe5a31SPaolo Pisati 	struct thread *td;
455bafe5a31SPaolo Pisati 	int error;
456bafe5a31SPaolo Pisati 
457bafe5a31SPaolo Pisati 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
458bafe5a31SPaolo Pisati 
459539976ffSJulian Elischer 	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
4607ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
4619ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
462bafe5a31SPaolo Pisati 	if (error)
4633745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
464982d11f8SJeff Roberson 	thread_lock(td);
465bafe5a31SPaolo Pisati 	sched_class(td, PRI_ITHD);
466bafe5a31SPaolo Pisati 	TD_SET_IWAIT(td);
467982d11f8SJeff Roberson 	thread_unlock(td);
468bafe5a31SPaolo Pisati 	td->td_pflags |= TDP_ITHREAD;
469bafe5a31SPaolo Pisati 	ithd->it_thread = td;
470bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
471bafe5a31SPaolo Pisati 	return (ithd);
472bafe5a31SPaolo Pisati }
473bafe5a31SPaolo Pisati #endif
474e0f66ef8SJohn Baldwin 
475e0f66ef8SJohn Baldwin static void
476e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread)
477e0f66ef8SJohn Baldwin {
478e0f66ef8SJohn Baldwin 	struct thread *td;
479e0f66ef8SJohn Baldwin 
480bb141be1SScott Long 	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
481e0f66ef8SJohn Baldwin 	td = ithread->it_thread;
482982d11f8SJeff Roberson 	thread_lock(td);
483e0f66ef8SJohn Baldwin 	ithread->it_flags |= IT_DEAD;
48471fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
48571fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
486f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
487b4151f71SJohn Baldwin 	}
488982d11f8SJeff Roberson 	thread_unlock(td);
489b4151f71SJohn Baldwin }
490b4151f71SJohn Baldwin 
491bafe5a31SPaolo Pisati #ifndef INTR_FILTER
492b4151f71SJohn Baldwin int
493e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name,
494ef544f63SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
495ef544f63SPaolo Pisati     enum intr_type flags, void **cookiep)
496b4151f71SJohn Baldwin {
497e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *temp_ih;
498e0f66ef8SJohn Baldwin 	struct intr_thread *it;
499b4151f71SJohn Baldwin 
500ef544f63SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
501b4151f71SJohn Baldwin 		return (EINVAL);
502b4151f71SJohn Baldwin 
503e0f66ef8SJohn Baldwin 	/* Allocate and populate an interrupt handler structure. */
504e0f66ef8SJohn Baldwin 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
505ef544f63SPaolo Pisati 	ih->ih_filter = filter;
506b4151f71SJohn Baldwin 	ih->ih_handler = handler;
507b4151f71SJohn Baldwin 	ih->ih_argument = arg;
508b4151f71SJohn Baldwin 	ih->ih_name = name;
509e0f66ef8SJohn Baldwin 	ih->ih_event = ie;
510b4151f71SJohn Baldwin 	ih->ih_pri = pri;
511ef544f63SPaolo Pisati 	if (flags & INTR_EXCL)
512b4151f71SJohn Baldwin 		ih->ih_flags = IH_EXCLUSIVE;
513b4151f71SJohn Baldwin 	if (flags & INTR_MPSAFE)
514b4151f71SJohn Baldwin 		ih->ih_flags |= IH_MPSAFE;
515b4151f71SJohn Baldwin 	if (flags & INTR_ENTROPY)
516b4151f71SJohn Baldwin 		ih->ih_flags |= IH_ENTROPY;
517b4151f71SJohn Baldwin 
518e0f66ef8SJohn Baldwin 	/* We can only have one exclusive handler in an event. */
519e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
520e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
521e0f66ef8SJohn Baldwin 		if ((flags & INTR_EXCL) ||
522e0f66ef8SJohn Baldwin 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
523e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
524b4151f71SJohn Baldwin 			free(ih, M_ITHREAD);
525b4151f71SJohn Baldwin 			return (EINVAL);
526b4151f71SJohn Baldwin 		}
527e0f66ef8SJohn Baldwin 	}
528e0f66ef8SJohn Baldwin 
529e0f66ef8SJohn Baldwin 	/* Add the new handler to the event in priority order. */
530e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
531e0f66ef8SJohn Baldwin 		if (temp_ih->ih_pri > ih->ih_pri)
532e0f66ef8SJohn Baldwin 			break;
533e0f66ef8SJohn Baldwin 	}
534e0f66ef8SJohn Baldwin 	if (temp_ih == NULL)
535e0f66ef8SJohn Baldwin 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
536e0f66ef8SJohn Baldwin 	else
537e0f66ef8SJohn Baldwin 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
538e0f66ef8SJohn Baldwin 	intr_event_update(ie);
539e0f66ef8SJohn Baldwin 
540e0f66ef8SJohn Baldwin 	/* Create a thread if we need one. */
541ef544f63SPaolo Pisati 	while (ie->ie_thread == NULL && handler != NULL) {
542e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD)
5430f180a7cSJohn Baldwin 			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
544e0f66ef8SJohn Baldwin 		else {
545e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ADDING_THREAD;
546e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
547e0f66ef8SJohn Baldwin 			it = ithread_create("intr: newborn");
548e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
549e0f66ef8SJohn Baldwin 			ie->ie_flags &= ~IE_ADDING_THREAD;
550e0f66ef8SJohn Baldwin 			ie->ie_thread = it;
551e0f66ef8SJohn Baldwin 			it->it_event = ie;
552e0f66ef8SJohn Baldwin 			ithread_update(it);
553e0f66ef8SJohn Baldwin 			wakeup(ie);
554e0f66ef8SJohn Baldwin 		}
555e0f66ef8SJohn Baldwin 	}
556e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
557e0f66ef8SJohn Baldwin 	    ie->ie_name);
558e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
559e0f66ef8SJohn Baldwin 
560e0f66ef8SJohn Baldwin 	if (cookiep != NULL)
561e0f66ef8SJohn Baldwin 		*cookiep = ih;
562e0f66ef8SJohn Baldwin 	return (0);
563e0f66ef8SJohn Baldwin }
564bafe5a31SPaolo Pisati #else
565bafe5a31SPaolo Pisati int
566bafe5a31SPaolo Pisati intr_event_add_handler(struct intr_event *ie, const char *name,
567bafe5a31SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
568bafe5a31SPaolo Pisati     enum intr_type flags, void **cookiep)
569bafe5a31SPaolo Pisati {
570bafe5a31SPaolo Pisati 	struct intr_handler *ih, *temp_ih;
571bafe5a31SPaolo Pisati 	struct intr_thread *it;
572bafe5a31SPaolo Pisati 
573bafe5a31SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
574bafe5a31SPaolo Pisati 		return (EINVAL);
575bafe5a31SPaolo Pisati 
576bafe5a31SPaolo Pisati 	/* Allocate and populate an interrupt handler structure. */
577bafe5a31SPaolo Pisati 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
578bafe5a31SPaolo Pisati 	ih->ih_filter = filter;
579bafe5a31SPaolo Pisati 	ih->ih_handler = handler;
580bafe5a31SPaolo Pisati 	ih->ih_argument = arg;
581bafe5a31SPaolo Pisati 	ih->ih_name = name;
582bafe5a31SPaolo Pisati 	ih->ih_event = ie;
583bafe5a31SPaolo Pisati 	ih->ih_pri = pri;
584bafe5a31SPaolo Pisati 	if (flags & INTR_EXCL)
585bafe5a31SPaolo Pisati 		ih->ih_flags = IH_EXCLUSIVE;
586bafe5a31SPaolo Pisati 	if (flags & INTR_MPSAFE)
587bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_MPSAFE;
588bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
589bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_ENTROPY;
590bafe5a31SPaolo Pisati 
591bafe5a31SPaolo Pisati 	/* We can only have one exclusive handler in an event. */
592bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
593bafe5a31SPaolo Pisati 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
594bafe5a31SPaolo Pisati 		if ((flags & INTR_EXCL) ||
595bafe5a31SPaolo Pisati 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
596bafe5a31SPaolo Pisati 			mtx_unlock(&ie->ie_lock);
597bafe5a31SPaolo Pisati 			free(ih, M_ITHREAD);
598bafe5a31SPaolo Pisati 			return (EINVAL);
599bafe5a31SPaolo Pisati 		}
600bafe5a31SPaolo Pisati 	}
601bafe5a31SPaolo Pisati 
602bafe5a31SPaolo Pisati 	/* Add the new handler to the event in priority order. */
603bafe5a31SPaolo Pisati 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
604bafe5a31SPaolo Pisati 		if (temp_ih->ih_pri > ih->ih_pri)
605bafe5a31SPaolo Pisati 			break;
606bafe5a31SPaolo Pisati 	}
607bafe5a31SPaolo Pisati 	if (temp_ih == NULL)
608bafe5a31SPaolo Pisati 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
609bafe5a31SPaolo Pisati 	else
610bafe5a31SPaolo Pisati 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
611bafe5a31SPaolo Pisati 	intr_event_update(ie);
612bafe5a31SPaolo Pisati 
613bafe5a31SPaolo Pisati 	/* For filtered handlers, create a private ithread to run on. */
614bafe5a31SPaolo Pisati 	if (filter != NULL && handler != NULL) {
615bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
616bafe5a31SPaolo Pisati 		it = ithread_create("intr: newborn", ih);
617bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
618bafe5a31SPaolo Pisati 		it->it_event = ie;
619bafe5a31SPaolo Pisati 		ih->ih_thread = it;
620bafe5a31SPaolo Pisati 		ithread_update(it); // XXX - do we really need this?!?!?
621bafe5a31SPaolo Pisati 	} else { /* Create the global per-event thread if we need one. */
622bafe5a31SPaolo Pisati 		while (ie->ie_thread == NULL && handler != NULL) {
623bafe5a31SPaolo Pisati 			if (ie->ie_flags & IE_ADDING_THREAD)
624bafe5a31SPaolo Pisati 				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
625bafe5a31SPaolo Pisati 			else {
626bafe5a31SPaolo Pisati 				ie->ie_flags |= IE_ADDING_THREAD;
627bafe5a31SPaolo Pisati 				mtx_unlock(&ie->ie_lock);
628bafe5a31SPaolo Pisati 				it = ithread_create("intr: newborn", ih);
629bafe5a31SPaolo Pisati 				mtx_lock(&ie->ie_lock);
630bafe5a31SPaolo Pisati 				ie->ie_flags &= ~IE_ADDING_THREAD;
631bafe5a31SPaolo Pisati 				ie->ie_thread = it;
632bafe5a31SPaolo Pisati 				it->it_event = ie;
633bafe5a31SPaolo Pisati 				ithread_update(it);
634bafe5a31SPaolo Pisati 				wakeup(ie);
635bafe5a31SPaolo Pisati 			}
636bafe5a31SPaolo Pisati 		}
637bafe5a31SPaolo Pisati 	}
638bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
639bafe5a31SPaolo Pisati 	    ie->ie_name);
640bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
641bafe5a31SPaolo Pisati 
642bafe5a31SPaolo Pisati 	if (cookiep != NULL)
643bafe5a31SPaolo Pisati 		*cookiep = ih;
644bafe5a31SPaolo Pisati 	return (0);
645bafe5a31SPaolo Pisati }
646bafe5a31SPaolo Pisati #endif
647b4151f71SJohn Baldwin 
648c3045318SJohn Baldwin /*
649c3045318SJohn Baldwin  * Return the ie_source field from the intr_event an intr_handler is
650c3045318SJohn Baldwin  * associated with.
651c3045318SJohn Baldwin  */
652c3045318SJohn Baldwin void *
653c3045318SJohn Baldwin intr_handler_source(void *cookie)
654c3045318SJohn Baldwin {
655c3045318SJohn Baldwin 	struct intr_handler *ih;
656c3045318SJohn Baldwin 	struct intr_event *ie;
657c3045318SJohn Baldwin 
658c3045318SJohn Baldwin 	ih = (struct intr_handler *)cookie;
659c3045318SJohn Baldwin 	if (ih == NULL)
660c3045318SJohn Baldwin 		return (NULL);
661c3045318SJohn Baldwin 	ie = ih->ih_event;
662c3045318SJohn Baldwin 	KASSERT(ie != NULL,
663c3045318SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
664c3045318SJohn Baldwin 	    ih->ih_name));
665c3045318SJohn Baldwin 	return (ie->ie_source);
666c3045318SJohn Baldwin }
667c3045318SJohn Baldwin 
668bafe5a31SPaolo Pisati #ifndef INTR_FILTER
669b4151f71SJohn Baldwin int
670e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie)
671b4151f71SJohn Baldwin {
672e0f66ef8SJohn Baldwin 	struct intr_handler *handler = (struct intr_handler *)cookie;
673e0f66ef8SJohn Baldwin 	struct intr_event *ie;
674b4151f71SJohn Baldwin #ifdef INVARIANTS
675e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
676e0f66ef8SJohn Baldwin #endif
677e0f66ef8SJohn Baldwin #ifdef notyet
678e0f66ef8SJohn Baldwin 	int dead;
679b4151f71SJohn Baldwin #endif
680b4151f71SJohn Baldwin 
6813e5da754SJohn Baldwin 	if (handler == NULL)
682b4151f71SJohn Baldwin 		return (EINVAL);
683e0f66ef8SJohn Baldwin 	ie = handler->ih_event;
684e0f66ef8SJohn Baldwin 	KASSERT(ie != NULL,
685e0f66ef8SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
6863e5da754SJohn Baldwin 	    handler->ih_name));
687e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
68891f91617SDavid E. O'Brien 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
689e0f66ef8SJohn Baldwin 	    ie->ie_name);
690b4151f71SJohn Baldwin #ifdef INVARIANTS
691e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
6923e5da754SJohn Baldwin 		if (ih == handler)
6933e5da754SJohn Baldwin 			goto ok;
694e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
695e0f66ef8SJohn Baldwin 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
696e0f66ef8SJohn Baldwin 	    ih->ih_name, ie->ie_name);
6973e5da754SJohn Baldwin ok:
698b4151f71SJohn Baldwin #endif
699de271f01SJohn Baldwin 	/*
700e0f66ef8SJohn Baldwin 	 * If there is no ithread, then just remove the handler and return.
701e0f66ef8SJohn Baldwin 	 * XXX: Note that an INTR_FAST handler might be running on another
702e0f66ef8SJohn Baldwin 	 * CPU!
703e0f66ef8SJohn Baldwin 	 */
704e0f66ef8SJohn Baldwin 	if (ie->ie_thread == NULL) {
705e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
706e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
707e0f66ef8SJohn Baldwin 		free(handler, M_ITHREAD);
708e0f66ef8SJohn Baldwin 		return (0);
709e0f66ef8SJohn Baldwin 	}
710e0f66ef8SJohn Baldwin 
711e0f66ef8SJohn Baldwin 	/*
712de271f01SJohn Baldwin 	 * If the interrupt thread is already running, then just mark this
713de271f01SJohn Baldwin 	 * handler as being dead and let the ithread do the actual removal.
714288e351bSDon Lewis 	 *
715288e351bSDon Lewis 	 * During a cold boot while cold is set, msleep() does not sleep,
716288e351bSDon Lewis 	 * so we have to remove the handler here rather than letting the
717288e351bSDon Lewis 	 * thread do it.
718de271f01SJohn Baldwin 	 */
719982d11f8SJeff Roberson 	thread_lock(ie->ie_thread->it_thread);
720e0f66ef8SJohn Baldwin 	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
721de271f01SJohn Baldwin 		handler->ih_flags |= IH_DEAD;
722de271f01SJohn Baldwin 
723de271f01SJohn Baldwin 		/*
724de271f01SJohn Baldwin 		 * Ensure that the thread will process the handler list
725de271f01SJohn Baldwin 		 * again and remove this handler if it has already passed
726de271f01SJohn Baldwin 		 * it on the list.
727de271f01SJohn Baldwin 		 */
728e0f66ef8SJohn Baldwin 		ie->ie_thread->it_need = 1;
7294d29cb2dSJohn Baldwin 	} else
730e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
731982d11f8SJeff Roberson 	thread_unlock(ie->ie_thread->it_thread);
732e0f66ef8SJohn Baldwin 	while (handler->ih_flags & IH_DEAD)
7330f180a7cSJohn Baldwin 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
734e0f66ef8SJohn Baldwin 	intr_event_update(ie);
735e0f66ef8SJohn Baldwin #ifdef notyet
736e0f66ef8SJohn Baldwin 	/*
737e0f66ef8SJohn Baldwin 	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
738e0f66ef8SJohn Baldwin 	 * this could lead to races of stale data when servicing an
739e0f66ef8SJohn Baldwin 	 * interrupt.
740e0f66ef8SJohn Baldwin 	 */
741e0f66ef8SJohn Baldwin 	dead = 1;
742e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
743e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_FAST)) {
744e0f66ef8SJohn Baldwin 			dead = 0;
745e0f66ef8SJohn Baldwin 			break;
746e0f66ef8SJohn Baldwin 		}
747e0f66ef8SJohn Baldwin 	}
748e0f66ef8SJohn Baldwin 	if (dead) {
749e0f66ef8SJohn Baldwin 		ithread_destroy(ie->ie_thread);
750e0f66ef8SJohn Baldwin 		ie->ie_thread = NULL;
751e0f66ef8SJohn Baldwin 	}
752e0f66ef8SJohn Baldwin #endif
753e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
754b4151f71SJohn Baldwin 	free(handler, M_ITHREAD);
755b4151f71SJohn Baldwin 	return (0);
756b4151f71SJohn Baldwin }
757b4151f71SJohn Baldwin 
7581ee1b687SJohn Baldwin static int
759e0f66ef8SJohn Baldwin intr_event_schedule_thread(struct intr_event *ie)
7603e5da754SJohn Baldwin {
761e0f66ef8SJohn Baldwin 	struct intr_entropy entropy;
762e0f66ef8SJohn Baldwin 	struct intr_thread *it;
763b40ce416SJulian Elischer 	struct thread *td;
76404774f23SJulian Elischer 	struct thread *ctd;
7653e5da754SJohn Baldwin 	struct proc *p;
7663e5da754SJohn Baldwin 
7673e5da754SJohn Baldwin 	/*
7683e5da754SJohn Baldwin 	 * If no ithread or no handlers, then we have a stray interrupt.
7693e5da754SJohn Baldwin 	 */
770e0f66ef8SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
771e0f66ef8SJohn Baldwin 	    ie->ie_thread == NULL)
7723e5da754SJohn Baldwin 		return (EINVAL);
7733e5da754SJohn Baldwin 
77404774f23SJulian Elischer 	ctd = curthread;
775e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
776e0f66ef8SJohn Baldwin 	td = it->it_thread;
7776f40c417SRobert Watson 	p = td->td_proc;
778e0f66ef8SJohn Baldwin 
7793e5da754SJohn Baldwin 	/*
7803e5da754SJohn Baldwin 	 * If any of the handlers for this ithread claim to be good
7813e5da754SJohn Baldwin 	 * sources of entropy, then gather some.
7823e5da754SJohn Baldwin 	 */
783e0f66ef8SJohn Baldwin 	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
7846f40c417SRobert Watson 		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
7857ab24ea3SJulian Elischer 		    p->p_pid, td->td_name);
786e0f66ef8SJohn Baldwin 		entropy.event = (uintptr_t)ie;
787e0f66ef8SJohn Baldwin 		entropy.td = ctd;
7883e5da754SJohn Baldwin 		random_harvest(&entropy, sizeof(entropy), 2, 0,
7893e5da754SJohn Baldwin 		    RANDOM_INTERRUPT);
7903e5da754SJohn Baldwin 	}
7913e5da754SJohn Baldwin 
792e0f66ef8SJohn Baldwin 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
7933e5da754SJohn Baldwin 
7943e5da754SJohn Baldwin 	/*
7953e5da754SJohn Baldwin 	 * Set it_need to tell the thread to keep running if it is already
796982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
797982d11f8SJeff Roberson 	 * put it on the runqueue.
7983e5da754SJohn Baldwin 	 */
799e0f66ef8SJohn Baldwin 	it->it_need = 1;
800982d11f8SJeff Roberson 	thread_lock(td);
80171fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
802e0f66ef8SJohn Baldwin 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
8037ab24ea3SJulian Elischer 		    td->td_name);
80471fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
805f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
8063e5da754SJohn Baldwin 	} else {
807e0f66ef8SJohn Baldwin 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
8087ab24ea3SJulian Elischer 		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
8093e5da754SJohn Baldwin 	}
810982d11f8SJeff Roberson 	thread_unlock(td);
8113e5da754SJohn Baldwin 
8123e5da754SJohn Baldwin 	return (0);
8133e5da754SJohn Baldwin }
814bafe5a31SPaolo Pisati #else
815bafe5a31SPaolo Pisati int
816bafe5a31SPaolo Pisati intr_event_remove_handler(void *cookie)
817bafe5a31SPaolo Pisati {
818bafe5a31SPaolo Pisati 	struct intr_handler *handler = (struct intr_handler *)cookie;
819bafe5a31SPaolo Pisati 	struct intr_event *ie;
820bafe5a31SPaolo Pisati 	struct intr_thread *it;
821bafe5a31SPaolo Pisati #ifdef INVARIANTS
822bafe5a31SPaolo Pisati 	struct intr_handler *ih;
823bafe5a31SPaolo Pisati #endif
824bafe5a31SPaolo Pisati #ifdef notyet
825bafe5a31SPaolo Pisati 	int dead;
826bafe5a31SPaolo Pisati #endif
827bafe5a31SPaolo Pisati 
828bafe5a31SPaolo Pisati 	if (handler == NULL)
829bafe5a31SPaolo Pisati 		return (EINVAL);
830bafe5a31SPaolo Pisati 	ie = handler->ih_event;
831bafe5a31SPaolo Pisati 	KASSERT(ie != NULL,
832bafe5a31SPaolo Pisati 	    ("interrupt handler \"%s\" has a NULL interrupt event",
833bafe5a31SPaolo Pisati 	    handler->ih_name));
834bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
835bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
836bafe5a31SPaolo Pisati 	    ie->ie_name);
837bafe5a31SPaolo Pisati #ifdef INVARIANTS
838bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
839bafe5a31SPaolo Pisati 		if (ih == handler)
840bafe5a31SPaolo Pisati 			goto ok;
841bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
842bafe5a31SPaolo Pisati 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
843bafe5a31SPaolo Pisati 	    ih->ih_name, ie->ie_name);
844bafe5a31SPaolo Pisati ok:
845bafe5a31SPaolo Pisati #endif
846bafe5a31SPaolo Pisati 	/*
847bafe5a31SPaolo Pisati 	 * If there are no ithreads (per event and per handler), then
848bafe5a31SPaolo Pisati 	 * just remove the handler and return.
849bafe5a31SPaolo Pisati 	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
850bafe5a31SPaolo Pisati 	 */
851bafe5a31SPaolo Pisati 	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
852bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
853bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
854bafe5a31SPaolo Pisati 		free(handler, M_ITHREAD);
855bafe5a31SPaolo Pisati 		return (0);
856bafe5a31SPaolo Pisati 	}
857bafe5a31SPaolo Pisati 
858bafe5a31SPaolo Pisati 	/* Private or global ithread? */
859bafe5a31SPaolo Pisati 	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
860bafe5a31SPaolo Pisati 	/*
861bafe5a31SPaolo Pisati 	 * If the interrupt thread is already running, then just mark this
862bafe5a31SPaolo Pisati 	 * handler as being dead and let the ithread do the actual removal.
863bafe5a31SPaolo Pisati 	 *
864bafe5a31SPaolo Pisati 	 * During a cold boot while cold is set, msleep() does not sleep,
865bafe5a31SPaolo Pisati 	 * so we have to remove the handler here rather than letting the
866bafe5a31SPaolo Pisati 	 * thread do it.
867bafe5a31SPaolo Pisati 	 */
868982d11f8SJeff Roberson 	thread_lock(it->it_thread);
869bafe5a31SPaolo Pisati 	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
870bafe5a31SPaolo Pisati 		handler->ih_flags |= IH_DEAD;
871bafe5a31SPaolo Pisati 
872bafe5a31SPaolo Pisati 		/*
873bafe5a31SPaolo Pisati 		 * Ensure that the thread will process the handler list
874bafe5a31SPaolo Pisati 		 * again and remove this handler if it has already passed
875bafe5a31SPaolo Pisati 		 * it on the list.
876bafe5a31SPaolo Pisati 		 */
877bafe5a31SPaolo Pisati 		it->it_need = 1;
878bafe5a31SPaolo Pisati 	} else
879bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
880982d11f8SJeff Roberson 	thread_unlock(it->it_thread);
881bafe5a31SPaolo Pisati 	while (handler->ih_flags & IH_DEAD)
882bafe5a31SPaolo Pisati 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
883bafe5a31SPaolo Pisati 	/*
884bafe5a31SPaolo Pisati 	 * At this point, the handler has been disconnected from the event,
885bafe5a31SPaolo Pisati 	 * so we can kill the private ithread if any.
886bafe5a31SPaolo Pisati 	 */
887bafe5a31SPaolo Pisati 	if (handler->ih_thread) {
888bafe5a31SPaolo Pisati 		ithread_destroy(handler->ih_thread);
889bafe5a31SPaolo Pisati 		handler->ih_thread = NULL;
890bafe5a31SPaolo Pisati 	}
891bafe5a31SPaolo Pisati 	intr_event_update(ie);
892bafe5a31SPaolo Pisati #ifdef notyet
893bafe5a31SPaolo Pisati 	/*
894bafe5a31SPaolo Pisati 	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
895bafe5a31SPaolo Pisati 	 * this could lead to races of stale data when servicing an
896bafe5a31SPaolo Pisati 	 * interrupt.
897bafe5a31SPaolo Pisati 	 */
898bafe5a31SPaolo Pisati 	dead = 1;
899bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
900bafe5a31SPaolo Pisati 		if (handler != NULL) {
901bafe5a31SPaolo Pisati 			dead = 0;
902bafe5a31SPaolo Pisati 			break;
903bafe5a31SPaolo Pisati 		}
904bafe5a31SPaolo Pisati 	}
905bafe5a31SPaolo Pisati 	if (dead) {
906bafe5a31SPaolo Pisati 		ithread_destroy(ie->ie_thread);
907bafe5a31SPaolo Pisati 		ie->ie_thread = NULL;
908bafe5a31SPaolo Pisati 	}
909bafe5a31SPaolo Pisati #endif
910bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
911bafe5a31SPaolo Pisati 	free(handler, M_ITHREAD);
912bafe5a31SPaolo Pisati 	return (0);
913bafe5a31SPaolo Pisati }
914bafe5a31SPaolo Pisati 
9151ee1b687SJohn Baldwin static int
916bafe5a31SPaolo Pisati intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
917bafe5a31SPaolo Pisati {
918bafe5a31SPaolo Pisati 	struct intr_entropy entropy;
919bafe5a31SPaolo Pisati 	struct thread *td;
920bafe5a31SPaolo Pisati 	struct thread *ctd;
921bafe5a31SPaolo Pisati 	struct proc *p;
922bafe5a31SPaolo Pisati 
923bafe5a31SPaolo Pisati 	/*
924bafe5a31SPaolo Pisati 	 * If no ithread or no handlers, then we have a stray interrupt.
925bafe5a31SPaolo Pisati 	 */
926bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
927bafe5a31SPaolo Pisati 		return (EINVAL);
928bafe5a31SPaolo Pisati 
929bafe5a31SPaolo Pisati 	ctd = curthread;
930bafe5a31SPaolo Pisati 	td = it->it_thread;
931bafe5a31SPaolo Pisati 	p = td->td_proc;
932bafe5a31SPaolo Pisati 
933bafe5a31SPaolo Pisati 	/*
934bafe5a31SPaolo Pisati 	 * If any of the handlers for this ithread claim to be good
935bafe5a31SPaolo Pisati 	 * sources of entropy, then gather some.
936bafe5a31SPaolo Pisati 	 */
937bafe5a31SPaolo Pisati 	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
938bafe5a31SPaolo Pisati 		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
9397ab24ea3SJulian Elischer 		    p->p_pid, td->td_name);
940bafe5a31SPaolo Pisati 		entropy.event = (uintptr_t)ie;
941bafe5a31SPaolo Pisati 		entropy.td = ctd;
942bafe5a31SPaolo Pisati 		random_harvest(&entropy, sizeof(entropy), 2, 0,
943bafe5a31SPaolo Pisati 		    RANDOM_INTERRUPT);
944bafe5a31SPaolo Pisati 	}
945bafe5a31SPaolo Pisati 
946bafe5a31SPaolo Pisati 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
947bafe5a31SPaolo Pisati 
948bafe5a31SPaolo Pisati 	/*
949bafe5a31SPaolo Pisati 	 * Set it_need to tell the thread to keep running if it is already
950982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
951982d11f8SJeff Roberson 	 * put it on the runqueue.
952bafe5a31SPaolo Pisati 	 */
953bafe5a31SPaolo Pisati 	it->it_need = 1;
954982d11f8SJeff Roberson 	thread_lock(td);
955bafe5a31SPaolo Pisati 	if (TD_AWAITING_INTR(td)) {
956bafe5a31SPaolo Pisati 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
9573c1ffc32SJulian Elischer 		    td->td_name);
958bafe5a31SPaolo Pisati 		TD_CLR_IWAIT(td);
959bafe5a31SPaolo Pisati 		sched_add(td, SRQ_INTR);
960bafe5a31SPaolo Pisati 	} else {
961bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
9627ab24ea3SJulian Elischer 		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
963bafe5a31SPaolo Pisati 	}
964982d11f8SJeff Roberson 	thread_unlock(td);
965bafe5a31SPaolo Pisati 
966bafe5a31SPaolo Pisati 	return (0);
967bafe5a31SPaolo Pisati }
968bafe5a31SPaolo Pisati #endif
9693e5da754SJohn Baldwin 
970fe486a37SJohn Baldwin /*
971e84bcd84SRobert Watson  * Allow interrupt event binding for software interrupt handlers -- a no-op,
972e84bcd84SRobert Watson  * since interrupts are generated in software rather than being directed by
973e84bcd84SRobert Watson  * a PIC.
974e84bcd84SRobert Watson  */
975e84bcd84SRobert Watson static int
976e84bcd84SRobert Watson swi_assign_cpu(void *arg, u_char cpu)
977e84bcd84SRobert Watson {
978e84bcd84SRobert Watson 
979e84bcd84SRobert Watson 	return (0);
980e84bcd84SRobert Watson }
981e84bcd84SRobert Watson 
982e84bcd84SRobert Watson /*
983fe486a37SJohn Baldwin  * Add a software interrupt handler to a specified event.  If a given event
984fe486a37SJohn Baldwin  * is not specified, then a new event is created.
985fe486a37SJohn Baldwin  */
9863e5da754SJohn Baldwin int
987e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
988b4151f71SJohn Baldwin 	    void *arg, int pri, enum intr_type flags, void **cookiep)
9898088699fSJohn Baldwin {
990e0f66ef8SJohn Baldwin 	struct intr_event *ie;
991b4151f71SJohn Baldwin 	int error;
9928088699fSJohn Baldwin 
993bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
9943e5da754SJohn Baldwin 		return (EINVAL);
9953e5da754SJohn Baldwin 
996e0f66ef8SJohn Baldwin 	ie = (eventp != NULL) ? *eventp : NULL;
9978088699fSJohn Baldwin 
998e0f66ef8SJohn Baldwin 	if (ie != NULL) {
999e0f66ef8SJohn Baldwin 		if (!(ie->ie_flags & IE_SOFT))
10003e5da754SJohn Baldwin 			return (EINVAL);
10013e5da754SJohn Baldwin 	} else {
10029b33b154SJeff Roberson 		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1003e84bcd84SRobert Watson 		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
10048088699fSJohn Baldwin 		if (error)
1005b4151f71SJohn Baldwin 			return (error);
1006e0f66ef8SJohn Baldwin 		if (eventp != NULL)
1007e0f66ef8SJohn Baldwin 			*eventp = ie;
10088088699fSJohn Baldwin 	}
10098d809d50SJeff Roberson 	error = intr_event_add_handler(ie, name, NULL, handler, arg,
10108d809d50SJeff Roberson 	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
10118d809d50SJeff Roberson 	if (error)
10128d809d50SJeff Roberson 		return (error);
10138d809d50SJeff Roberson 	if (pri == SWI_CLOCK) {
10148d809d50SJeff Roberson 		struct proc *p;
10158d809d50SJeff Roberson 		p = ie->ie_thread->it_thread->td_proc;
10168d809d50SJeff Roberson 		PROC_LOCK(p);
10178d809d50SJeff Roberson 		p->p_flag |= P_NOLOAD;
10188d809d50SJeff Roberson 		PROC_UNLOCK(p);
10198d809d50SJeff Roberson 	}
10208d809d50SJeff Roberson 	return (0);
10218088699fSJohn Baldwin }
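/*
 * A minimal sketch of registering and scheduling a software interrupt
 * handler (the handler name and priority are illustrative; the SWI_*
 * priorities come from sys/interrupt.h):
 *
 *	static void *example_cookie;
 *
 *	error = swi_add(NULL, "example", example_handler, NULL, SWI_TQ,
 *	    0, &example_cookie);
 *	...
 *	swi_sched(example_cookie, 0);
 */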
10228088699fSJohn Baldwin 
10231931cf94SJohn Baldwin /*
1024e0f66ef8SJohn Baldwin  * Schedule a software interrupt thread.
10251931cf94SJohn Baldwin  */
10261931cf94SJohn Baldwin void
1027b4151f71SJohn Baldwin swi_sched(void *cookie, int flags)
10281931cf94SJohn Baldwin {
1029e0f66ef8SJohn Baldwin 	struct intr_handler *ih = (struct intr_handler *)cookie;
1030e0f66ef8SJohn Baldwin 	struct intr_event *ie = ih->ih_event;
10313e5da754SJohn Baldwin 	int error;
10328088699fSJohn Baldwin 
1033e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1034e0f66ef8SJohn Baldwin 	    ih->ih_need);
10351931cf94SJohn Baldwin 
10361931cf94SJohn Baldwin 	/*
10373e5da754SJohn Baldwin 	 * Set ih_need for this handler so that if the ithread is already
10383e5da754SJohn Baldwin 	 * running, it will execute this handler on the next pass.  Otherwise,
10393e5da754SJohn Baldwin 	 * it will execute it the next time it runs.
10401931cf94SJohn Baldwin 	 */
1041b4151f71SJohn Baldwin 	atomic_store_rel_int(&ih->ih_need, 1);
10421ca2c018SBruce Evans 
1043b4151f71SJohn Baldwin 	if (!(flags & SWI_DELAY)) {
104467596082SAttilio Rao 		PCPU_INC(cnt.v_soft);
1045bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1046bafe5a31SPaolo Pisati 		error = intr_event_schedule_thread(ie, ie->ie_thread);
1047bafe5a31SPaolo Pisati #else
1048e0f66ef8SJohn Baldwin 		error = intr_event_schedule_thread(ie);
1049bafe5a31SPaolo Pisati #endif
10503e5da754SJohn Baldwin 		KASSERT(error == 0, ("stray software interrupt"));
10518088699fSJohn Baldwin 	}
10528088699fSJohn Baldwin }
10538088699fSJohn Baldwin 
1054fe486a37SJohn Baldwin /*
1055fe486a37SJohn Baldwin  * Remove a software interrupt handler.  Currently this code does not
1056fe486a37SJohn Baldwin  * remove the associated interrupt event if it becomes empty.  Calling code
1057fe486a37SJohn Baldwin  * may do so manually via intr_event_destroy(), but that's not really
1058fe486a37SJohn Baldwin  * an optimal interface.
1059fe486a37SJohn Baldwin  */
1060fe486a37SJohn Baldwin int
1061fe486a37SJohn Baldwin swi_remove(void *cookie)
1062fe486a37SJohn Baldwin {
1063fe486a37SJohn Baldwin 
1064fe486a37SJohn Baldwin 	return (intr_event_remove_handler(cookie));
1065fe486a37SJohn Baldwin }
1066fe486a37SJohn Baldwin 
1067bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1068bafe5a31SPaolo Pisati static void
1069bafe5a31SPaolo Pisati priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
1070bafe5a31SPaolo Pisati {
1071bafe5a31SPaolo Pisati 	struct intr_event *ie;
1072bafe5a31SPaolo Pisati 
1073bafe5a31SPaolo Pisati 	ie = ih->ih_event;
1074bafe5a31SPaolo Pisati 	/*
1075bafe5a31SPaolo Pisati 	 * If this handler is marked for death, remove it from
1076bafe5a31SPaolo Pisati 	 * the list of handlers and wake up the sleeper.
1077bafe5a31SPaolo Pisati 	 */
1078bafe5a31SPaolo Pisati 	if (ih->ih_flags & IH_DEAD) {
1079bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
1080bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1081bafe5a31SPaolo Pisati 		ih->ih_flags &= ~IH_DEAD;
1082bafe5a31SPaolo Pisati 		wakeup(ih);
1083bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
1084bafe5a31SPaolo Pisati 		return;
1085bafe5a31SPaolo Pisati 	}
1086bafe5a31SPaolo Pisati 
1087bafe5a31SPaolo Pisati 	/* Execute this handler. */
1088bafe5a31SPaolo Pisati 	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1089bafe5a31SPaolo Pisati 	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
1090bafe5a31SPaolo Pisati 	     ih->ih_name, ih->ih_flags);
1091bafe5a31SPaolo Pisati 
1092bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1093bafe5a31SPaolo Pisati 		mtx_lock(&Giant);
1094bafe5a31SPaolo Pisati 	ih->ih_handler(ih->ih_argument);
1095bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1096bafe5a31SPaolo Pisati 		mtx_unlock(&Giant);
1097bafe5a31SPaolo Pisati }
1098bafe5a31SPaolo Pisati #endif
1099bafe5a31SPaolo Pisati 
110037e9511fSJohn Baldwin /*
110137e9511fSJohn Baldwin  * This is a public function for use by drivers that mux interrupt
110237e9511fSJohn Baldwin  * handlers for child devices from their interrupt handler.
110337e9511fSJohn Baldwin  */
110437e9511fSJohn Baldwin void
110537e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1106e0f66ef8SJohn Baldwin {
1107e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *ihn;
1108e0f66ef8SJohn Baldwin 
1109e0f66ef8SJohn Baldwin 	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1110e0f66ef8SJohn Baldwin 		/*
1111e0f66ef8SJohn Baldwin 		 * If this handler is marked for death, remove it from
1112e0f66ef8SJohn Baldwin 		 * the list of handlers and wake up the sleeper.
1113e0f66ef8SJohn Baldwin 		 */
1114e0f66ef8SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
1115e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
1116e0f66ef8SJohn Baldwin 			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1117e0f66ef8SJohn Baldwin 			ih->ih_flags &= ~IH_DEAD;
1118e0f66ef8SJohn Baldwin 			wakeup(ih);
1119e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
1120e0f66ef8SJohn Baldwin 			continue;
1121e0f66ef8SJohn Baldwin 		}
1122e0f66ef8SJohn Baldwin 
1123f2d619c8SPaolo Pisati 		/* Skip filter-only handlers. */
1124f2d619c8SPaolo Pisati 		if (ih->ih_handler == NULL)
1125f2d619c8SPaolo Pisati 			continue;
1126f2d619c8SPaolo Pisati 
1127e0f66ef8SJohn Baldwin 		/*
1128e0f66ef8SJohn Baldwin 		 * For software interrupt threads, we only execute
1129e0f66ef8SJohn Baldwin 		 * handlers that have their need flag set.  Hardware
1130e0f66ef8SJohn Baldwin 		 * interrupt threads always invoke all of their handlers.
1131e0f66ef8SJohn Baldwin 		 */
1132e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_SOFT) {
1133e0f66ef8SJohn Baldwin 			if (!ih->ih_need)
1134e0f66ef8SJohn Baldwin 				continue;
1135e0f66ef8SJohn Baldwin 			else
1136e0f66ef8SJohn Baldwin 				atomic_store_rel_int(&ih->ih_need, 0);
1137e0f66ef8SJohn Baldwin 		}
1138e0f66ef8SJohn Baldwin 
1139e0f66ef8SJohn Baldwin 		/* Execute this handler. */
1140e0f66ef8SJohn Baldwin 		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1141bafe5a31SPaolo Pisati 		    __func__, p->p_pid, (void *)ih->ih_handler,
1142bafe5a31SPaolo Pisati 		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1143e0f66ef8SJohn Baldwin 
1144e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1145e0f66ef8SJohn Baldwin 			mtx_lock(&Giant);
1146e0f66ef8SJohn Baldwin 		ih->ih_handler(ih->ih_argument);
1147e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1148e0f66ef8SJohn Baldwin 			mtx_unlock(&Giant);
1149e0f66ef8SJohn Baldwin 	}
115037e9511fSJohn Baldwin }
115137e9511fSJohn Baldwin 
115237e9511fSJohn Baldwin static void
115337e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie)
115437e9511fSJohn Baldwin {
115537e9511fSJohn Baldwin 
115637e9511fSJohn Baldwin 	/* Interrupt handlers should not sleep. */
115737e9511fSJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
115837e9511fSJohn Baldwin 		THREAD_NO_SLEEPING();
115937e9511fSJohn Baldwin 	intr_event_execute_handlers(p, ie);
1160e0f66ef8SJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
1161e0f66ef8SJohn Baldwin 		THREAD_SLEEPING_OK();
1162e0f66ef8SJohn Baldwin 
1163e0f66ef8SJohn Baldwin 	/*
1164e0f66ef8SJohn Baldwin 	 * Interrupt storm handling:
1165e0f66ef8SJohn Baldwin 	 *
1166e0f66ef8SJohn Baldwin 	 * If this interrupt source is currently storming, then throttle
1167e0f66ef8SJohn Baldwin 	 * it to only fire the handler once per clock tick.
1168e0f66ef8SJohn Baldwin 	 *
1169e0f66ef8SJohn Baldwin 	 * If this interrupt source is not currently storming, but the
1170e0f66ef8SJohn Baldwin 	 * number of back-to-back interrupts exceeds the storm threshold,
1171e0f66ef8SJohn Baldwin 	 * then enter storming mode.
1172e0f66ef8SJohn Baldwin 	 */
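	/*
	 * Worked example (values assumed, not taken from this file): with
	 * hz at its common default of 1000, pause("istorm", 1) sleeps for
	 * one tick (1/hz s) between passes, so a storming source has its
	 * handlers run at most about 1000 times per second rather than at
	 * the raw interrupt rate.
	 */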
1173e41bcf3cSJohn Baldwin 	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1174e41bcf3cSJohn Baldwin 	    !(ie->ie_flags & IE_SOFT)) {
11750ae62c18SNate Lawson 		/* Report the message only once every second. */
11760ae62c18SNate Lawson 		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1177e0f66ef8SJohn Baldwin 			printf(
11780ae62c18SNate Lawson 	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1179e0f66ef8SJohn Baldwin 			    ie->ie_name);
1180e0f66ef8SJohn Baldwin 		}
1181e41bcf3cSJohn Baldwin 		pause("istorm", 1);
1182e0f66ef8SJohn Baldwin 	} else
1183e0f66ef8SJohn Baldwin 		ie->ie_count++;
1184e0f66ef8SJohn Baldwin 
1185e0f66ef8SJohn Baldwin 	/*
1186e0f66ef8SJohn Baldwin 	 * Now that all the handlers have had a chance to run, reenable
1187e0f66ef8SJohn Baldwin 	 * the interrupt source.
1188e0f66ef8SJohn Baldwin 	 */
11891ee1b687SJohn Baldwin 	if (ie->ie_post_ithread != NULL)
11901ee1b687SJohn Baldwin 		ie->ie_post_ithread(ie->ie_source);
1191e0f66ef8SJohn Baldwin }
1192e0f66ef8SJohn Baldwin 
1193bafe5a31SPaolo Pisati #ifndef INTR_FILTER
11948088699fSJohn Baldwin /*
1195b4151f71SJohn Baldwin  * This is the main code for interrupt threads.
11968088699fSJohn Baldwin  */
119737c84183SPoul-Henning Kamp static void
1198b4151f71SJohn Baldwin ithread_loop(void *arg)
11998088699fSJohn Baldwin {
1200e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
1201e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1202b40ce416SJulian Elischer 	struct thread *td;
1203b4151f71SJohn Baldwin 	struct proc *p;
12048088699fSJohn Baldwin 
1205b40ce416SJulian Elischer 	td = curthread;
1206b40ce416SJulian Elischer 	p = td->td_proc;
1207e0f66ef8SJohn Baldwin 	ithd = (struct intr_thread *)arg;
1208e0f66ef8SJohn Baldwin 	KASSERT(ithd->it_thread == td,
120991f91617SDavid E. O'Brien 	    ("%s: ithread and proc linkage out of sync", __func__));
1210e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
1211e0f66ef8SJohn Baldwin 	ie->ie_count = 0;
12128088699fSJohn Baldwin 
12138088699fSJohn Baldwin 	/*
12148088699fSJohn Baldwin 	 * As long as we have interrupts outstanding, go through the
12158088699fSJohn Baldwin 	 * list of handlers, giving each one a go at it.
12168088699fSJohn Baldwin 	 */
12178088699fSJohn Baldwin 	for (;;) {
1218b4151f71SJohn Baldwin 		/*
1219b4151f71SJohn Baldwin 		 * If we are an orphaned thread, then just die.
1220b4151f71SJohn Baldwin 		 */
1221b4151f71SJohn Baldwin 		if (ithd->it_flags & IT_DEAD) {
1222e0f66ef8SJohn Baldwin 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
12237ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1224b4151f71SJohn Baldwin 			free(ithd, M_ITHREAD);
1225ca9a0ddfSJulian Elischer 			kthread_exit();
1226b4151f71SJohn Baldwin 		}
1227b4151f71SJohn Baldwin 
1228e0f66ef8SJohn Baldwin 		/*
1229e0f66ef8SJohn Baldwin 		 * Service interrupts.  If another interrupt arrives while
1230e0f66ef8SJohn Baldwin 		 * we are running, it will set it_need to note that we
1231e0f66ef8SJohn Baldwin 		 * should make another pass.
1232e0f66ef8SJohn Baldwin 		 */
1233b4151f71SJohn Baldwin 		while (ithd->it_need) {
12348088699fSJohn Baldwin 			/*
1235e0f66ef8SJohn Baldwin 			 * This might need a full read and write barrier
1236e0f66ef8SJohn Baldwin 			 * to make sure that this write posts before any
1237e0f66ef8SJohn Baldwin 			 * of the memory or device accesses in the
1238e0f66ef8SJohn Baldwin 			 * handlers.
12398088699fSJohn Baldwin 			 */
1240b4151f71SJohn Baldwin 			atomic_store_rel_int(&ithd->it_need, 0);
1241e0f66ef8SJohn Baldwin 			ithread_execute_handlers(p, ie);
12428088699fSJohn Baldwin 		}
12437870c3c6SJohn Baldwin 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
12447870c3c6SJohn Baldwin 		mtx_assert(&Giant, MA_NOTOWNED);
12458088699fSJohn Baldwin 
12468088699fSJohn Baldwin 		/*
12478088699fSJohn Baldwin 		 * Processed all our interrupts.  Now get the thread
12488088699fSJohn Baldwin 		 * lock.  This may take a while and it_need may get
12498088699fSJohn Baldwin 		 * set again, so we have to check it again.
12508088699fSJohn Baldwin 		 */
1251982d11f8SJeff Roberson 		thread_lock(td);
1252e0f66ef8SJohn Baldwin 		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
12537870c3c6SJohn Baldwin 			TD_SET_IWAIT(td);
1254e0f66ef8SJohn Baldwin 			ie->ie_count = 0;
12558df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
12568088699fSJohn Baldwin 		}
1257982d11f8SJeff Roberson 		thread_unlock(td);
12588088699fSJohn Baldwin 	}
12591931cf94SJohn Baldwin }
12601ee1b687SJohn Baldwin 
12611ee1b687SJohn Baldwin /*
12621ee1b687SJohn Baldwin  * Main interrupt handling body.
12631ee1b687SJohn Baldwin  *
12641ee1b687SJohn Baldwin  * Input:
12651ee1b687SJohn Baldwin  * o ie:                        the event connected to this interrupt.
12661ee1b687SJohn Baldwin  * o frame:                     some archs (e.g. i386) pass a frame to some
12671ee1b687SJohn Baldwin  *                              handlers as their main argument.
12681ee1b687SJohn Baldwin  * Return value:
12691ee1b687SJohn Baldwin  * o 0:                         everything ok.
12701ee1b687SJohn Baldwin  * o EINVAL:                    stray interrupt.
12711ee1b687SJohn Baldwin  */
12721ee1b687SJohn Baldwin int
12731ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame)
12741ee1b687SJohn Baldwin {
12751ee1b687SJohn Baldwin 	struct intr_handler *ih;
12761ee1b687SJohn Baldwin 	struct thread *td;
12771ee1b687SJohn Baldwin 	int error, ret, thread;
12781ee1b687SJohn Baldwin 
12791ee1b687SJohn Baldwin 	td = curthread;
12801ee1b687SJohn Baldwin 
12811ee1b687SJohn Baldwin 	/* An interrupt with no event or handlers is a stray interrupt. */
12821ee1b687SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
12831ee1b687SJohn Baldwin 		return (EINVAL);
12841ee1b687SJohn Baldwin 
12851ee1b687SJohn Baldwin 	/*
12861ee1b687SJohn Baldwin 	 * Execute fast interrupt handlers directly.
12871ee1b687SJohn Baldwin 	 * To support clock handlers, if a handler registers
12881ee1b687SJohn Baldwin 	 * with a NULL argument, then we pass it a pointer to
12891ee1b687SJohn Baldwin 	 * a trapframe as its argument.
12901ee1b687SJohn Baldwin 	 */
12911ee1b687SJohn Baldwin 	td->td_intr_nesting_level++;
12921ee1b687SJohn Baldwin 	thread = 0;
12931ee1b687SJohn Baldwin 	ret = 0;
12941ee1b687SJohn Baldwin 	critical_enter();
12951ee1b687SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
12961ee1b687SJohn Baldwin 		if (ih->ih_filter == NULL) {
12971ee1b687SJohn Baldwin 			thread = 1;
12981ee1b687SJohn Baldwin 			continue;
12991ee1b687SJohn Baldwin 		}
13001ee1b687SJohn Baldwin 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
13011ee1b687SJohn Baldwin 		    ih->ih_filter, ih->ih_argument == NULL ? frame :
13021ee1b687SJohn Baldwin 		    ih->ih_argument, ih->ih_name);
13031ee1b687SJohn Baldwin 		if (ih->ih_argument == NULL)
13041ee1b687SJohn Baldwin 			ret = ih->ih_filter(frame);
13051ee1b687SJohn Baldwin 		else
13061ee1b687SJohn Baldwin 			ret = ih->ih_filter(ih->ih_argument);
13071ee1b687SJohn Baldwin 		/*
13081ee1b687SJohn Baldwin 		 * Wrapper handler special handling:
13091ee1b687SJohn Baldwin 		 *
13101ee1b687SJohn Baldwin 		 * In some particular cases (like pccard and pccbb),
13111ee1b687SJohn Baldwin 		 * the _real_ device handler is wrapped in a couple of
13121ee1b687SJohn Baldwin 		 * functions - a filter wrapper and an ithread wrapper.
13131ee1b687SJohn Baldwin 		 * In this case (and just in this case), the filter wrapper
13141ee1b687SJohn Baldwin 		 * could ask the system to schedule the ithread and mask
13151ee1b687SJohn Baldwin 		 * the interrupt source if the wrapped handler is composed
13161ee1b687SJohn Baldwin 		 * of just an ithread handler.
13171ee1b687SJohn Baldwin 		 *
13181ee1b687SJohn Baldwin 		 * TODO: write a generic wrapper to avoid people rolling
13191ee1b687SJohn Baldwin 		 * their own
13201ee1b687SJohn Baldwin 		 */
13211ee1b687SJohn Baldwin 		if (!thread) {
13221ee1b687SJohn Baldwin 			if (ret == FILTER_SCHEDULE_THREAD)
13231ee1b687SJohn Baldwin 				thread = 1;
13241ee1b687SJohn Baldwin 		}
13251ee1b687SJohn Baldwin 	}
13261ee1b687SJohn Baldwin 
13271ee1b687SJohn Baldwin 	if (thread) {
13281ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
13291ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
13301ee1b687SJohn Baldwin 	} else {
13311ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
13321ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
13331ee1b687SJohn Baldwin 	}
13341ee1b687SJohn Baldwin 
13351ee1b687SJohn Baldwin 	/* Schedule the ithread if needed. */
13361ee1b687SJohn Baldwin 	if (thread) {
13371ee1b687SJohn Baldwin 		error = intr_event_schedule_thread(ie);
13386205924aSKip Macy #ifndef XEN
13391ee1b687SJohn Baldwin 		KASSERT(error == 0, ("bad stray interrupt"));
13406205924aSKip Macy #else
13416205924aSKip Macy 		if (error != 0)
13426205924aSKip Macy 			log(LOG_WARNING, "bad stray interrupt");
13436205924aSKip Macy #endif
13441ee1b687SJohn Baldwin 	}
13451ee1b687SJohn Baldwin 	critical_exit();
13461ee1b687SJohn Baldwin 	td->td_intr_nesting_level--;
13471ee1b687SJohn Baldwin 	return (0);
13481ee1b687SJohn Baldwin }
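
/*
 * Illustrative sketch, not part of the original file: a driver-side
 * filter/ithread handler pair of the kind intr_event_handle() above
 * dispatches.  The example_* names are hypothetical, and the
 * bus_setup_intr() call assumes the filter-aware signature.
 */
#if 0
struct example_softc {
	struct resource	*irq_res;
	void		*intr_cookie;
};

static int
example_filter(void *arg)
{
	struct example_softc *sc = arg;

	/* Primary interrupt context: may not sleep or use sleep locks. */
	if (!example_hw_interrupted(sc))	/* hypothetical helper */
		return (FILTER_STRAY);
	example_hw_mask(sc);			/* hypothetical helper */
	/* Defer the heavy lifting to the ithread handler below. */
	return (FILTER_SCHEDULE_THREAD);
}

static void
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/* Ithread context: Giant is not taken because of INTR_MPSAFE. */
	example_hw_service(sc);			/* hypothetical helper */
	example_hw_unmask(sc);			/* hypothetical helper */
}

static int
example_setup(device_t dev, struct example_softc *sc)
{

	return (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    example_filter, example_intr, sc, &sc->intr_cookie));
}
#endif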
1349bafe5a31SPaolo Pisati #else
1350bafe5a31SPaolo Pisati /*
1351bafe5a31SPaolo Pisati  * This is the main code for interrupt threads.
1352bafe5a31SPaolo Pisati  */
1353bafe5a31SPaolo Pisati static void
1354bafe5a31SPaolo Pisati ithread_loop(void *arg)
1355bafe5a31SPaolo Pisati {
1356bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
1357bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1358bafe5a31SPaolo Pisati 	struct intr_event *ie;
1359bafe5a31SPaolo Pisati 	struct thread *td;
1360bafe5a31SPaolo Pisati 	struct proc *p;
1361bafe5a31SPaolo Pisati 	int priv;
1362bafe5a31SPaolo Pisati 
1363bafe5a31SPaolo Pisati 	td = curthread;
1364bafe5a31SPaolo Pisati 	p = td->td_proc;
1365bafe5a31SPaolo Pisati 	ih = (struct intr_handler *)arg;
1366bafe5a31SPaolo Pisati 	priv = (ih->ih_thread != NULL) ? 1 : 0;
1367bafe5a31SPaolo Pisati 	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
1368bafe5a31SPaolo Pisati 	KASSERT(ithd->it_thread == td,
1369bafe5a31SPaolo Pisati 	    ("%s: ithread and proc linkage out of sync", __func__));
1370bafe5a31SPaolo Pisati 	ie = ithd->it_event;
1371bafe5a31SPaolo Pisati 	ie->ie_count = 0;
1372bafe5a31SPaolo Pisati 
1373bafe5a31SPaolo Pisati 	/*
1374bafe5a31SPaolo Pisati 	 * As long as we have interrupts outstanding, go through the
1375bafe5a31SPaolo Pisati 	 * list of handlers, giving each one a go at it.
1376bafe5a31SPaolo Pisati 	 */
1377bafe5a31SPaolo Pisati 	for (;;) {
1378bafe5a31SPaolo Pisati 		/*
1379bafe5a31SPaolo Pisati 		 * If we are an orphaned thread, then just die.
1380bafe5a31SPaolo Pisati 		 */
1381bafe5a31SPaolo Pisati 		if (ithd->it_flags & IT_DEAD) {
1382bafe5a31SPaolo Pisati 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
13837ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1384bafe5a31SPaolo Pisati 			free(ithd, M_ITHREAD);
1385ca9a0ddfSJulian Elischer 			kthread_exit();
1386bafe5a31SPaolo Pisati 		}
1387bafe5a31SPaolo Pisati 
1388bafe5a31SPaolo Pisati 		/*
1389bafe5a31SPaolo Pisati 		 * Service interrupts.  If another interrupt arrives while
1390bafe5a31SPaolo Pisati 		 * we are running, it will set it_need to note that we
1391bafe5a31SPaolo Pisati 		 * should make another pass.
1392bafe5a31SPaolo Pisati 		 */
1393bafe5a31SPaolo Pisati 		while (ithd->it_need) {
1394bafe5a31SPaolo Pisati 			/*
1395bafe5a31SPaolo Pisati 			 * This might need a full read and write barrier
1396bafe5a31SPaolo Pisati 			 * to make sure that this write posts before any
1397bafe5a31SPaolo Pisati 			 * of the memory or device accesses in the
1398bafe5a31SPaolo Pisati 			 * handlers.
1399bafe5a31SPaolo Pisati 			 */
1400bafe5a31SPaolo Pisati 			atomic_store_rel_int(&ithd->it_need, 0);
1401bafe5a31SPaolo Pisati 			if (priv)
1402bafe5a31SPaolo Pisati 				priv_ithread_execute_handler(p, ih);
1403bafe5a31SPaolo Pisati 			else
1404bafe5a31SPaolo Pisati 				ithread_execute_handlers(p, ie);
1405bafe5a31SPaolo Pisati 		}
1406bafe5a31SPaolo Pisati 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1407bafe5a31SPaolo Pisati 		mtx_assert(&Giant, MA_NOTOWNED);
1408bafe5a31SPaolo Pisati 
1409bafe5a31SPaolo Pisati 		/*
1410bafe5a31SPaolo Pisati 		 * Processed all our interrupts.  Now get the thread
1411bafe5a31SPaolo Pisati 		 * lock.  This may take a while and it_need may get
1412bafe5a31SPaolo Pisati 		 * set again, so we have to check it again.
1413bafe5a31SPaolo Pisati 		 */
1414982d11f8SJeff Roberson 		thread_lock(td);
1415bafe5a31SPaolo Pisati 		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1416bafe5a31SPaolo Pisati 			TD_SET_IWAIT(td);
1417bafe5a31SPaolo Pisati 			ie->ie_count = 0;
14188df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
1419bafe5a31SPaolo Pisati 		}
1420982d11f8SJeff Roberson 		thread_unlock(td);
1421bafe5a31SPaolo Pisati 	}
1422bafe5a31SPaolo Pisati }
1423bafe5a31SPaolo Pisati 
1424bafe5a31SPaolo Pisati /*
1425bafe5a31SPaolo Pisati  * Main loop for interrupt filter.
1426bafe5a31SPaolo Pisati  *
1427bafe5a31SPaolo Pisati  * Some architectures (i386, amd64 and arm) require the optional frame
1428bafe5a31SPaolo Pisati  * parameter, and use it as the main argument for fast handler execution
1429bafe5a31SPaolo Pisati  * when ih_argument == NULL.
1430bafe5a31SPaolo Pisati  *
1431bafe5a31SPaolo Pisati  * Return value:
1432bafe5a31SPaolo Pisati  * o FILTER_STRAY:              No filter recognized the event, and no
1433bafe5a31SPaolo Pisati  *                              filter-less handler is registered on this
1434bafe5a31SPaolo Pisati  *                              line.
1435bafe5a31SPaolo Pisati  * o FILTER_HANDLED:            A filter claimed the event and served it.
1436bafe5a31SPaolo Pisati  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
1437bafe5a31SPaolo Pisati  *                              least one filter-less handler on this line.
1438bafe5a31SPaolo Pisati  * o FILTER_HANDLED |
1439bafe5a31SPaolo Pisati  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
1440bafe5a31SPaolo Pisati  *                              scheduling the per-handler ithread.
1441bafe5a31SPaolo Pisati  *
1442bafe5a31SPaolo Pisati  * In case an ithread has to be scheduled, in *ithd there will be a
1443bafe5a31SPaolo Pisati  * pointer to a struct intr_thread containing the thread to be
1444bafe5a31SPaolo Pisati  * scheduled.
1445bafe5a31SPaolo Pisati  */
1446bafe5a31SPaolo Pisati 
14471ee1b687SJohn Baldwin static int
1448bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1449bafe5a31SPaolo Pisati 		 struct intr_thread **ithd)
1450bafe5a31SPaolo Pisati {
1451bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1452bafe5a31SPaolo Pisati 	void *arg;
1453bafe5a31SPaolo Pisati 	int ret, thread_only;
1454bafe5a31SPaolo Pisati 
1455bafe5a31SPaolo Pisati 	ret = 0;
1456bafe5a31SPaolo Pisati 	thread_only = 0;
1457bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1458bafe5a31SPaolo Pisati 		/*
1459bafe5a31SPaolo Pisati 		 * Execute fast interrupt handlers directly.
1460bafe5a31SPaolo Pisati 		 * To support clock handlers, if a handler registers
1461bafe5a31SPaolo Pisati 		 * with a NULL argument, then we pass it a pointer to
1462bafe5a31SPaolo Pisati 		 * a trapframe as its argument.
1463bafe5a31SPaolo Pisati 		 */
1464bafe5a31SPaolo Pisati 		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1465bafe5a31SPaolo Pisati 
1466bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1467bafe5a31SPaolo Pisati 		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1468bafe5a31SPaolo Pisati 
1469bafe5a31SPaolo Pisati 		if (ih->ih_filter != NULL)
1470bafe5a31SPaolo Pisati 			ret = ih->ih_filter(arg);
1471bafe5a31SPaolo Pisati 		else {
1472bafe5a31SPaolo Pisati 			thread_only = 1;
1473bafe5a31SPaolo Pisati 			continue;
1474bafe5a31SPaolo Pisati 		}
1475bafe5a31SPaolo Pisati 
1476bafe5a31SPaolo Pisati 		if (ret & FILTER_STRAY)
1477bafe5a31SPaolo Pisati 			continue;
1478bafe5a31SPaolo Pisati 		else {
1479bafe5a31SPaolo Pisati 			*ithd = ih->ih_thread;
1480bafe5a31SPaolo Pisati 			return (ret);
1481bafe5a31SPaolo Pisati 		}
1482bafe5a31SPaolo Pisati 	}
1483bafe5a31SPaolo Pisati 
1484bafe5a31SPaolo Pisati 	/*
1485bafe5a31SPaolo Pisati 	 * No filters handled the interrupt and we have at least
1486bafe5a31SPaolo Pisati 	 * one handler without a filter.  In this case, we schedule
1487bafe5a31SPaolo Pisati 	 * all of the filter-less handlers to run in the ithread.
1488bafe5a31SPaolo Pisati 	 */
1489bafe5a31SPaolo Pisati 	if (thread_only) {
1490bafe5a31SPaolo Pisati 		*ithd = ie->ie_thread;
1491bafe5a31SPaolo Pisati 		return (FILTER_SCHEDULE_THREAD);
1492bafe5a31SPaolo Pisati 	}
1493bafe5a31SPaolo Pisati 	return (FILTER_STRAY);
1494bafe5a31SPaolo Pisati }
1495bafe5a31SPaolo Pisati 
1496bafe5a31SPaolo Pisati /*
1497bafe5a31SPaolo Pisati  * Main interrupt handling body.
1498bafe5a31SPaolo Pisati  *
1499bafe5a31SPaolo Pisati  * Input:
1500bafe5a31SPaolo Pisati  * o ie:                        the event connected to this interrupt.
1501bafe5a31SPaolo Pisati  * o frame:                     some archs (e.g. i386) pass a frame to some
1502bafe5a31SPaolo Pisati  *                              handlers as their main argument.
1503bafe5a31SPaolo Pisati  * Return value:
1504bafe5a31SPaolo Pisati  * o 0:                         everything ok.
1505bafe5a31SPaolo Pisati  * o EINVAL:                    stray interrupt.
1506bafe5a31SPaolo Pisati  */
1507bafe5a31SPaolo Pisati int
1508bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1509bafe5a31SPaolo Pisati {
1510bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
1511bafe5a31SPaolo Pisati 	struct thread *td;
1512bafe5a31SPaolo Pisati 	int thread;
1513bafe5a31SPaolo Pisati 
1514bafe5a31SPaolo Pisati 	ithd = NULL;
1515bafe5a31SPaolo Pisati 	td = curthread;
1516bafe5a31SPaolo Pisati 
1517bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1518bafe5a31SPaolo Pisati 		return (EINVAL);
1519bafe5a31SPaolo Pisati 
1520bafe5a31SPaolo Pisati 	td->td_intr_nesting_level++;
1521bafe5a31SPaolo Pisati 	thread = 0;
1522bafe5a31SPaolo Pisati 	critical_enter();
1523bafe5a31SPaolo Pisati 	thread = intr_filter_loop(ie, frame, &ithd);
1524bafe5a31SPaolo Pisati 	if (thread & FILTER_HANDLED) {
15251ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
15261ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
1527bafe5a31SPaolo Pisati 	} else {
15281ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
15291ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
1530bafe5a31SPaolo Pisati 	}
1531bafe5a31SPaolo Pisati 	critical_exit();
1532bafe5a31SPaolo Pisati 
1533bafe5a31SPaolo Pisati 	/* Interrupt storm logic */
1534bafe5a31SPaolo Pisati 	if (thread & FILTER_STRAY) {
1535bafe5a31SPaolo Pisati 		ie->ie_count++;
1536bafe5a31SPaolo Pisati 		if (ie->ie_count < intr_storm_threshold)
1537bafe5a31SPaolo Pisati 			printf("Interrupt stray detection not present\n");
1538bafe5a31SPaolo Pisati 	}
1539bafe5a31SPaolo Pisati 
1540bafe5a31SPaolo Pisati 	/* Schedule an ithread if needed. */
1541bafe5a31SPaolo Pisati 	if (thread & FILTER_SCHEDULE_THREAD) {
1542bafe5a31SPaolo Pisati 		if (intr_event_schedule_thread(ie, ithd) != 0)
1543bafe5a31SPaolo Pisati 			panic("%s: impossible stray interrupt", __func__);
1544bafe5a31SPaolo Pisati 	}
1545bafe5a31SPaolo Pisati 	td->td_intr_nesting_level--;
1546bafe5a31SPaolo Pisati 	return (0);
1547bafe5a31SPaolo Pisati }
1548bafe5a31SPaolo Pisati #endif
15491931cf94SJohn Baldwin 
15508b201c42SJohn Baldwin #ifdef DDB
15518b201c42SJohn Baldwin /*
15528b201c42SJohn Baldwin  * Dump details about an interrupt handler
15538b201c42SJohn Baldwin  */
15548b201c42SJohn Baldwin static void
1555e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih)
15568b201c42SJohn Baldwin {
15578b201c42SJohn Baldwin 	int comma;
15588b201c42SJohn Baldwin 
15598b201c42SJohn Baldwin 	db_printf("\t%-10s ", ih->ih_name);
15608b201c42SJohn Baldwin 	switch (ih->ih_pri) {
15618b201c42SJohn Baldwin 	case PI_REALTIME:
15628b201c42SJohn Baldwin 		db_printf("CLK ");
15638b201c42SJohn Baldwin 		break;
15648b201c42SJohn Baldwin 	case PI_AV:
15658b201c42SJohn Baldwin 		db_printf("AV  ");
15668b201c42SJohn Baldwin 		break;
15678b201c42SJohn Baldwin 	case PI_TTYHIGH:
15688b201c42SJohn Baldwin 	case PI_TTYLOW:
15698b201c42SJohn Baldwin 		db_printf("TTY ");
15708b201c42SJohn Baldwin 		break;
15718b201c42SJohn Baldwin 	case PI_TAPE:
15728b201c42SJohn Baldwin 		db_printf("TAPE");
15738b201c42SJohn Baldwin 		break;
15748b201c42SJohn Baldwin 	case PI_NET:
15758b201c42SJohn Baldwin 		db_printf("NET ");
15768b201c42SJohn Baldwin 		break;
15778b201c42SJohn Baldwin 	case PI_DISK:
15788b201c42SJohn Baldwin 	case PI_DISKLOW:
15798b201c42SJohn Baldwin 		db_printf("DISK");
15808b201c42SJohn Baldwin 		break;
15818b201c42SJohn Baldwin 	case PI_DULL:
15828b201c42SJohn Baldwin 		db_printf("DULL");
15838b201c42SJohn Baldwin 		break;
15848b201c42SJohn Baldwin 	default:
15858b201c42SJohn Baldwin 		if (ih->ih_pri >= PI_SOFT)
15868b201c42SJohn Baldwin 			db_printf("SWI ");
15878b201c42SJohn Baldwin 		else
15888b201c42SJohn Baldwin 			db_printf("%4u", ih->ih_pri);
15898b201c42SJohn Baldwin 		break;
15908b201c42SJohn Baldwin 	}
15918b201c42SJohn Baldwin 	db_printf(" ");
15928b201c42SJohn Baldwin 	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
15938b201c42SJohn Baldwin 	db_printf("(%p)", ih->ih_argument);
15948b201c42SJohn Baldwin 	if (ih->ih_need ||
1595ef544f63SPaolo Pisati 	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
15968b201c42SJohn Baldwin 	    IH_MPSAFE)) != 0) {
15978b201c42SJohn Baldwin 		db_printf(" {");
15988b201c42SJohn Baldwin 		comma = 0;
15998b201c42SJohn Baldwin 		if (ih->ih_flags & IH_EXCLUSIVE) {
16008b201c42SJohn Baldwin 			if (comma)
16018b201c42SJohn Baldwin 				db_printf(", ");
16028b201c42SJohn Baldwin 			db_printf("EXCL");
16038b201c42SJohn Baldwin 			comma = 1;
16048b201c42SJohn Baldwin 		}
16058b201c42SJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY) {
16068b201c42SJohn Baldwin 			if (comma)
16078b201c42SJohn Baldwin 				db_printf(", ");
16088b201c42SJohn Baldwin 			db_printf("ENTROPY");
16098b201c42SJohn Baldwin 			comma = 1;
16108b201c42SJohn Baldwin 		}
16118b201c42SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
16128b201c42SJohn Baldwin 			if (comma)
16138b201c42SJohn Baldwin 				db_printf(", ");
16148b201c42SJohn Baldwin 			db_printf("DEAD");
16158b201c42SJohn Baldwin 			comma = 1;
16168b201c42SJohn Baldwin 		}
16178b201c42SJohn Baldwin 		if (ih->ih_flags & IH_MPSAFE) {
16188b201c42SJohn Baldwin 			if (comma)
16198b201c42SJohn Baldwin 				db_printf(", ");
16208b201c42SJohn Baldwin 			db_printf("MPSAFE");
16218b201c42SJohn Baldwin 			comma = 1;
16228b201c42SJohn Baldwin 		}
16238b201c42SJohn Baldwin 		if (ih->ih_need) {
16248b201c42SJohn Baldwin 			if (comma)
16258b201c42SJohn Baldwin 				db_printf(", ");
16268b201c42SJohn Baldwin 			db_printf("NEED");
16278b201c42SJohn Baldwin 		}
16288b201c42SJohn Baldwin 		db_printf("}");
16298b201c42SJohn Baldwin 	}
16308b201c42SJohn Baldwin 	db_printf("\n");
16318b201c42SJohn Baldwin }
16328b201c42SJohn Baldwin 
16338b201c42SJohn Baldwin /*
1634e0f66ef8SJohn Baldwin  * Dump details about an interrupt event.
16358b201c42SJohn Baldwin  */
16368b201c42SJohn Baldwin void
1637e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers)
16388b201c42SJohn Baldwin {
1639e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
1640e0f66ef8SJohn Baldwin 	struct intr_thread *it;
16418b201c42SJohn Baldwin 	int comma;
16428b201c42SJohn Baldwin 
1643e0f66ef8SJohn Baldwin 	db_printf("%s ", ie->ie_fullname);
1644e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
1645e0f66ef8SJohn Baldwin 	if (it != NULL)
1646e0f66ef8SJohn Baldwin 		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1647e0f66ef8SJohn Baldwin 	else
1648e0f66ef8SJohn Baldwin 		db_printf("(no thread)");
1649e0f66ef8SJohn Baldwin 	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1650e0f66ef8SJohn Baldwin 	    (it != NULL && it->it_need)) {
16518b201c42SJohn Baldwin 		db_printf(" {");
16528b201c42SJohn Baldwin 		comma = 0;
1653e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_SOFT) {
16548b201c42SJohn Baldwin 			db_printf("SOFT");
16558b201c42SJohn Baldwin 			comma = 1;
16568b201c42SJohn Baldwin 		}
1657e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ENTROPY) {
16588b201c42SJohn Baldwin 			if (comma)
16598b201c42SJohn Baldwin 				db_printf(", ");
16608b201c42SJohn Baldwin 			db_printf("ENTROPY");
16618b201c42SJohn Baldwin 			comma = 1;
16628b201c42SJohn Baldwin 		}
1663e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD) {
16648b201c42SJohn Baldwin 			if (comma)
16658b201c42SJohn Baldwin 				db_printf(", ");
1666e0f66ef8SJohn Baldwin 			db_printf("ADDING_THREAD");
16678b201c42SJohn Baldwin 			comma = 1;
16688b201c42SJohn Baldwin 		}
1669e0f66ef8SJohn Baldwin 		if (it != NULL && it->it_need) {
16708b201c42SJohn Baldwin 			if (comma)
16718b201c42SJohn Baldwin 				db_printf(", ");
16728b201c42SJohn Baldwin 			db_printf("NEED");
16738b201c42SJohn Baldwin 		}
16748b201c42SJohn Baldwin 		db_printf("}");
16758b201c42SJohn Baldwin 	}
16768b201c42SJohn Baldwin 	db_printf("\n");
16778b201c42SJohn Baldwin 
16788b201c42SJohn Baldwin 	if (handlers)
1679e0f66ef8SJohn Baldwin 		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
16808b201c42SJohn Baldwin 		    db_dump_intrhand(ih);
16818b201c42SJohn Baldwin }
1682e0f66ef8SJohn Baldwin 
1683e0f66ef8SJohn Baldwin /*
1684e0f66ef8SJohn Baldwin  * Dump data about interrupt handlers
1685e0f66ef8SJohn Baldwin  */
1686e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr)
1687e0f66ef8SJohn Baldwin {
1688e0f66ef8SJohn Baldwin 	struct intr_event *ie;
168919e9205aSJohn Baldwin 	int all, verbose;
1690e0f66ef8SJohn Baldwin 
1691e0f66ef8SJohn Baldwin 	verbose = index(modif, 'v') != NULL;
1692e0f66ef8SJohn Baldwin 	all = index(modif, 'a') != NULL;
1693e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ie, &event_list, ie_list) {
1694e0f66ef8SJohn Baldwin 		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1695e0f66ef8SJohn Baldwin 			continue;
1696e0f66ef8SJohn Baldwin 		db_dump_intr_event(ie, verbose);
169719e9205aSJohn Baldwin 		if (db_pager_quit)
169819e9205aSJohn Baldwin 			break;
1699e0f66ef8SJohn Baldwin 	}
1700e0f66ef8SJohn Baldwin }
17018b201c42SJohn Baldwin #endif /* DDB */
17028b201c42SJohn Baldwin 
1703b4151f71SJohn Baldwin /*
17048088699fSJohn Baldwin  * Start standard software interrupt threads
17051931cf94SJohn Baldwin  */
17061931cf94SJohn Baldwin static void
1707b4151f71SJohn Baldwin start_softintr(void *dummy)
17081931cf94SJohn Baldwin {
1709b4151f71SJohn Baldwin 
17108d809d50SJeff Roberson 	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
17118d809d50SJeff Roberson 		panic("died while creating vm swi ithread");
17121931cf94SJohn Baldwin }
1713237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1714237fdd78SRobert Watson     NULL);
17151931cf94SJohn Baldwin 
1716d279178dSThomas Moestl /*
1717d279178dSThomas Moestl  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1718d279178dSThomas Moestl  * The data for this is machine dependent, and the declarations are in
1719d279178dSThomas Moestl  * machine-dependent code.  The layout of intrnames and intrcnt, however, is
1720d279178dSThomas Moestl  * machine independent.
1721d279178dSThomas Moestl  *
1722d279178dSThomas Moestl  * We do not know the length of intrcnt and intrnames at compile time, so
1723d279178dSThomas Moestl  * calculate things at run time.
1724d279178dSThomas Moestl  */
1725d279178dSThomas Moestl static int
1726d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1727d279178dSThomas Moestl {
1728d279178dSThomas Moestl 	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
1729d279178dSThomas Moestl 	   req));
1730d279178dSThomas Moestl }
1731d279178dSThomas Moestl 
1732d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
1733d279178dSThomas Moestl     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
1734d279178dSThomas Moestl 
1735d279178dSThomas Moestl static int
1736d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1737d279178dSThomas Moestl {
1738d279178dSThomas Moestl 	return (sysctl_handle_opaque(oidp, intrcnt,
1739d279178dSThomas Moestl 	    (char *)eintrcnt - (char *)intrcnt, req));
1740d279178dSThomas Moestl }
1741d279178dSThomas Moestl 
1742d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
1743d279178dSThomas Moestl     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
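
/*
 * Illustrative userland sketch, not part of the original file: reading
 * the hw.intrnames and hw.intrcnt sysctls exported above, using the
 * same packed-string/u_long layout that db_show_intrcnt() walks below.
 * Error handling is omitted for brevity.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *names;
	u_long *counts;
	size_t nameslen, countslen, i;
	const char *cp;

	/* A NULL buffer makes sysctlbyname() report the required size. */
	nameslen = countslen = 0;
	sysctlbyname("hw.intrnames", NULL, &nameslen, NULL, 0);
	sysctlbyname("hw.intrcnt", NULL, &countslen, NULL, 0);
	names = malloc(nameslen);
	counts = malloc(countslen);
	sysctlbyname("hw.intrnames", names, &nameslen, NULL, 0);
	sysctlbyname("hw.intrcnt", counts, &countslen, NULL, 0);

	/* intrnames is a packed sequence of NUL-terminated strings. */
	cp = names;
	for (i = 0; i < countslen / sizeof(u_long) && *cp != '\0'; i++) {
		if (counts[i] != 0)
			printf("%s\t%lu\n", cp, counts[i]);
		cp += strlen(cp) + 1;
	}
	free(names);
	free(counts);
	return (0);
}
#endif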
17448b201c42SJohn Baldwin 
17458b201c42SJohn Baldwin #ifdef DDB
17468b201c42SJohn Baldwin /*
17478b201c42SJohn Baldwin  * DDB command to dump the interrupt statistics.
17488b201c42SJohn Baldwin  */
17498b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
17508b201c42SJohn Baldwin {
17518b201c42SJohn Baldwin 	u_long *i;
17528b201c42SJohn Baldwin 	char *cp;
17538b201c42SJohn Baldwin 
17548b201c42SJohn Baldwin 	cp = intrnames;
175519e9205aSJohn Baldwin 	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
17568b201c42SJohn Baldwin 		if (*cp == '\0')
17578b201c42SJohn Baldwin 			break;
17588b201c42SJohn Baldwin 		if (*i != 0)
17598b201c42SJohn Baldwin 			db_printf("%s\t%lu\n", cp, *i);
17608b201c42SJohn Baldwin 		cp += strlen(cp) + 1;
17618b201c42SJohn Baldwin 	}
17628b201c42SJohn Baldwin }
17638b201c42SJohn Baldwin #endif
1764