xref: /freebsd/sys/kern/kern_intr.c (revision ba3f7276c035dec45589e1eb257661f83dad9a2b)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4425f9fdaSStefan Eßer  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5425f9fdaSStefan Eßer  * All rights reserved.
6425f9fdaSStefan Eßer  *
7425f9fdaSStefan Eßer  * Redistribution and use in source and binary forms, with or without
8425f9fdaSStefan Eßer  * modification, are permitted provided that the following conditions
9425f9fdaSStefan Eßer  * are met:
10425f9fdaSStefan Eßer  * 1. Redistributions of source code must retain the above copyright
11425f9fdaSStefan Eßer  *    notice unmodified, this list of conditions, and the following
12425f9fdaSStefan Eßer  *    disclaimer.
13425f9fdaSStefan Eßer  * 2. Redistributions in binary form must reproduce the above copyright
14425f9fdaSStefan Eßer  *    notice, this list of conditions and the following disclaimer in the
15425f9fdaSStefan Eßer  *    documentation and/or other materials provided with the distribution.
16425f9fdaSStefan Eßer  *
17425f9fdaSStefan Eßer  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18425f9fdaSStefan Eßer  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19425f9fdaSStefan Eßer  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20425f9fdaSStefan Eßer  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21425f9fdaSStefan Eßer  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22425f9fdaSStefan Eßer  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23425f9fdaSStefan Eßer  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24425f9fdaSStefan Eßer  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25425f9fdaSStefan Eßer  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26425f9fdaSStefan Eßer  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27425f9fdaSStefan Eßer  */
28425f9fdaSStefan Eßer 
29677b542eSDavid E. O'Brien #include <sys/cdefs.h>
30677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
313900ddb2SDoug Rabson 
328b201c42SJohn Baldwin #include "opt_ddb.h"
33b7627840SKonstantin Belousov #include "opt_kstack_usage_prof.h"
348b201c42SJohn Baldwin 
351c5bb3eaSPeter Wemm #include <sys/param.h>
369a94c9c5SJohn Baldwin #include <sys/bus.h>
37c11110eaSAlfred Perlstein #include <sys/conf.h>
389b33b154SJeff Roberson #include <sys/cpuset.h>
399a94c9c5SJohn Baldwin #include <sys/rtprio.h>
40425f9fdaSStefan Eßer #include <sys/systm.h>
4168352337SDoug Rabson #include <sys/interrupt.h>
421931cf94SJohn Baldwin #include <sys/kernel.h>
431931cf94SJohn Baldwin #include <sys/kthread.h>
441931cf94SJohn Baldwin #include <sys/ktr.h>
4505b2c96fSBruce Evans #include <sys/limits.h>
46f34fa851SJohn Baldwin #include <sys/lock.h>
471931cf94SJohn Baldwin #include <sys/malloc.h>
4835e0e5b3SJohn Baldwin #include <sys/mutex.h>
49cebc7fb1SJohn Baldwin #include <sys/priv.h>
501931cf94SJohn Baldwin #include <sys/proc.h>
513e5da754SJohn Baldwin #include <sys/random.h>
52b4151f71SJohn Baldwin #include <sys/resourcevar.h>
5363710c4dSJohn Baldwin #include <sys/sched.h>
54eaf86d16SJohn Baldwin #include <sys/smp.h>
55d279178dSThomas Moestl #include <sys/sysctl.h>
566205924aSKip Macy #include <sys/syslog.h>
571931cf94SJohn Baldwin #include <sys/unistd.h>
581931cf94SJohn Baldwin #include <sys/vmmeter.h>
591931cf94SJohn Baldwin #include <machine/atomic.h>
601931cf94SJohn Baldwin #include <machine/cpu.h>
618088699fSJohn Baldwin #include <machine/md_var.h>
62b4151f71SJohn Baldwin #include <machine/stdarg.h>
638b201c42SJohn Baldwin #ifdef DDB
648b201c42SJohn Baldwin #include <ddb/ddb.h>
658b201c42SJohn Baldwin #include <ddb/db_sym.h>
668b201c42SJohn Baldwin #endif
67425f9fdaSStefan Eßer 
68e0f66ef8SJohn Baldwin /*
69e0f66ef8SJohn Baldwin  * Describe an interrupt thread.  There is one of these per interrupt event.
70e0f66ef8SJohn Baldwin  */
71e0f66ef8SJohn Baldwin struct intr_thread {
72e0f66ef8SJohn Baldwin 	struct intr_event *it_event;
73e0f66ef8SJohn Baldwin 	struct thread *it_thread;	/* Kernel thread. */
74e0f66ef8SJohn Baldwin 	int	it_flags;		/* (j) IT_* flags. */
75e0f66ef8SJohn Baldwin 	int	it_need;		/* Needs service. */
763e5da754SJohn Baldwin };
773e5da754SJohn Baldwin 
78e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */
79e0f66ef8SJohn Baldwin #define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
80e4cd31ddSJeff Roberson #define	IT_WAIT		0x000002	/* Thread is waiting for completion. */
81e0f66ef8SJohn Baldwin 
82e0f66ef8SJohn Baldwin struct	intr_entropy {
83e0f66ef8SJohn Baldwin 	struct	thread *td;
84e0f66ef8SJohn Baldwin 	uintptr_t event;
85e0f66ef8SJohn Baldwin };
86e0f66ef8SJohn Baldwin 
87e0f66ef8SJohn Baldwin struct	intr_event *clk_intr_event;
88e0f66ef8SJohn Baldwin struct	intr_event *tty_intr_event;
897b1fe905SBruce Evans void	*vm_ih;
907ab24ea3SJulian Elischer struct proc *intrproc;
911931cf94SJohn Baldwin 
92b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
93b4151f71SJohn Baldwin 
940ae62c18SNate Lawson static int intr_storm_threshold = 1000;
95af3b2549SHans Petter Selasky SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
967870c3c6SJohn Baldwin     &intr_storm_threshold, 0,
977b1fe905SBruce Evans     "Number of consecutive interrupts before storm protection is enabled");
98e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list =
99e0f66ef8SJohn Baldwin     TAILQ_HEAD_INITIALIZER(event_list);
1009b33b154SJeff Roberson static struct mtx event_lock;
1019b33b154SJeff Roberson MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
1027b1fe905SBruce Evans 
103e0f66ef8SJohn Baldwin static void	intr_event_update(struct intr_event *ie);
104bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1051ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie,
1061ee1b687SJohn Baldwin 		    struct intr_thread *ithd);
1071ee1b687SJohn Baldwin static int	intr_filter_loop(struct intr_event *ie,
1081ee1b687SJohn Baldwin 		    struct trapframe *frame, struct intr_thread **ithd);
109bafe5a31SPaolo Pisati static struct intr_thread *ithread_create(const char *name,
110bafe5a31SPaolo Pisati 			      struct intr_handler *ih);
111bafe5a31SPaolo Pisati #else
1121ee1b687SJohn Baldwin static int	intr_event_schedule_thread(struct intr_event *ie);
113e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name);
114bafe5a31SPaolo Pisati #endif
115e0f66ef8SJohn Baldwin static void	ithread_destroy(struct intr_thread *ithread);
116bafe5a31SPaolo Pisati static void	ithread_execute_handlers(struct proc *p,
117bafe5a31SPaolo Pisati 		    struct intr_event *ie);
118bafe5a31SPaolo Pisati #ifdef INTR_FILTER
119bafe5a31SPaolo Pisati static void	priv_ithread_execute_handler(struct proc *p,
120bafe5a31SPaolo Pisati 		    struct intr_handler *ih);
121bafe5a31SPaolo Pisati #endif
1227b1fe905SBruce Evans static void	ithread_loop(void *);
123e0f66ef8SJohn Baldwin static void	ithread_update(struct intr_thread *ithd);
1247b1fe905SBruce Evans static void	start_softintr(void *);
1257870c3c6SJohn Baldwin 
126bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */
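/*
 * For illustration: a handler registered with INTR_TYPE_NET (typically
 * via bus_setup_intr(9)) gets an ithread priority of PI_NET, while
 * INTR_TYPE_CLK maps to PI_REALTIME.  Exactly one INTR_TYPE_* class must
 * be present in the flags, otherwise the function panics.
 */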
127b4151f71SJohn Baldwin u_char
128e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags)
1299a94c9c5SJohn Baldwin {
130b4151f71SJohn Baldwin 	u_char pri;
1319a94c9c5SJohn Baldwin 
132b4151f71SJohn Baldwin 	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
1335a280d9cSPeter Wemm 	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
1349a94c9c5SJohn Baldwin 	switch (flags) {
135b4151f71SJohn Baldwin 	case INTR_TYPE_TTY:
136d3305205SJohn Baldwin 		pri = PI_TTY;
1379a94c9c5SJohn Baldwin 		break;
1389a94c9c5SJohn Baldwin 	case INTR_TYPE_BIO:
1399a94c9c5SJohn Baldwin 		pri = PI_DISK;
1409a94c9c5SJohn Baldwin 		break;
1419a94c9c5SJohn Baldwin 	case INTR_TYPE_NET:
1429a94c9c5SJohn Baldwin 		pri = PI_NET;
1439a94c9c5SJohn Baldwin 		break;
1449a94c9c5SJohn Baldwin 	case INTR_TYPE_CAM:
145d3305205SJohn Baldwin 		pri = PI_DISK;
1469a94c9c5SJohn Baldwin 		break;
147d3305205SJohn Baldwin 	case INTR_TYPE_AV:
1485a280d9cSPeter Wemm 		pri = PI_AV;
1495a280d9cSPeter Wemm 		break;
150b4151f71SJohn Baldwin 	case INTR_TYPE_CLK:
151b4151f71SJohn Baldwin 		pri = PI_REALTIME;
152b4151f71SJohn Baldwin 		break;
1539a94c9c5SJohn Baldwin 	case INTR_TYPE_MISC:
1549a94c9c5SJohn Baldwin 		pri = PI_DULL;          /* don't care */
1559a94c9c5SJohn Baldwin 		break;
1569a94c9c5SJohn Baldwin 	default:
157b4151f71SJohn Baldwin 		/* We didn't specify an interrupt level. */
158e0f66ef8SJohn Baldwin 		panic("intr_priority: no interrupt type in flags");
1599a94c9c5SJohn Baldwin 	}
1609a94c9c5SJohn Baldwin 
1619a94c9c5SJohn Baldwin 	return pri;
1629a94c9c5SJohn Baldwin }
1639a94c9c5SJohn Baldwin 
164b4151f71SJohn Baldwin /*
165e0f66ef8SJohn Baldwin  * Update an ithread based on the associated intr_event.
166b4151f71SJohn Baldwin  */
167b4151f71SJohn Baldwin static void
168e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd)
169b4151f71SJohn Baldwin {
170e0f66ef8SJohn Baldwin 	struct intr_event *ie;
171b40ce416SJulian Elischer 	struct thread *td;
172e0f66ef8SJohn Baldwin 	u_char pri;
1738088699fSJohn Baldwin 
174e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
175e0f66ef8SJohn Baldwin 	td = ithd->it_thread;
176b4151f71SJohn Baldwin 
177e0f66ef8SJohn Baldwin 	/* Determine the overall priority of this event. */
178e0f66ef8SJohn Baldwin 	if (TAILQ_EMPTY(&ie->ie_handlers))
179e0f66ef8SJohn Baldwin 		pri = PRI_MAX_ITHD;
180e0f66ef8SJohn Baldwin 	else
181e0f66ef8SJohn Baldwin 		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
182e80fb434SRobert Drehmel 
183e0f66ef8SJohn Baldwin 	/* Update name and priority. */
1847ab24ea3SJulian Elischer 	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
18544ad5475SJohn Baldwin #ifdef KTR
18644ad5475SJohn Baldwin 	sched_clear_tdname(td);
18744ad5475SJohn Baldwin #endif
188982d11f8SJeff Roberson 	thread_lock(td);
189e0f66ef8SJohn Baldwin 	sched_prio(td, pri);
190982d11f8SJeff Roberson 	thread_unlock(td);
191b4151f71SJohn Baldwin }
192e0f66ef8SJohn Baldwin 
193e0f66ef8SJohn Baldwin /*
194e0f66ef8SJohn Baldwin  * Regenerate the full name of an interrupt event and update its priority.
195e0f66ef8SJohn Baldwin  */
196e0f66ef8SJohn Baldwin static void
197e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie)
198e0f66ef8SJohn Baldwin {
199e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
200e0f66ef8SJohn Baldwin 	char *last;
201e0f66ef8SJohn Baldwin 	int missed, space;
202e0f66ef8SJohn Baldwin 
203e0f66ef8SJohn Baldwin 	/* Start off with no entropy and just the name of the event. */
204e0f66ef8SJohn Baldwin 	mtx_assert(&ie->ie_lock, MA_OWNED);
205e0f66ef8SJohn Baldwin 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
206e0f66ef8SJohn Baldwin 	ie->ie_flags &= ~IE_ENTROPY;
2070811d60aSJohn Baldwin 	missed = 0;
208e0f66ef8SJohn Baldwin 	space = 1;
209e0f66ef8SJohn Baldwin 
210e0f66ef8SJohn Baldwin 	/* Run through all the handlers updating values. */
211e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
212e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
213e0f66ef8SJohn Baldwin 		    sizeof(ie->ie_fullname)) {
214e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " ");
215e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, ih->ih_name);
216e0f66ef8SJohn Baldwin 			space = 0;
2170811d60aSJohn Baldwin 		} else
2180811d60aSJohn Baldwin 			missed++;
2190811d60aSJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY)
220e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ENTROPY;
2210811d60aSJohn Baldwin 	}
222e0f66ef8SJohn Baldwin 
223e0f66ef8SJohn Baldwin 	/*
224e0f66ef8SJohn Baldwin 	 * If the handler names were too long, add +'s to indicate missing
225e0f66ef8SJohn Baldwin 	 * names. If we run out of room and still have +'s to add, change
226e0f66ef8SJohn Baldwin 	 * the last character from a + to a *.
227e0f66ef8SJohn Baldwin 	 */
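	/*
	 * For example (hypothetical handler names): an event "irq10" whose
	 * first two handlers "em0" and "ahc0" fit, with three more that do
	 * not, ends up as "irq10 em0 ahc0+++"; if the buffer itself runs out
	 * while the +'s are being added, the last '+' becomes a '*' instead.
	 */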
228e0f66ef8SJohn Baldwin 	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
2290811d60aSJohn Baldwin 	while (missed-- > 0) {
230e0f66ef8SJohn Baldwin 		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
231e0f66ef8SJohn Baldwin 			if (*last == '+') {
232e0f66ef8SJohn Baldwin 				*last = '*';
233e0f66ef8SJohn Baldwin 				break;
234b4151f71SJohn Baldwin 			} else
235e0f66ef8SJohn Baldwin 				*last = '+';
236e0f66ef8SJohn Baldwin 		} else if (space) {
237e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, " +");
238e0f66ef8SJohn Baldwin 			space = 0;
239e0f66ef8SJohn Baldwin 		} else
240e0f66ef8SJohn Baldwin 			strcat(ie->ie_fullname, "+");
241b4151f71SJohn Baldwin 	}
242e0f66ef8SJohn Baldwin 
243e0f66ef8SJohn Baldwin 	/*
244e0f66ef8SJohn Baldwin 	 * If this event has an ithread, update its priority and
245e0f66ef8SJohn Baldwin 	 * name.
246e0f66ef8SJohn Baldwin 	 */
247e0f66ef8SJohn Baldwin 	if (ie->ie_thread != NULL)
248e0f66ef8SJohn Baldwin 		ithread_update(ie->ie_thread);
249e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
250b4151f71SJohn Baldwin }
251b4151f71SJohn Baldwin 
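/*
 * Create and register a new interrupt event.  The pre_ithread,
 * post_ithread and post_filter hooks and the assign_cpu callback are
 * supplied by the interrupt controller (PIC) code; the printf-style fmt
 * arguments name the event.  A rough sketch of a caller, with
 * hypothetical callback and variable names (MD interrupt code is the
 * usual consumer):
 *
 *	struct intr_event *ie;
 *
 *	error = intr_event_create(&ie, isrc, 0, irq,
 *	    mask_source, unmask_source, eoi_source, assign_cpu_fn,
 *	    "irq%d:", irq);
 */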
252b4151f71SJohn Baldwin int
2539b33b154SJeff Roberson intr_event_create(struct intr_event **event, void *source, int flags, int irq,
2541ee1b687SJohn Baldwin     void (*pre_ithread)(void *), void (*post_ithread)(void *),
255066da805SAdrian Chadd     void (*post_filter)(void *), int (*assign_cpu)(void *, int),
2561ee1b687SJohn Baldwin     const char *fmt, ...)
257bafe5a31SPaolo Pisati {
258bafe5a31SPaolo Pisati 	struct intr_event *ie;
259bafe5a31SPaolo Pisati 	va_list ap;
260bafe5a31SPaolo Pisati 
261bafe5a31SPaolo Pisati 	/* The only valid flag during creation is IE_SOFT. */
262bafe5a31SPaolo Pisati 	if ((flags & ~IE_SOFT) != 0)
263bafe5a31SPaolo Pisati 		return (EINVAL);
264bafe5a31SPaolo Pisati 	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
265bafe5a31SPaolo Pisati 	ie->ie_source = source;
2661ee1b687SJohn Baldwin 	ie->ie_pre_ithread = pre_ithread;
2671ee1b687SJohn Baldwin 	ie->ie_post_ithread = post_ithread;
2681ee1b687SJohn Baldwin 	ie->ie_post_filter = post_filter;
2696d2d1c04SJohn Baldwin 	ie->ie_assign_cpu = assign_cpu;
270bafe5a31SPaolo Pisati 	ie->ie_flags = flags;
2719b33b154SJeff Roberson 	ie->ie_irq = irq;
272eaf86d16SJohn Baldwin 	ie->ie_cpu = NOCPU;
273bafe5a31SPaolo Pisati 	TAILQ_INIT(&ie->ie_handlers);
274bafe5a31SPaolo Pisati 	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
275bafe5a31SPaolo Pisati 
276bafe5a31SPaolo Pisati 	va_start(ap, fmt);
277bafe5a31SPaolo Pisati 	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
278bafe5a31SPaolo Pisati 	va_end(ap);
279bafe5a31SPaolo Pisati 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
2809b33b154SJeff Roberson 	mtx_lock(&event_lock);
281bafe5a31SPaolo Pisati 	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
2829b33b154SJeff Roberson 	mtx_unlock(&event_lock);
283bafe5a31SPaolo Pisati 	if (event != NULL)
284bafe5a31SPaolo Pisati 		*event = ie;
285bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
286bafe5a31SPaolo Pisati 	return (0);
287bafe5a31SPaolo Pisati }
288b4151f71SJohn Baldwin 
289eaf86d16SJohn Baldwin /*
290eaf86d16SJohn Baldwin  * Bind an interrupt event to the specified CPU.  Note that not all
291eaf86d16SJohn Baldwin  * platforms support binding an interrupt to a CPU.  For those
29229dfb631SConrad Meyer  * platforms this request will fail.  Using a cpu id of NOCPU unbinds
293eaf86d16SJohn Baldwin  * the interrupt event.
294eaf86d16SJohn Baldwin  */
29529dfb631SConrad Meyer static int
29629dfb631SConrad Meyer _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
297eaf86d16SJohn Baldwin {
2989b33b154SJeff Roberson 	lwpid_t id;
299eaf86d16SJohn Baldwin 	int error;
300eaf86d16SJohn Baldwin 
301eaf86d16SJohn Baldwin 	/* Need a CPU to bind to. */
302eaf86d16SJohn Baldwin 	if (cpu != NOCPU && CPU_ABSENT(cpu))
303eaf86d16SJohn Baldwin 		return (EINVAL);
304eaf86d16SJohn Baldwin 
305eaf86d16SJohn Baldwin 	if (ie->ie_assign_cpu == NULL)
306eaf86d16SJohn Baldwin 		return (EOPNOTSUPP);
307cebc7fb1SJohn Baldwin 
308cebc7fb1SJohn Baldwin 	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
309cebc7fb1SJohn Baldwin 	if (error)
310cebc7fb1SJohn Baldwin 		return (error);
311cebc7fb1SJohn Baldwin 
3129b33b154SJeff Roberson 	/*
313cebc7fb1SJohn Baldwin 	 * If we have any ithreads, try to set their mask first to verify
314cebc7fb1SJohn Baldwin 	 * permissions, etc.
3159b33b154SJeff Roberson 	 */
31629dfb631SConrad Meyer 	if (bindithread) {
317eaf86d16SJohn Baldwin 		mtx_lock(&ie->ie_lock);
3189b33b154SJeff Roberson 		if (ie->ie_thread != NULL) {
3199b33b154SJeff Roberson 			id = ie->ie_thread->it_thread->td_tid;
320eaf86d16SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
32181198539SAlexander V. Chernikov 			error = cpuset_setithread(id, cpu);
3229b33b154SJeff Roberson 			if (error)
3239b33b154SJeff Roberson 				return (error);
3249b33b154SJeff Roberson 		} else
325eaf86d16SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
32629dfb631SConrad Meyer 	}
32729dfb631SConrad Meyer 	if (bindirq)
328eaf86d16SJohn Baldwin 		error = ie->ie_assign_cpu(ie->ie_source, cpu);
329cebc7fb1SJohn Baldwin 	if (error) {
33029dfb631SConrad Meyer 		if (bindithread) {
331cebc7fb1SJohn Baldwin 			mtx_lock(&ie->ie_lock);
332cebc7fb1SJohn Baldwin 			if (ie->ie_thread != NULL) {
33381198539SAlexander V. Chernikov 				cpu = ie->ie_cpu;
334cebc7fb1SJohn Baldwin 				id = ie->ie_thread->it_thread->td_tid;
335cebc7fb1SJohn Baldwin 				mtx_unlock(&ie->ie_lock);
33681198539SAlexander V. Chernikov 				(void)cpuset_setithread(id, cpu);
337cebc7fb1SJohn Baldwin 			} else
338cebc7fb1SJohn Baldwin 				mtx_unlock(&ie->ie_lock);
33929dfb631SConrad Meyer 		}
340eaf86d16SJohn Baldwin 		return (error);
341cebc7fb1SJohn Baldwin 	}
342cebc7fb1SJohn Baldwin 
34329dfb631SConrad Meyer 	if (bindirq) {
344eaf86d16SJohn Baldwin 		mtx_lock(&ie->ie_lock);
345eaf86d16SJohn Baldwin 		ie->ie_cpu = cpu;
3469b33b154SJeff Roberson 		mtx_unlock(&ie->ie_lock);
34729dfb631SConrad Meyer 	}
3489b33b154SJeff Roberson 
3499b33b154SJeff Roberson 	return (error);
3509b33b154SJeff Roberson }
3519b33b154SJeff Roberson 
35229dfb631SConrad Meyer /*
35329dfb631SConrad Meyer  * Bind an interrupt event to the specified CPU.  For supported platforms, any
35429dfb631SConrad Meyer  * associated ithreads as well as the primary interrupt context will be bound
35529dfb631SConrad Meyer  * to the specified CPU.
35629dfb631SConrad Meyer  */
35729dfb631SConrad Meyer int
35829dfb631SConrad Meyer intr_event_bind(struct intr_event *ie, int cpu)
35929dfb631SConrad Meyer {
36029dfb631SConrad Meyer 
36129dfb631SConrad Meyer 	return (_intr_event_bind(ie, cpu, true, true));
36229dfb631SConrad Meyer }
36329dfb631SConrad Meyer 
36429dfb631SConrad Meyer /*
36529dfb631SConrad Meyer  * Bind an interrupt event to the specified CPU, but do not bind associated
36629dfb631SConrad Meyer  * ithreads.
36729dfb631SConrad Meyer  */
36829dfb631SConrad Meyer int
36929dfb631SConrad Meyer intr_event_bind_irqonly(struct intr_event *ie, int cpu)
37029dfb631SConrad Meyer {
37129dfb631SConrad Meyer 
37229dfb631SConrad Meyer 	return (_intr_event_bind(ie, cpu, true, false));
37329dfb631SConrad Meyer }
37429dfb631SConrad Meyer 
37529dfb631SConrad Meyer /*
37629dfb631SConrad Meyer  * Bind an interrupt event's ithread to the specified CPU.
37729dfb631SConrad Meyer  */
37829dfb631SConrad Meyer int
37929dfb631SConrad Meyer intr_event_bind_ithread(struct intr_event *ie, int cpu)
38029dfb631SConrad Meyer {
38129dfb631SConrad Meyer 
38229dfb631SConrad Meyer 	return (_intr_event_bind(ie, cpu, false, true));
38329dfb631SConrad Meyer }
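/*
 * Device drivers normally do not call the binding functions above
 * directly; the usual route is bus_bind_intr(9), which resolves the IRQ
 * resource to its intr_event and ends up in intr_event_bind().  A minimal
 * sketch, assuming a hypothetical softc "sc":
 *
 *	error = bus_bind_intr(sc->dev, sc->irq_res, cpu);
 */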
38429dfb631SConrad Meyer 
3859b33b154SJeff Roberson static struct intr_event *
3869b33b154SJeff Roberson intr_lookup(int irq)
3879b33b154SJeff Roberson {
3889b33b154SJeff Roberson 	struct intr_event *ie;
3899b33b154SJeff Roberson 
3909b33b154SJeff Roberson 	mtx_lock(&event_lock);
3919b33b154SJeff Roberson 	TAILQ_FOREACH(ie, &event_list, ie_list)
3929b33b154SJeff Roberson 		if (ie->ie_irq == irq &&
3939b33b154SJeff Roberson 		    (ie->ie_flags & IE_SOFT) == 0 &&
3949b33b154SJeff Roberson 		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
3959b33b154SJeff Roberson 			break;
3969b33b154SJeff Roberson 	mtx_unlock(&event_lock);
3979b33b154SJeff Roberson 	return (ie);
3989b33b154SJeff Roberson }
3999b33b154SJeff Roberson 
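/*
 * Set the affinity of an interrupt (CPU_WHICH_IRQ), of its primary
 * interrupt context only (CPU_WHICH_INTRHANDLER), or of its ithread only
 * (CPU_WHICH_ITHREAD).  The mask must either equal cpuset_root, which
 * unbinds the interrupt, or contain exactly one CPU.  This is normally
 * reached from the cpuset(2) system call code rather than called
 * directly.
 */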
4009b33b154SJeff Roberson int
40129dfb631SConrad Meyer intr_setaffinity(int irq, int mode, void *m)
4029b33b154SJeff Roberson {
4039b33b154SJeff Roberson 	struct intr_event *ie;
4049b33b154SJeff Roberson 	cpuset_t *mask;
4053fe93b94SAdrian Chadd 	int cpu, n;
4069b33b154SJeff Roberson 
4079b33b154SJeff Roberson 	mask = m;
4089b33b154SJeff Roberson 	cpu = NOCPU;
4099b33b154SJeff Roberson 	/*
4109b33b154SJeff Roberson 	 * If we're setting all cpus we can unbind.  Otherwise make sure
4119b33b154SJeff Roberson 	 * only one cpu is in the set.
4129b33b154SJeff Roberson 	 */
4139b33b154SJeff Roberson 	if (CPU_CMP(cpuset_root, mask)) {
4149b33b154SJeff Roberson 		for (n = 0; n < CPU_SETSIZE; n++) {
4159b33b154SJeff Roberson 			if (!CPU_ISSET(n, mask))
4169b33b154SJeff Roberson 				continue;
4179b33b154SJeff Roberson 			if (cpu != NOCPU)
4189b33b154SJeff Roberson 				return (EINVAL);
4193fe93b94SAdrian Chadd 			cpu = n;
4209b33b154SJeff Roberson 		}
4219b33b154SJeff Roberson 	}
4229b33b154SJeff Roberson 	ie = intr_lookup(irq);
4239b33b154SJeff Roberson 	if (ie == NULL)
4249b33b154SJeff Roberson 		return (ESRCH);
42529dfb631SConrad Meyer 	switch (mode) {
42629dfb631SConrad Meyer 	case CPU_WHICH_IRQ:
4279bd55acfSJohn Baldwin 		return (intr_event_bind(ie, cpu));
42829dfb631SConrad Meyer 	case CPU_WHICH_INTRHANDLER:
42929dfb631SConrad Meyer 		return (intr_event_bind_irqonly(ie, cpu));
43029dfb631SConrad Meyer 	case CPU_WHICH_ITHREAD:
43129dfb631SConrad Meyer 		return (intr_event_bind_ithread(ie, cpu));
43229dfb631SConrad Meyer 	default:
43329dfb631SConrad Meyer 		return (EINVAL);
43429dfb631SConrad Meyer 	}
4359b33b154SJeff Roberson }
4369b33b154SJeff Roberson 
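/*
 * Retrieve the current affinity mask of an interrupt, of its primary
 * interrupt context, or of its ithread, again selected by mode.  An
 * unbound interrupt reports the root cpuset.
 */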
4379b33b154SJeff Roberson int
43829dfb631SConrad Meyer intr_getaffinity(int irq, int mode, void *m)
4399b33b154SJeff Roberson {
4409b33b154SJeff Roberson 	struct intr_event *ie;
44129dfb631SConrad Meyer 	struct thread *td;
44229dfb631SConrad Meyer 	struct proc *p;
4439b33b154SJeff Roberson 	cpuset_t *mask;
44429dfb631SConrad Meyer 	lwpid_t id;
44529dfb631SConrad Meyer 	int error;
4469b33b154SJeff Roberson 
4479b33b154SJeff Roberson 	mask = m;
4489b33b154SJeff Roberson 	ie = intr_lookup(irq);
4499b33b154SJeff Roberson 	if (ie == NULL)
4509b33b154SJeff Roberson 		return (ESRCH);
45129dfb631SConrad Meyer 
45229dfb631SConrad Meyer 	error = 0;
4539b33b154SJeff Roberson 	CPU_ZERO(mask);
45429dfb631SConrad Meyer 	switch (mode) {
45529dfb631SConrad Meyer 	case CPU_WHICH_IRQ:
45629dfb631SConrad Meyer 	case CPU_WHICH_INTRHANDLER:
4579b33b154SJeff Roberson 		mtx_lock(&ie->ie_lock);
4589b33b154SJeff Roberson 		if (ie->ie_cpu == NOCPU)
4599b33b154SJeff Roberson 			CPU_COPY(cpuset_root, mask);
4609b33b154SJeff Roberson 		else
4619b33b154SJeff Roberson 			CPU_SET(ie->ie_cpu, mask);
462eaf86d16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
46329dfb631SConrad Meyer 		break;
46429dfb631SConrad Meyer 	case CPU_WHICH_ITHREAD:
46529dfb631SConrad Meyer 		mtx_lock(&ie->ie_lock);
46629dfb631SConrad Meyer 		if (ie->ie_thread == NULL) {
46729dfb631SConrad Meyer 			mtx_unlock(&ie->ie_lock);
46829dfb631SConrad Meyer 			CPU_COPY(cpuset_root, mask);
46929dfb631SConrad Meyer 		} else {
47029dfb631SConrad Meyer 			id = ie->ie_thread->it_thread->td_tid;
47129dfb631SConrad Meyer 			mtx_unlock(&ie->ie_lock);
47229dfb631SConrad Meyer 			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
47329dfb631SConrad Meyer 			if (error != 0)
47429dfb631SConrad Meyer 				return (error);
47529dfb631SConrad Meyer 			CPU_COPY(&td->td_cpuset->cs_mask, mask);
47629dfb631SConrad Meyer 			PROC_UNLOCK(p);
47729dfb631SConrad Meyer 		}
		break;
47829dfb631SConrad Meyer 	default:
47929dfb631SConrad Meyer 		return (EINVAL);
48029dfb631SConrad Meyer 	}
481eaf86d16SJohn Baldwin 	return (0);
482eaf86d16SJohn Baldwin }
483eaf86d16SJohn Baldwin 
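/*
 * Destroy an interrupt event that no longer has any handlers attached,
 * tearing down its ithread (if any) and freeing the event.  Returns EBUSY
 * if handlers are still present.
 */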
484b4151f71SJohn Baldwin int
485e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie)
486b4151f71SJohn Baldwin {
487b4151f71SJohn Baldwin 
4889b33b154SJeff Roberson 	mtx_lock(&event_lock);
489e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
490e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
491e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
4929b33b154SJeff Roberson 		mtx_unlock(&event_lock);
493e0f66ef8SJohn Baldwin 		return (EBUSY);
4944d29cb2dSJohn Baldwin 	}
495e0f66ef8SJohn Baldwin 	TAILQ_REMOVE(&event_list, ie, ie_list);
4969477358dSJohn Baldwin #ifndef notyet
4979477358dSJohn Baldwin 	if (ie->ie_thread != NULL) {
4989477358dSJohn Baldwin 		ithread_destroy(ie->ie_thread);
4999477358dSJohn Baldwin 		ie->ie_thread = NULL;
5009477358dSJohn Baldwin 	}
5019477358dSJohn Baldwin #endif
502e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
5039b33b154SJeff Roberson 	mtx_unlock(&event_lock);
504e0f66ef8SJohn Baldwin 	mtx_destroy(&ie->ie_lock);
505e0f66ef8SJohn Baldwin 	free(ie, M_ITHREAD);
506e0f66ef8SJohn Baldwin 	return (0);
507e0f66ef8SJohn Baldwin }
508e0f66ef8SJohn Baldwin 
509bafe5a31SPaolo Pisati #ifndef INTR_FILTER
510e0f66ef8SJohn Baldwin static struct intr_thread *
511e0f66ef8SJohn Baldwin ithread_create(const char *name)
512e0f66ef8SJohn Baldwin {
513e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
514e0f66ef8SJohn Baldwin 	struct thread *td;
515e0f66ef8SJohn Baldwin 	int error;
516e0f66ef8SJohn Baldwin 
517e0f66ef8SJohn Baldwin 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
518e0f66ef8SJohn Baldwin 
5197ab24ea3SJulian Elischer 	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
5207ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
5219ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
522e0f66ef8SJohn Baldwin 	if (error)
5233745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
524982d11f8SJeff Roberson 	thread_lock(td);
525ad1e7d28SJulian Elischer 	sched_class(td, PRI_ITHD);
526e0f66ef8SJohn Baldwin 	TD_SET_IWAIT(td);
527982d11f8SJeff Roberson 	thread_unlock(td);
528e0f66ef8SJohn Baldwin 	td->td_pflags |= TDP_ITHREAD;
529e0f66ef8SJohn Baldwin 	ithd->it_thread = td;
530e0f66ef8SJohn Baldwin 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
531e0f66ef8SJohn Baldwin 	return (ithd);
532e0f66ef8SJohn Baldwin }
533bafe5a31SPaolo Pisati #else
534bafe5a31SPaolo Pisati static struct intr_thread *
535bafe5a31SPaolo Pisati ithread_create(const char *name, struct intr_handler *ih)
536bafe5a31SPaolo Pisati {
537bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
538bafe5a31SPaolo Pisati 	struct thread *td;
539bafe5a31SPaolo Pisati 	int error;
540bafe5a31SPaolo Pisati 
541bafe5a31SPaolo Pisati 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
542bafe5a31SPaolo Pisati 
543539976ffSJulian Elischer 	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
5447ab24ea3SJulian Elischer 		    &td, RFSTOPPED | RFHIGHPID,
5459ef95d01SJulian Elischer 	    	    0, "intr", "%s", name);
546bafe5a31SPaolo Pisati 	if (error)
5473745c395SJulian Elischer 		panic("kproc_create() failed with %d", error);
548982d11f8SJeff Roberson 	thread_lock(td);
549bafe5a31SPaolo Pisati 	sched_class(td, PRI_ITHD);
550bafe5a31SPaolo Pisati 	TD_SET_IWAIT(td);
551982d11f8SJeff Roberson 	thread_unlock(td);
552bafe5a31SPaolo Pisati 	td->td_pflags |= TDP_ITHREAD;
553bafe5a31SPaolo Pisati 	ithd->it_thread = td;
554bafe5a31SPaolo Pisati 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
555bafe5a31SPaolo Pisati 	return (ithd);
556bafe5a31SPaolo Pisati }
557bafe5a31SPaolo Pisati #endif
558e0f66ef8SJohn Baldwin 
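/*
 * Flag an interrupt thread as dead and, if it is idle in IWAIT, put it
 * back on the run queue so that it can notice IT_DEAD and exit.
 */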
559e0f66ef8SJohn Baldwin static void
560e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread)
561e0f66ef8SJohn Baldwin {
562e0f66ef8SJohn Baldwin 	struct thread *td;
563e0f66ef8SJohn Baldwin 
564bb141be1SScott Long 	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
565e0f66ef8SJohn Baldwin 	td = ithread->it_thread;
566982d11f8SJeff Roberson 	thread_lock(td);
567e0f66ef8SJohn Baldwin 	ithread->it_flags |= IT_DEAD;
56871fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
56971fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
570f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
571b4151f71SJohn Baldwin 	}
572982d11f8SJeff Roberson 	thread_unlock(td);
573b4151f71SJohn Baldwin }
574b4151f71SJohn Baldwin 
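/*
 * Add an interrupt handler (a filter, a threaded handler, or both) to an
 * interrupt event, creating an ithread on demand when a threaded handler
 * is supplied.  Most drivers reach this indirectly through
 * bus_setup_intr(9); a rough sketch with hypothetical driver names:
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, foo_filter, foo_intr, sc,
 *	    &sc->intrhand);
 */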
575bafe5a31SPaolo Pisati #ifndef INTR_FILTER
576b4151f71SJohn Baldwin int
577e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name,
578ef544f63SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
579ef544f63SPaolo Pisati     enum intr_type flags, void **cookiep)
580b4151f71SJohn Baldwin {
581e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *temp_ih;
582e0f66ef8SJohn Baldwin 	struct intr_thread *it;
583b4151f71SJohn Baldwin 
584ef544f63SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
585b4151f71SJohn Baldwin 		return (EINVAL);
586b4151f71SJohn Baldwin 
587e0f66ef8SJohn Baldwin 	/* Allocate and populate an interrupt handler structure. */
588e0f66ef8SJohn Baldwin 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
589ef544f63SPaolo Pisati 	ih->ih_filter = filter;
590b4151f71SJohn Baldwin 	ih->ih_handler = handler;
591b4151f71SJohn Baldwin 	ih->ih_argument = arg;
59237b8ef16SJohn Baldwin 	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
593e0f66ef8SJohn Baldwin 	ih->ih_event = ie;
594b4151f71SJohn Baldwin 	ih->ih_pri = pri;
595ef544f63SPaolo Pisati 	if (flags & INTR_EXCL)
596b4151f71SJohn Baldwin 		ih->ih_flags = IH_EXCLUSIVE;
597b4151f71SJohn Baldwin 	if (flags & INTR_MPSAFE)
598b4151f71SJohn Baldwin 		ih->ih_flags |= IH_MPSAFE;
599b4151f71SJohn Baldwin 	if (flags & INTR_ENTROPY)
600b4151f71SJohn Baldwin 		ih->ih_flags |= IH_ENTROPY;
601b4151f71SJohn Baldwin 
602e0f66ef8SJohn Baldwin 	/* We can only have one exclusive handler in an event. */
603e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
604e0f66ef8SJohn Baldwin 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
605e0f66ef8SJohn Baldwin 		if ((flags & INTR_EXCL) ||
606e0f66ef8SJohn Baldwin 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
607e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
608b4151f71SJohn Baldwin 			free(ih, M_ITHREAD);
609b4151f71SJohn Baldwin 			return (EINVAL);
610b4151f71SJohn Baldwin 		}
611e0f66ef8SJohn Baldwin 	}
612e0f66ef8SJohn Baldwin 
613e0f66ef8SJohn Baldwin 	/* Create a thread if we need one. */
614ef544f63SPaolo Pisati 	while (ie->ie_thread == NULL && handler != NULL) {
615e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD)
6160f180a7cSJohn Baldwin 			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
617e0f66ef8SJohn Baldwin 		else {
618e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ADDING_THREAD;
619e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
620e0f66ef8SJohn Baldwin 			it = ithread_create("intr: newborn");
621e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
622e0f66ef8SJohn Baldwin 			ie->ie_flags &= ~IE_ADDING_THREAD;
623e0f66ef8SJohn Baldwin 			ie->ie_thread = it;
624e0f66ef8SJohn Baldwin 			it->it_event = ie;
625e0f66ef8SJohn Baldwin 			ithread_update(it);
626e0f66ef8SJohn Baldwin 			wakeup(ie);
627e0f66ef8SJohn Baldwin 		}
628e0f66ef8SJohn Baldwin 	}
629c9516c94SAlexander Kabaev 
630c9516c94SAlexander Kabaev 	/* Add the new handler to the event in priority order. */
631c9516c94SAlexander Kabaev 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
632c9516c94SAlexander Kabaev 		if (temp_ih->ih_pri > ih->ih_pri)
633c9516c94SAlexander Kabaev 			break;
634c9516c94SAlexander Kabaev 	}
635c9516c94SAlexander Kabaev 	if (temp_ih == NULL)
636c9516c94SAlexander Kabaev 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
637c9516c94SAlexander Kabaev 	else
638c9516c94SAlexander Kabaev 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
639c9516c94SAlexander Kabaev 	intr_event_update(ie);
640c9516c94SAlexander Kabaev 
641e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
642e0f66ef8SJohn Baldwin 	    ie->ie_name);
643e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
644e0f66ef8SJohn Baldwin 
645e0f66ef8SJohn Baldwin 	if (cookiep != NULL)
646e0f66ef8SJohn Baldwin 		*cookiep = ih;
647e0f66ef8SJohn Baldwin 	return (0);
648e0f66ef8SJohn Baldwin }
649bafe5a31SPaolo Pisati #else
650bafe5a31SPaolo Pisati int
651bafe5a31SPaolo Pisati intr_event_add_handler(struct intr_event *ie, const char *name,
652bafe5a31SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
653bafe5a31SPaolo Pisati     enum intr_type flags, void **cookiep)
654bafe5a31SPaolo Pisati {
655bafe5a31SPaolo Pisati 	struct intr_handler *ih, *temp_ih;
656bafe5a31SPaolo Pisati 	struct intr_thread *it;
657bafe5a31SPaolo Pisati 
658bafe5a31SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
659bafe5a31SPaolo Pisati 		return (EINVAL);
660bafe5a31SPaolo Pisati 
661bafe5a31SPaolo Pisati 	/* Allocate and populate an interrupt handler structure. */
662bafe5a31SPaolo Pisati 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
663bafe5a31SPaolo Pisati 	ih->ih_filter = filter;
664bafe5a31SPaolo Pisati 	ih->ih_handler = handler;
665bafe5a31SPaolo Pisati 	ih->ih_argument = arg;
66637b8ef16SJohn Baldwin 	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
667bafe5a31SPaolo Pisati 	ih->ih_event = ie;
668bafe5a31SPaolo Pisati 	ih->ih_pri = pri;
669bafe5a31SPaolo Pisati 	if (flags & INTR_EXCL)
670bafe5a31SPaolo Pisati 		ih->ih_flags = IH_EXCLUSIVE;
671bafe5a31SPaolo Pisati 	if (flags & INTR_MPSAFE)
672bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_MPSAFE;
673bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
674bafe5a31SPaolo Pisati 		ih->ih_flags |= IH_ENTROPY;
675bafe5a31SPaolo Pisati 
676bafe5a31SPaolo Pisati 	/* We can only have one exclusive handler in an event. */
677bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
678bafe5a31SPaolo Pisati 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
679bafe5a31SPaolo Pisati 		if ((flags & INTR_EXCL) ||
680bafe5a31SPaolo Pisati 		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
681bafe5a31SPaolo Pisati 			mtx_unlock(&ie->ie_lock);
682bafe5a31SPaolo Pisati 			free(ih, M_ITHREAD);
683bafe5a31SPaolo Pisati 			return (EINVAL);
684bafe5a31SPaolo Pisati 		}
685bafe5a31SPaolo Pisati 	}
686bafe5a31SPaolo Pisati 
687bafe5a31SPaolo Pisati 	/* For filtered handlers, create a private ithread to run on. */
688bafe5a31SPaolo Pisati 	if (filter != NULL && handler != NULL) {
689bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
690bafe5a31SPaolo Pisati 		it = ithread_create("intr: newborn", ih);
691bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
692bafe5a31SPaolo Pisati 		it->it_event = ie;
693bafe5a31SPaolo Pisati 		ih->ih_thread = it;
694037f43d3SSergey Kandaurov 		ithread_update(it); /* XXX - do we really need this?!?!? */
695bafe5a31SPaolo Pisati 	} else { /* Create the global per-event thread if we need one. */
696bafe5a31SPaolo Pisati 		while (ie->ie_thread == NULL && handler != NULL) {
697bafe5a31SPaolo Pisati 			if (ie->ie_flags & IE_ADDING_THREAD)
698bafe5a31SPaolo Pisati 				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
699bafe5a31SPaolo Pisati 			else {
700bafe5a31SPaolo Pisati 				ie->ie_flags |= IE_ADDING_THREAD;
701bafe5a31SPaolo Pisati 				mtx_unlock(&ie->ie_lock);
702bafe5a31SPaolo Pisati 				it = ithread_create("intr: newborn", ih);
703bafe5a31SPaolo Pisati 				mtx_lock(&ie->ie_lock);
704bafe5a31SPaolo Pisati 				ie->ie_flags &= ~IE_ADDING_THREAD;
705bafe5a31SPaolo Pisati 				ie->ie_thread = it;
706bafe5a31SPaolo Pisati 				it->it_event = ie;
707bafe5a31SPaolo Pisati 				ithread_update(it);
708bafe5a31SPaolo Pisati 				wakeup(ie);
709bafe5a31SPaolo Pisati 			}
710bafe5a31SPaolo Pisati 		}
711bafe5a31SPaolo Pisati 	}
712c9516c94SAlexander Kabaev 
713c9516c94SAlexander Kabaev 	/* Add the new handler to the event in priority order. */
714c9516c94SAlexander Kabaev 	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
715c9516c94SAlexander Kabaev 		if (temp_ih->ih_pri > ih->ih_pri)
716c9516c94SAlexander Kabaev 			break;
717c9516c94SAlexander Kabaev 	}
718c9516c94SAlexander Kabaev 	if (temp_ih == NULL)
719c9516c94SAlexander Kabaev 		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
720c9516c94SAlexander Kabaev 	else
721c9516c94SAlexander Kabaev 		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
722c9516c94SAlexander Kabaev 	intr_event_update(ie);
723c9516c94SAlexander Kabaev 
724bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
725bafe5a31SPaolo Pisati 	    ie->ie_name);
726bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
727bafe5a31SPaolo Pisati 
728bafe5a31SPaolo Pisati 	if (cookiep != NULL)
729bafe5a31SPaolo Pisati 		*cookiep = ih;
730bafe5a31SPaolo Pisati 	return (0);
731bafe5a31SPaolo Pisati }
732bafe5a31SPaolo Pisati #endif
733b4151f71SJohn Baldwin 
734c3045318SJohn Baldwin /*
73537b8ef16SJohn Baldwin  * Append a description preceded by a ':' to the name of the specified
73637b8ef16SJohn Baldwin  * interrupt handler.
73737b8ef16SJohn Baldwin  */
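/*
 * For example (hypothetical names), describing the handler "em0" with
 * "rxq0" yields "em0:rxq0"; describing it again with "txq0" replaces the
 * old description, giving "em0:txq0".  Drivers normally reach this
 * through bus_describe_intr(9).
 */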
73837b8ef16SJohn Baldwin int
73937b8ef16SJohn Baldwin intr_event_describe_handler(struct intr_event *ie, void *cookie,
74037b8ef16SJohn Baldwin     const char *descr)
74137b8ef16SJohn Baldwin {
74237b8ef16SJohn Baldwin 	struct intr_handler *ih;
74337b8ef16SJohn Baldwin 	size_t space;
74437b8ef16SJohn Baldwin 	char *start;
74537b8ef16SJohn Baldwin 
74637b8ef16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
74737b8ef16SJohn Baldwin #ifdef INVARIANTS
74837b8ef16SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
74937b8ef16SJohn Baldwin 		if (ih == cookie)
75037b8ef16SJohn Baldwin 			break;
75137b8ef16SJohn Baldwin 	}
75237b8ef16SJohn Baldwin 	if (ih == NULL) {
75337b8ef16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
754d0c9a291SJohn Baldwin 		panic("handler %p not found in interrupt event %p", cookie, ie);
75537b8ef16SJohn Baldwin 	}
75637b8ef16SJohn Baldwin #endif
75737b8ef16SJohn Baldwin 	ih = cookie;
75837b8ef16SJohn Baldwin 
75937b8ef16SJohn Baldwin 	/*
76037b8ef16SJohn Baldwin 	 * Look for an existing description by checking for an
76137b8ef16SJohn Baldwin 	 * existing ":".  This assumes device names do not include
76237b8ef16SJohn Baldwin 	 * colons.  If one is found, prepare to insert the new
76337b8ef16SJohn Baldwin 	 * description at that point.  If one is not found, find the
76437b8ef16SJohn Baldwin 	 * end of the name to use as the insertion point.
76537b8ef16SJohn Baldwin 	 */
766dc15eac0SEd Schouten 	start = strchr(ih->ih_name, ':');
76737b8ef16SJohn Baldwin 	if (start == NULL)
768dc15eac0SEd Schouten 		start = strchr(ih->ih_name, 0);
76937b8ef16SJohn Baldwin 
77037b8ef16SJohn Baldwin 	/*
77137b8ef16SJohn Baldwin 	 * See if there is enough remaining room in the string for the
77237b8ef16SJohn Baldwin 	 * description + ":".  The "- 1" leaves room for the trailing
77337b8ef16SJohn Baldwin 	 * '\0'.  The "+ 1" accounts for the colon.
77437b8ef16SJohn Baldwin 	 */
77537b8ef16SJohn Baldwin 	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
77637b8ef16SJohn Baldwin 	if (strlen(descr) + 1 > space) {
77737b8ef16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
77837b8ef16SJohn Baldwin 		return (ENOSPC);
77937b8ef16SJohn Baldwin 	}
78037b8ef16SJohn Baldwin 
78137b8ef16SJohn Baldwin 	/* Append a colon followed by the description. */
78237b8ef16SJohn Baldwin 	*start = ':';
78337b8ef16SJohn Baldwin 	strcpy(start + 1, descr);
78437b8ef16SJohn Baldwin 	intr_event_update(ie);
78537b8ef16SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
78637b8ef16SJohn Baldwin 	return (0);
78737b8ef16SJohn Baldwin }
78837b8ef16SJohn Baldwin 
78937b8ef16SJohn Baldwin /*
790c3045318SJohn Baldwin  * Return the ie_source field from the intr_event an intr_handler is
791c3045318SJohn Baldwin  * associated with.
792c3045318SJohn Baldwin  */
793c3045318SJohn Baldwin void *
794c3045318SJohn Baldwin intr_handler_source(void *cookie)
795c3045318SJohn Baldwin {
796c3045318SJohn Baldwin 	struct intr_handler *ih;
797c3045318SJohn Baldwin 	struct intr_event *ie;
798c3045318SJohn Baldwin 
799c3045318SJohn Baldwin 	ih = (struct intr_handler *)cookie;
800c3045318SJohn Baldwin 	if (ih == NULL)
801c3045318SJohn Baldwin 		return (NULL);
802c3045318SJohn Baldwin 	ie = ih->ih_event;
803c3045318SJohn Baldwin 	KASSERT(ie != NULL,
804c3045318SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
805c3045318SJohn Baldwin 	    ih->ih_name));
806c3045318SJohn Baldwin 	return (ie->ie_source);
807c3045318SJohn Baldwin }
808c3045318SJohn Baldwin 
809e4cd31ddSJeff Roberson /*
810e4cd31ddSJeff Roberson  * Sleep until an ithread finishes executing an interrupt handler.
811e4cd31ddSJeff Roberson  *
812e4cd31ddSJeff Roberson  * XXX Doesn't currently handle interrupt filters or fast interrupt
813e4cd31ddSJeff Roberson  * handlers.  This is intended for compatibility with linux drivers
814e4cd31ddSJeff Roberson  * only.  Do not use in BSD code.
815e4cd31ddSJeff Roberson  */
816e4cd31ddSJeff Roberson void
817e4cd31ddSJeff Roberson _intr_drain(int irq)
818e4cd31ddSJeff Roberson {
819e4cd31ddSJeff Roberson 	struct intr_event *ie;
820e4cd31ddSJeff Roberson 	struct intr_thread *ithd;
821e4cd31ddSJeff Roberson 	struct thread *td;
822e4cd31ddSJeff Roberson 
823e4cd31ddSJeff Roberson 	ie = intr_lookup(irq);
824e4cd31ddSJeff Roberson 	if (ie == NULL)
825e4cd31ddSJeff Roberson 		return;
826e4cd31ddSJeff Roberson 	if (ie->ie_thread == NULL)
827e4cd31ddSJeff Roberson 		return;
828e4cd31ddSJeff Roberson 	ithd = ie->ie_thread;
829e4cd31ddSJeff Roberson 	td = ithd->it_thread;
8305bd186a6SJeff Roberson 	/*
8315bd186a6SJeff Roberson 	 * We set the flag and wait for it to be cleared to avoid
8325bd186a6SJeff Roberson 	 * long delays with potentially busy interrupt handlers
8335bd186a6SJeff Roberson 	 * were we to only sample TD_AWAITING_INTR() every tick.
8345bd186a6SJeff Roberson 	 */
835e4cd31ddSJeff Roberson 	thread_lock(td);
836e4cd31ddSJeff Roberson 	if (!TD_AWAITING_INTR(td)) {
837e4cd31ddSJeff Roberson 		ithd->it_flags |= IT_WAIT;
8385bd186a6SJeff Roberson 		while (ithd->it_flags & IT_WAIT) {
8395bd186a6SJeff Roberson 			thread_unlock(td);
8405bd186a6SJeff Roberson 			pause("idrain", 1);
8415bd186a6SJeff Roberson 			thread_lock(td);
842e4cd31ddSJeff Roberson 		}
8435bd186a6SJeff Roberson 	}
8445bd186a6SJeff Roberson 	thread_unlock(td);
845e4cd31ddSJeff Roberson 	return;
846e4cd31ddSJeff Roberson }
847e4cd31ddSJeff Roberson 
848e4cd31ddSJeff Roberson 
849bafe5a31SPaolo Pisati #ifndef INTR_FILTER
850b4151f71SJohn Baldwin int
851e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie)
852b4151f71SJohn Baldwin {
853e0f66ef8SJohn Baldwin 	struct intr_handler *handler = (struct intr_handler *)cookie;
854e0f66ef8SJohn Baldwin 	struct intr_event *ie;
855b4151f71SJohn Baldwin #ifdef INVARIANTS
856e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
857e0f66ef8SJohn Baldwin #endif
858e0f66ef8SJohn Baldwin #ifdef notyet
859e0f66ef8SJohn Baldwin 	int dead;
860b4151f71SJohn Baldwin #endif
861b4151f71SJohn Baldwin 
8623e5da754SJohn Baldwin 	if (handler == NULL)
863b4151f71SJohn Baldwin 		return (EINVAL);
864e0f66ef8SJohn Baldwin 	ie = handler->ih_event;
865e0f66ef8SJohn Baldwin 	KASSERT(ie != NULL,
866e0f66ef8SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
8673e5da754SJohn Baldwin 	    handler->ih_name));
868e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
86991f91617SDavid E. O'Brien 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
870e0f66ef8SJohn Baldwin 	    ie->ie_name);
871b4151f71SJohn Baldwin #ifdef INVARIANTS
872e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
8733e5da754SJohn Baldwin 		if (ih == handler)
8743e5da754SJohn Baldwin 			goto ok;
875e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
876e0f66ef8SJohn Baldwin 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
877e0f66ef8SJohn Baldwin 	    ih->ih_name, ie->ie_name);
8783e5da754SJohn Baldwin ok:
879b4151f71SJohn Baldwin #endif
880de271f01SJohn Baldwin 	/*
881e0f66ef8SJohn Baldwin 	 * If there is no ithread, then just remove the handler and return.
882e0f66ef8SJohn Baldwin 	 * XXX: Note that an INTR_FAST handler might be running on another
883e0f66ef8SJohn Baldwin 	 * CPU!
884e0f66ef8SJohn Baldwin 	 */
885e0f66ef8SJohn Baldwin 	if (ie->ie_thread == NULL) {
886e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
887e0f66ef8SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
888e0f66ef8SJohn Baldwin 		free(handler, M_ITHREAD);
889e0f66ef8SJohn Baldwin 		return (0);
890e0f66ef8SJohn Baldwin 	}
891e0f66ef8SJohn Baldwin 
892e0f66ef8SJohn Baldwin 	/*
893de271f01SJohn Baldwin 	 * If the interrupt thread is already running, then just mark this
894de271f01SJohn Baldwin 	 * handler as being dead and let the ithread do the actual removal.
895288e351bSDon Lewis 	 *
896288e351bSDon Lewis 	 * During a cold boot while cold is set, msleep() does not sleep,
897288e351bSDon Lewis 	 * so we have to remove the handler here rather than letting the
898288e351bSDon Lewis 	 * thread do it.
899de271f01SJohn Baldwin 	 */
900982d11f8SJeff Roberson 	thread_lock(ie->ie_thread->it_thread);
901e0f66ef8SJohn Baldwin 	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
902de271f01SJohn Baldwin 		handler->ih_flags |= IH_DEAD;
903de271f01SJohn Baldwin 
904de271f01SJohn Baldwin 		/*
905de271f01SJohn Baldwin 		 * Ensure that the thread will process the handler list
906de271f01SJohn Baldwin 		 * again and remove this handler if it has already passed
907de271f01SJohn Baldwin 		 * it on the list.
90801f5e086SKonstantin Belousov 		 *
90901f5e086SKonstantin Belousov 		 * The release part of the following store ensures
91001f5e086SKonstantin Belousov 		 * that the update of ih_flags is ordered before the
91101f5e086SKonstantin Belousov 		 * it_need setting.  See the comment before
91201f5e086SKonstantin Belousov 		 * atomic_cmpset_acq(&ithd->it_need, ...) operation in
91301f5e086SKonstantin Belousov 		 * the ithread_execute_handlers().
914de271f01SJohn Baldwin 		 */
91501f5e086SKonstantin Belousov 		atomic_store_rel_int(&ie->ie_thread->it_need, 1);
9164d29cb2dSJohn Baldwin 	} else
917e0f66ef8SJohn Baldwin 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
918982d11f8SJeff Roberson 	thread_unlock(ie->ie_thread->it_thread);
919e0f66ef8SJohn Baldwin 	while (handler->ih_flags & IH_DEAD)
9200f180a7cSJohn Baldwin 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
921e0f66ef8SJohn Baldwin 	intr_event_update(ie);
922e0f66ef8SJohn Baldwin #ifdef notyet
923e0f66ef8SJohn Baldwin 	/*
924e0f66ef8SJohn Baldwin 	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
925e0f66ef8SJohn Baldwin 	 * this could lead to races of stale data when servicing an
926e0f66ef8SJohn Baldwin 	 * interrupt.
927e0f66ef8SJohn Baldwin 	 */
928e0f66ef8SJohn Baldwin 	dead = 1;
929e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
930e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_FAST)) {
931e0f66ef8SJohn Baldwin 			dead = 0;
932e0f66ef8SJohn Baldwin 			break;
933e0f66ef8SJohn Baldwin 		}
934e0f66ef8SJohn Baldwin 	}
935e0f66ef8SJohn Baldwin 	if (dead) {
936e0f66ef8SJohn Baldwin 		ithread_destroy(ie->ie_thread);
937e0f66ef8SJohn Baldwin 		ie->ie_thread = NULL;
938e0f66ef8SJohn Baldwin 	}
939e0f66ef8SJohn Baldwin #endif
940e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
941b4151f71SJohn Baldwin 	free(handler, M_ITHREAD);
942b4151f71SJohn Baldwin 	return (0);
943b4151f71SJohn Baldwin }
944b4151f71SJohn Baldwin 
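/*
 * Schedule the ithread associated with an interrupt event: harvest
 * entropy if requested, mark the thread as needing service via it_need,
 * and put it on the run queue if it is currently idle in IWAIT.  Reached
 * from the hardware interrupt dispatch and swi_sched() paths.
 */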
9451ee1b687SJohn Baldwin static int
946e0f66ef8SJohn Baldwin intr_event_schedule_thread(struct intr_event *ie)
9473e5da754SJohn Baldwin {
948e0f66ef8SJohn Baldwin 	struct intr_entropy entropy;
949e0f66ef8SJohn Baldwin 	struct intr_thread *it;
950b40ce416SJulian Elischer 	struct thread *td;
95104774f23SJulian Elischer 	struct thread *ctd;
9523e5da754SJohn Baldwin 
9533e5da754SJohn Baldwin 	/*
9543e5da754SJohn Baldwin 	 * If no ithread or no handlers, then we have a stray interrupt.
9553e5da754SJohn Baldwin 	 */
956e0f66ef8SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
957e0f66ef8SJohn Baldwin 	    ie->ie_thread == NULL)
9583e5da754SJohn Baldwin 		return (EINVAL);
9593e5da754SJohn Baldwin 
96004774f23SJulian Elischer 	ctd = curthread;
961e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
962e0f66ef8SJohn Baldwin 	td = it->it_thread;
963e0f66ef8SJohn Baldwin 
9643e5da754SJohn Baldwin 	/*
9653e5da754SJohn Baldwin 	 * If any of the handlers for this ithread claim to be good
9663e5da754SJohn Baldwin 	 * sources of entropy, then gather some.
9673e5da754SJohn Baldwin 	 */
96810cb2424SMark Murray 	if (ie->ie_flags & IE_ENTROPY) {
969e0f66ef8SJohn Baldwin 		entropy.event = (uintptr_t)ie;
970e0f66ef8SJohn Baldwin 		entropy.td = ctd;
971d1b06863SMark Murray 		random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
9723e5da754SJohn Baldwin 	}
9733e5da754SJohn Baldwin 
974*ba3f7276SMatt Macy 	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
9753e5da754SJohn Baldwin 
9763e5da754SJohn Baldwin 	/*
9773e5da754SJohn Baldwin 	 * Set it_need to tell the thread to keep running if it is already
978982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
979982d11f8SJeff Roberson 	 * put it on the runqueue.
980283dfee9SKonstantin Belousov 	 *
981283dfee9SKonstantin Belousov 	 * Use store_rel to arrange that the store to ih_need in
982283dfee9SKonstantin Belousov 	 * swi_sched() is before the store to it_need and prepare for
983283dfee9SKonstantin Belousov 	 * transfer of this order to loads in the ithread.
9843e5da754SJohn Baldwin 	 */
9853eebd44dSAlfred Perlstein 	atomic_store_rel_int(&it->it_need, 1);
986982d11f8SJeff Roberson 	thread_lock(td);
98771fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
988e0f66ef8SJohn Baldwin 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
9897ab24ea3SJulian Elischer 		    td->td_name);
99071fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
991f0393f06SJeff Roberson 		sched_add(td, SRQ_INTR);
9923e5da754SJohn Baldwin 	} else {
993e0f66ef8SJohn Baldwin 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
9947ab24ea3SJulian Elischer 		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
9953e5da754SJohn Baldwin 	}
996982d11f8SJeff Roberson 	thread_unlock(td);
9973e5da754SJohn Baldwin 
9983e5da754SJohn Baldwin 	return (0);
9993e5da754SJohn Baldwin }
1000bafe5a31SPaolo Pisati #else
1001bafe5a31SPaolo Pisati int
1002bafe5a31SPaolo Pisati intr_event_remove_handler(void *cookie)
1003bafe5a31SPaolo Pisati {
1004bafe5a31SPaolo Pisati 	struct intr_handler *handler = (struct intr_handler *)cookie;
1005bafe5a31SPaolo Pisati 	struct intr_event *ie;
1006bafe5a31SPaolo Pisati 	struct intr_thread *it;
1007bafe5a31SPaolo Pisati #ifdef INVARIANTS
1008bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1009bafe5a31SPaolo Pisati #endif
1010bafe5a31SPaolo Pisati #ifdef notyet
1011bafe5a31SPaolo Pisati 	int dead;
1012bafe5a31SPaolo Pisati #endif
1013bafe5a31SPaolo Pisati 
1014bafe5a31SPaolo Pisati 	if (handler == NULL)
1015bafe5a31SPaolo Pisati 		return (EINVAL);
1016bafe5a31SPaolo Pisati 	ie = handler->ih_event;
1017bafe5a31SPaolo Pisati 	KASSERT(ie != NULL,
1018bafe5a31SPaolo Pisati 	    ("interrupt handler \"%s\" has a NULL interrupt event",
1019bafe5a31SPaolo Pisati 	    handler->ih_name));
1020bafe5a31SPaolo Pisati 	mtx_lock(&ie->ie_lock);
1021bafe5a31SPaolo Pisati 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
1022bafe5a31SPaolo Pisati 	    ie->ie_name);
1023bafe5a31SPaolo Pisati #ifdef INVARIANTS
1024bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
1025bafe5a31SPaolo Pisati 		if (ih == handler)
1026bafe5a31SPaolo Pisati 			goto ok;
1027bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
1028bafe5a31SPaolo Pisati 	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
1029bafe5a31SPaolo Pisati 	    ih->ih_name, ie->ie_name);
1030bafe5a31SPaolo Pisati ok:
1031bafe5a31SPaolo Pisati #endif
1032bafe5a31SPaolo Pisati 	/*
1033bafe5a31SPaolo Pisati 	 * If there are no ithreads (per event and per handler), then
1034bafe5a31SPaolo Pisati 	 * just remove the handler and return.
1035bafe5a31SPaolo Pisati 	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
1036bafe5a31SPaolo Pisati 	 */
1037bafe5a31SPaolo Pisati 	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
1038bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
1039bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
1040bafe5a31SPaolo Pisati 		free(handler, M_ITHREAD);
1041bafe5a31SPaolo Pisati 		return (0);
1042bafe5a31SPaolo Pisati 	}
1043bafe5a31SPaolo Pisati 
1044bafe5a31SPaolo Pisati 	/* Private or global ithread? */
1045bafe5a31SPaolo Pisati 	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
1046bafe5a31SPaolo Pisati 	/*
1047bafe5a31SPaolo Pisati 	 * If the interrupt thread is already running, then just mark this
1048bafe5a31SPaolo Pisati 	 * handler as being dead and let the ithread do the actual removal.
1049bafe5a31SPaolo Pisati 	 *
1050bafe5a31SPaolo Pisati 	 * During a cold boot while cold is set, msleep() does not sleep,
1051bafe5a31SPaolo Pisati 	 * so we have to remove the handler here rather than letting the
1052bafe5a31SPaolo Pisati 	 * thread do it.
1053bafe5a31SPaolo Pisati 	 */
1054982d11f8SJeff Roberson 	thread_lock(it->it_thread);
1055bafe5a31SPaolo Pisati 	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
1056bafe5a31SPaolo Pisati 		handler->ih_flags |= IH_DEAD;
1057bafe5a31SPaolo Pisati 
1058bafe5a31SPaolo Pisati 		/*
1059bafe5a31SPaolo Pisati 		 * Ensure that the thread will process the handler list
1060bafe5a31SPaolo Pisati 		 * again and remove this handler if it has already passed
1061bafe5a31SPaolo Pisati 		 * it on the list.
106201f5e086SKonstantin Belousov 		 *
106301f5e086SKonstantin Belousov 		 * The release part of the following store ensures
106401f5e086SKonstantin Belousov 		 * that the update of ih_flags is ordered before the
106501f5e086SKonstantin Belousov 		 * it_need setting.  See the comment before
106601f5e086SKonstantin Belousov 		 * atomic_cmpset_acq(&ithd->it_need, ...) operation in
106701f5e086SKonstantin Belousov 		 * the ithread_execute_handlers().
1068bafe5a31SPaolo Pisati 		 */
106901f5e086SKonstantin Belousov 		atomic_store_rel_int(&it->it_need, 1);
1070bafe5a31SPaolo Pisati 	} else
1071bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
1072982d11f8SJeff Roberson 	thread_unlock(it->it_thread);
1073bafe5a31SPaolo Pisati 	while (handler->ih_flags & IH_DEAD)
1074bafe5a31SPaolo Pisati 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
1075bafe5a31SPaolo Pisati 	/*
1076bafe5a31SPaolo Pisati 	 * At this point, the handler has been disconnected from the event,
1077bafe5a31SPaolo Pisati 	 * so we can kill the private ithread if any.
1078bafe5a31SPaolo Pisati 	 */
1079bafe5a31SPaolo Pisati 	if (handler->ih_thread) {
1080bafe5a31SPaolo Pisati 		ithread_destroy(handler->ih_thread);
1081bafe5a31SPaolo Pisati 		handler->ih_thread = NULL;
1082bafe5a31SPaolo Pisati 	}
1083bafe5a31SPaolo Pisati 	intr_event_update(ie);
1084bafe5a31SPaolo Pisati #ifdef notyet
1085bafe5a31SPaolo Pisati 	/*
1086bafe5a31SPaolo Pisati 	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
1087bafe5a31SPaolo Pisati 	 * this could lead to races of stale data when servicing an
1088bafe5a31SPaolo Pisati 	 * interrupt.
1089bafe5a31SPaolo Pisati 	 */
1090bafe5a31SPaolo Pisati 	dead = 1;
1091bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1092bafe5a31SPaolo Pisati 		if (handler != NULL) {
1093bafe5a31SPaolo Pisati 			dead = 0;
1094bafe5a31SPaolo Pisati 			break;
1095bafe5a31SPaolo Pisati 		}
1096bafe5a31SPaolo Pisati 	}
1097bafe5a31SPaolo Pisati 	if (dead) {
1098bafe5a31SPaolo Pisati 		ithread_destroy(ie->ie_thread);
1099bafe5a31SPaolo Pisati 		ie->ie_thread = NULL;
1100bafe5a31SPaolo Pisati 	}
1101bafe5a31SPaolo Pisati #endif
1102bafe5a31SPaolo Pisati 	mtx_unlock(&ie->ie_lock);
1103bafe5a31SPaolo Pisati 	free(handler, M_ITHREAD);
1104bafe5a31SPaolo Pisati 	return (0);
1105bafe5a31SPaolo Pisati }
1106bafe5a31SPaolo Pisati 
11071ee1b687SJohn Baldwin static int
1108bafe5a31SPaolo Pisati intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
1109bafe5a31SPaolo Pisati {
1110bafe5a31SPaolo Pisati 	struct intr_entropy entropy;
1111bafe5a31SPaolo Pisati 	struct thread *td;
1112bafe5a31SPaolo Pisati 	struct thread *ctd;
1113bafe5a31SPaolo Pisati 	struct proc *p;
1114bafe5a31SPaolo Pisati 
1115bafe5a31SPaolo Pisati 	/*
1116bafe5a31SPaolo Pisati 	 * If no ithread or no handlers, then we have a stray interrupt.
1117bafe5a31SPaolo Pisati 	 */
1118bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
1119bafe5a31SPaolo Pisati 		return (EINVAL);
1120bafe5a31SPaolo Pisati 
1121bafe5a31SPaolo Pisati 	ctd = curthread;
1122bafe5a31SPaolo Pisati 	td = it->it_thread;
1123bafe5a31SPaolo Pisati 	p = td->td_proc;
1124bafe5a31SPaolo Pisati 
1125bafe5a31SPaolo Pisati 	/*
1126bafe5a31SPaolo Pisati 	 * If any of the handlers for this ithread claim to be good
1127bafe5a31SPaolo Pisati 	 * sources of entropy, then gather some.
1128bafe5a31SPaolo Pisati 	 */
112910cb2424SMark Murray 	if (ie->ie_flags & IE_ENTROPY) {
1130bafe5a31SPaolo Pisati 		entropy.event = (uintptr_t)ie;
1131bafe5a31SPaolo Pisati 		entropy.td = ctd;
1132d1b06863SMark Murray 		random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
1133bafe5a31SPaolo Pisati 	}
1134bafe5a31SPaolo Pisati 
1135bafe5a31SPaolo Pisati 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
1136bafe5a31SPaolo Pisati 
1137bafe5a31SPaolo Pisati 	/*
1138bafe5a31SPaolo Pisati 	 * Set it_need to tell the thread to keep running if it is already
1139982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
1140982d11f8SJeff Roberson 	 * put it on the runqueue.
1141283dfee9SKonstantin Belousov 	 *
1142283dfee9SKonstantin Belousov 	 * Use store_rel so that the store to ih_need in swi_sched()
1143283dfee9SKonstantin Belousov 	 * is ordered before the store to it_need; the acquire load of
1144283dfee9SKonstantin Belousov 	 * it_need in the ithread then orders the later load of ih_need.
1145bafe5a31SPaolo Pisati 	 */
11463eebd44dSAlfred Perlstein 	atomic_store_rel_int(&it->it_need, 1);
1147982d11f8SJeff Roberson 	thread_lock(td);
1148bafe5a31SPaolo Pisati 	if (TD_AWAITING_INTR(td)) {
1149bafe5a31SPaolo Pisati 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
11503c1ffc32SJulian Elischer 		    td->td_name);
1151bafe5a31SPaolo Pisati 		TD_CLR_IWAIT(td);
1152bafe5a31SPaolo Pisati 		sched_add(td, SRQ_INTR);
1153bafe5a31SPaolo Pisati 	} else {
1154bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
11557ab24ea3SJulian Elischer 		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
1156bafe5a31SPaolo Pisati 	}
1157982d11f8SJeff Roberson 	thread_unlock(td);
1158bafe5a31SPaolo Pisati 
1159bafe5a31SPaolo Pisati 	return (0);
1160bafe5a31SPaolo Pisati }
1161bafe5a31SPaolo Pisati #endif
11623e5da754SJohn Baldwin 
1163fe486a37SJohn Baldwin /*
1164e84bcd84SRobert Watson  * Allow interrupt event binding for software interrupt handlers -- a no-op,
1165e84bcd84SRobert Watson  * since interrupts are generated in software rather than being directed by
1166e84bcd84SRobert Watson  * a PIC.
1167e84bcd84SRobert Watson  */
1168e84bcd84SRobert Watson static int
1169066da805SAdrian Chadd swi_assign_cpu(void *arg, int cpu)
1170e84bcd84SRobert Watson {
1171e84bcd84SRobert Watson 
1172e84bcd84SRobert Watson 	return (0);
1173e84bcd84SRobert Watson }
1174e84bcd84SRobert Watson 
1175e84bcd84SRobert Watson /*
1176fe486a37SJohn Baldwin  * Add a software interrupt handler to a specified event.  If a given event
1177fe486a37SJohn Baldwin  * is not specified, then a new event is created.
1178fe486a37SJohn Baldwin  */
11793e5da754SJohn Baldwin int
1180e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
1181b4151f71SJohn Baldwin 	    void *arg, int pri, enum intr_type flags, void **cookiep)
11828088699fSJohn Baldwin {
1183e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1184b4151f71SJohn Baldwin 	int error;
11858088699fSJohn Baldwin 
1186bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
11873e5da754SJohn Baldwin 		return (EINVAL);
11883e5da754SJohn Baldwin 
1189e0f66ef8SJohn Baldwin 	ie = (eventp != NULL) ? *eventp : NULL;
11908088699fSJohn Baldwin 
1191e0f66ef8SJohn Baldwin 	if (ie != NULL) {
1192e0f66ef8SJohn Baldwin 		if (!(ie->ie_flags & IE_SOFT))
11933e5da754SJohn Baldwin 			return (EINVAL);
11943e5da754SJohn Baldwin 	} else {
11959b33b154SJeff Roberson 		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1196e84bcd84SRobert Watson 		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
11978088699fSJohn Baldwin 		if (error)
1198b4151f71SJohn Baldwin 			return (error);
1199e0f66ef8SJohn Baldwin 		if (eventp != NULL)
1200e0f66ef8SJohn Baldwin 			*eventp = ie;
12018088699fSJohn Baldwin 	}
12028d809d50SJeff Roberson 	error = intr_event_add_handler(ie, name, NULL, handler, arg,
1203d3305205SJohn Baldwin 	    PI_SWI(pri), flags, cookiep);
12048d809d50SJeff Roberson 	return (error);
12058088699fSJohn Baldwin }
12068088699fSJohn Baldwin 
12071931cf94SJohn Baldwin /*
1208e0f66ef8SJohn Baldwin  * Schedule a software interrupt thread.
12091931cf94SJohn Baldwin  */
12101931cf94SJohn Baldwin void
1211b4151f71SJohn Baldwin swi_sched(void *cookie, int flags)
12121931cf94SJohn Baldwin {
1213e0f66ef8SJohn Baldwin 	struct intr_handler *ih = (struct intr_handler *)cookie;
1214e0f66ef8SJohn Baldwin 	struct intr_event *ie = ih->ih_event;
1215d95dca1dSJohn Baldwin 	struct intr_entropy entropy;
1216*ba3f7276SMatt Macy 	int error __unused;
12178088699fSJohn Baldwin 
1218e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1219e0f66ef8SJohn Baldwin 	    ih->ih_need);
12201931cf94SJohn Baldwin 
1221d95dca1dSJohn Baldwin 	entropy.event = (uintptr_t)ih;
1222d95dca1dSJohn Baldwin 	entropy.td = curthread;
1223d1b06863SMark Murray 	random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);
1224d95dca1dSJohn Baldwin 
12251931cf94SJohn Baldwin 	/*
12263e5da754SJohn Baldwin 	 * Set ih_need for this handler so that if the ithread is already
12273e5da754SJohn Baldwin 	 * running it will execute this handler on the next pass.  Otherwise,
12283e5da754SJohn Baldwin 	 * it will execute it the next time it runs.
12291931cf94SJohn Baldwin 	 */
1230283dfee9SKonstantin Belousov 	ih->ih_need = 1;
12311ca2c018SBruce Evans 
1232b4151f71SJohn Baldwin 	if (!(flags & SWI_DELAY)) {
123383c9dea1SGleb Smirnoff 		VM_CNT_INC(v_soft);
1234bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1235bafe5a31SPaolo Pisati 		error = intr_event_schedule_thread(ie, ie->ie_thread);
1236bafe5a31SPaolo Pisati #else
1237e0f66ef8SJohn Baldwin 		error = intr_event_schedule_thread(ie);
1238bafe5a31SPaolo Pisati #endif
12393e5da754SJohn Baldwin 		KASSERT(error == 0, ("stray software interrupt"));
12408088699fSJohn Baldwin 	}
12418088699fSJohn Baldwin }
12428088699fSJohn Baldwin 
1243fe486a37SJohn Baldwin /*
1244fe486a37SJohn Baldwin  * Remove a software interrupt handler.  Currently this code does not
1245fe486a37SJohn Baldwin  * remove the associated interrupt event if it becomes empty.  Calling code
1246fe486a37SJohn Baldwin  * may do so manually via intr_event_destroy(), but that's not really
1247fe486a37SJohn Baldwin  * an optimal interface.
1248fe486a37SJohn Baldwin  */
1249fe486a37SJohn Baldwin int
1250fe486a37SJohn Baldwin swi_remove(void *cookie)
1251fe486a37SJohn Baldwin {
1252fe486a37SJohn Baldwin 
1253fe486a37SJohn Baldwin 	return (intr_event_remove_handler(cookie));
1254fe486a37SJohn Baldwin }
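
/*
 * A rough usage sketch for the swi_add()/swi_sched()/swi_remove()
 * interface above: register a handler, kick it from another context,
 * and tear it down again.  The example_* names are hypothetical, and
 * SWI_VM/INTR_MPSAFE are just one plausible priority/flag choice.
 */
#if 0	/* illustrative only, not compiled */
static struct intr_event *example_ie;
static void *example_cookie;

static void
example_swi_handler(void *arg)
{

	/*
	 * Runs in a software interrupt thread; Giant is not taken
	 * because the handler is registered INTR_MPSAFE.
	 */
	printf("example swi ran, arg %p\n", arg);
}

static void
example_swi_attach(void)
{

	if (swi_add(&example_ie, "example", example_swi_handler, NULL,
	    SWI_VM, INTR_MPSAFE, &example_cookie) != 0)
		panic("could not register example swi");
}

/* Called from some other context, e.g. a hardware interrupt filter. */
static void
example_kick(void)
{

	swi_sched(example_cookie, 0);
}

static void
example_swi_detach(void)
{

	swi_remove(example_cookie);
}
#endif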
1255fe486a37SJohn Baldwin 
1256bafe5a31SPaolo Pisati #ifdef INTR_FILTER
1257bafe5a31SPaolo Pisati static void
1258bafe5a31SPaolo Pisati priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
1259bafe5a31SPaolo Pisati {
1260bafe5a31SPaolo Pisati 	struct intr_event *ie;
1261bafe5a31SPaolo Pisati 
1262bafe5a31SPaolo Pisati 	ie = ih->ih_event;
1263bafe5a31SPaolo Pisati 	/*
1264bafe5a31SPaolo Pisati 	 * If this handler is marked for death, remove it from
1265bafe5a31SPaolo Pisati 	 * the list of handlers and wake up the sleeper.
1266bafe5a31SPaolo Pisati 	 */
1267bafe5a31SPaolo Pisati 	if (ih->ih_flags & IH_DEAD) {
1268bafe5a31SPaolo Pisati 		mtx_lock(&ie->ie_lock);
1269bafe5a31SPaolo Pisati 		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1270bafe5a31SPaolo Pisati 		ih->ih_flags &= ~IH_DEAD;
1271bafe5a31SPaolo Pisati 		wakeup(ih);
1272bafe5a31SPaolo Pisati 		mtx_unlock(&ie->ie_lock);
1273bafe5a31SPaolo Pisati 		return;
1274bafe5a31SPaolo Pisati 	}
1275bafe5a31SPaolo Pisati 
1276bafe5a31SPaolo Pisati 	/* Execute this handler. */
1277bafe5a31SPaolo Pisati 	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1278bafe5a31SPaolo Pisati 	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
1279bafe5a31SPaolo Pisati 	     ih->ih_name, ih->ih_flags);
1280bafe5a31SPaolo Pisati 
1281bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1282bafe5a31SPaolo Pisati 		mtx_lock(&Giant);
1283bafe5a31SPaolo Pisati 	ih->ih_handler(ih->ih_argument);
1284bafe5a31SPaolo Pisati 	if (!(ih->ih_flags & IH_MPSAFE))
1285bafe5a31SPaolo Pisati 		mtx_unlock(&Giant);
1286bafe5a31SPaolo Pisati }
1287bafe5a31SPaolo Pisati #endif
1288bafe5a31SPaolo Pisati 
128937e9511fSJohn Baldwin /*
129037e9511fSJohn Baldwin  * This is a public function for use by drivers that mux interrupt
129137e9511fSJohn Baldwin  * handlers for child devices from their interrupt handler.
129237e9511fSJohn Baldwin  */
129337e9511fSJohn Baldwin void
129437e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1295e0f66ef8SJohn Baldwin {
1296e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *ihn;
1297e0f66ef8SJohn Baldwin 
1298e0f66ef8SJohn Baldwin 	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1299e0f66ef8SJohn Baldwin 		/*
1300e0f66ef8SJohn Baldwin 		 * If this handler is marked for death, remove it from
1301e0f66ef8SJohn Baldwin 		 * the list of handlers and wake up the sleeper.
1302e0f66ef8SJohn Baldwin 		 */
1303e0f66ef8SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
1304e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
1305e0f66ef8SJohn Baldwin 			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1306e0f66ef8SJohn Baldwin 			ih->ih_flags &= ~IH_DEAD;
1307e0f66ef8SJohn Baldwin 			wakeup(ih);
1308e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
1309e0f66ef8SJohn Baldwin 			continue;
1310e0f66ef8SJohn Baldwin 		}
1311e0f66ef8SJohn Baldwin 
1312f2d619c8SPaolo Pisati 		/* Skip filter-only handlers. */
1313f2d619c8SPaolo Pisati 		if (ih->ih_handler == NULL)
1314f2d619c8SPaolo Pisati 			continue;
1315f2d619c8SPaolo Pisati 
1316e0f66ef8SJohn Baldwin 		/*
1317e0f66ef8SJohn Baldwin 		 * For software interrupt threads, we only execute
1318e0f66ef8SJohn Baldwin 		 * handlers that have their need flag set.  Hardware
1319e0f66ef8SJohn Baldwin 		 * interrupt threads always invoke all of their handlers.
13201b79b949SKirk McKusick 		 *
13211b79b949SKirk McKusick 		 * ih_need can only be 0 or 1.  Failed cmpset below
13221b79b949SKirk McKusick 		 * means that there is no request to execute handlers,
13231b79b949SKirk McKusick 		 * so a retry of the cmpset is not needed.
1324e0f66ef8SJohn Baldwin 		 */
13251b79b949SKirk McKusick 		if ((ie->ie_flags & IE_SOFT) != 0 &&
13261b79b949SKirk McKusick 		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
1327e0f66ef8SJohn Baldwin 			continue;
1328e0f66ef8SJohn Baldwin 
1329e0f66ef8SJohn Baldwin 		/* Execute this handler. */
1330e0f66ef8SJohn Baldwin 		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1331bafe5a31SPaolo Pisati 		    __func__, p->p_pid, (void *)ih->ih_handler,
1332bafe5a31SPaolo Pisati 		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1333e0f66ef8SJohn Baldwin 
1334e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1335e0f66ef8SJohn Baldwin 			mtx_lock(&Giant);
1336e0f66ef8SJohn Baldwin 		ih->ih_handler(ih->ih_argument);
1337e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1338e0f66ef8SJohn Baldwin 			mtx_unlock(&Giant);
1339e0f66ef8SJohn Baldwin 	}
134037e9511fSJohn Baldwin }
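
/*
 * A rough sketch of the muxing arrangement described above: a parent
 * driver keeps its children's handlers on a private intr_event (assumed
 * to have been set up with intr_event_create() and populated with
 * intr_event_add_handler()) and runs them from its own threaded
 * handler.  struct mux_softc and mux_parent_intr() are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
struct mux_softc {
	struct intr_event *ms_child_ie;	/* handlers of child devices */
};

static void
mux_parent_intr(void *arg)
{
	struct mux_softc *sc = arg;

	/* Runs in the parent's ithread; execute the children's handlers. */
	intr_event_execute_handlers(curproc, sc->ms_child_ie);
}
#endif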
134137e9511fSJohn Baldwin 
134237e9511fSJohn Baldwin static void
134337e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie)
134437e9511fSJohn Baldwin {
134537e9511fSJohn Baldwin 
134637e9511fSJohn Baldwin 	/* Interrupt handlers should not sleep. */
134737e9511fSJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
134837e9511fSJohn Baldwin 		THREAD_NO_SLEEPING();
134937e9511fSJohn Baldwin 	intr_event_execute_handlers(p, ie);
1350e0f66ef8SJohn Baldwin 	if (!(ie->ie_flags & IE_SOFT))
1351e0f66ef8SJohn Baldwin 		THREAD_SLEEPING_OK();
1352e0f66ef8SJohn Baldwin 
1353e0f66ef8SJohn Baldwin 	/*
1354e0f66ef8SJohn Baldwin 	 * Interrupt storm handling:
1355e0f66ef8SJohn Baldwin 	 *
1356e0f66ef8SJohn Baldwin 	 * If this interrupt source is currently storming, then throttle
1357e0f66ef8SJohn Baldwin 	 * it to fire the handler only once per clock tick.
1358e0f66ef8SJohn Baldwin 	 *
1359e0f66ef8SJohn Baldwin 	 * If this interrupt source is not currently storming, but the
1360e0f66ef8SJohn Baldwin 	 * number of back-to-back interrupts exceeds the storm threshold,
1361e0f66ef8SJohn Baldwin 	 * then enter storming mode.
1362e0f66ef8SJohn Baldwin 	 */
1363e41bcf3cSJohn Baldwin 	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1364e41bcf3cSJohn Baldwin 	    !(ie->ie_flags & IE_SOFT)) {
13650ae62c18SNate Lawson 		/* Report the message only once every second. */
13660ae62c18SNate Lawson 		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1367e0f66ef8SJohn Baldwin 			printf(
13680ae62c18SNate Lawson 	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1369e0f66ef8SJohn Baldwin 			    ie->ie_name);
1370e0f66ef8SJohn Baldwin 		}
1371e41bcf3cSJohn Baldwin 		pause("istorm", 1);
1372e0f66ef8SJohn Baldwin 	} else
1373e0f66ef8SJohn Baldwin 		ie->ie_count++;
1374e0f66ef8SJohn Baldwin 
1375e0f66ef8SJohn Baldwin 	/*
1376e0f66ef8SJohn Baldwin 	 * Now that all the handlers have had a chance to run, reenable
1377e0f66ef8SJohn Baldwin 	 * the interrupt source.
1378e0f66ef8SJohn Baldwin 	 */
13791ee1b687SJohn Baldwin 	if (ie->ie_post_ithread != NULL)
13801ee1b687SJohn Baldwin 		ie->ie_post_ithread(ie->ie_source);
1381e0f66ef8SJohn Baldwin }
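
/*
 * The storm warning above is rate limited with ppsratecheck(9), which
 * permits at most maxpps events per second against a caller-supplied
 * timestamp/counter pair.  A small sketch of the same idiom for any
 * noisy diagnostic; the example_* state below is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static struct timeval example_lasttime;
static int example_curpps;

static void
example_noisy_event(const char *what)
{

	/* Print at most one message per second; silently drop the rest. */
	if (ppsratecheck(&example_lasttime, &example_curpps, 1))
		printf("example: %s (rate limited)\n", what);
}
#endif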
1382e0f66ef8SJohn Baldwin 
1383bafe5a31SPaolo Pisati #ifndef INTR_FILTER
13848088699fSJohn Baldwin /*
1385b4151f71SJohn Baldwin  * This is the main code for interrupt threads.
13868088699fSJohn Baldwin  */
138737c84183SPoul-Henning Kamp static void
1388b4151f71SJohn Baldwin ithread_loop(void *arg)
13898088699fSJohn Baldwin {
1390e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
1391e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1392b40ce416SJulian Elischer 	struct thread *td;
1393b4151f71SJohn Baldwin 	struct proc *p;
1394e4cd31ddSJeff Roberson 	int wake;
13958088699fSJohn Baldwin 
1396b40ce416SJulian Elischer 	td = curthread;
1397b40ce416SJulian Elischer 	p = td->td_proc;
1398e0f66ef8SJohn Baldwin 	ithd = (struct intr_thread *)arg;
1399e0f66ef8SJohn Baldwin 	KASSERT(ithd->it_thread == td,
140091f91617SDavid E. O'Brien 	    ("%s: ithread and proc linkage out of sync", __func__));
1401e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
1402e0f66ef8SJohn Baldwin 	ie->ie_count = 0;
1403e4cd31ddSJeff Roberson 	wake = 0;
14048088699fSJohn Baldwin 
14058088699fSJohn Baldwin 	/*
14068088699fSJohn Baldwin 	 * As long as we have interrupts outstanding, go through the
14078088699fSJohn Baldwin 	 * list of handlers, giving each one a go at it.
14088088699fSJohn Baldwin 	 */
14098088699fSJohn Baldwin 	for (;;) {
1410b4151f71SJohn Baldwin 		/*
1411b4151f71SJohn Baldwin 		 * If we are an orphaned thread, then just die.
1412b4151f71SJohn Baldwin 		 */
1413b4151f71SJohn Baldwin 		if (ithd->it_flags & IT_DEAD) {
1414e0f66ef8SJohn Baldwin 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
14157ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1416b4151f71SJohn Baldwin 			free(ithd, M_ITHREAD);
1417ca9a0ddfSJulian Elischer 			kthread_exit();
1418b4151f71SJohn Baldwin 		}
1419b4151f71SJohn Baldwin 
1420e0f66ef8SJohn Baldwin 		/*
1421e0f66ef8SJohn Baldwin 		 * Service interrupts.  If another interrupt arrives while
1422e0f66ef8SJohn Baldwin 		 * we are running, it will set it_need to note that we
1423e0f66ef8SJohn Baldwin 		 * should make another pass.
1424283dfee9SKonstantin Belousov 		 *
1425283dfee9SKonstantin Belousov 		 * The load_acq part of the following cmpset ensures
1426283dfee9SKonstantin Belousov 		 * that the load of ih_need in ithread_execute_handlers()
1427283dfee9SKonstantin Belousov 		 * is ordered after the load of it_need here.
1428e0f66ef8SJohn Baldwin 		 */
1429283dfee9SKonstantin Belousov 		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
1430e0f66ef8SJohn Baldwin 			ithread_execute_handlers(p, ie);
14317870c3c6SJohn Baldwin 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
14327870c3c6SJohn Baldwin 		mtx_assert(&Giant, MA_NOTOWNED);
14338088699fSJohn Baldwin 
14348088699fSJohn Baldwin 		/*
14358088699fSJohn Baldwin 		 * Processed all our interrupts.  Now acquire the thread
14368088699fSJohn Baldwin 		 * lock.  This may take a while and it_need may get
14378088699fSJohn Baldwin 		 * set again, so we have to check it again.
14388088699fSJohn Baldwin 		 */
1439982d11f8SJeff Roberson 		thread_lock(td);
144003bbcb2fSKonstantin Belousov 		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
144103bbcb2fSKonstantin Belousov 		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
14427870c3c6SJohn Baldwin 			TD_SET_IWAIT(td);
1443e0f66ef8SJohn Baldwin 			ie->ie_count = 0;
14448df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
14458088699fSJohn Baldwin 		}
1446e4cd31ddSJeff Roberson 		if (ithd->it_flags & IT_WAIT) {
1447e4cd31ddSJeff Roberson 			wake = 1;
1448e4cd31ddSJeff Roberson 			ithd->it_flags &= ~IT_WAIT;
1449e4cd31ddSJeff Roberson 		}
1450982d11f8SJeff Roberson 		thread_unlock(td);
1451e4cd31ddSJeff Roberson 		if (wake) {
1452e4cd31ddSJeff Roberson 			wakeup(ithd);
1453e4cd31ddSJeff Roberson 			wake = 0;
1454e4cd31ddSJeff Roberson 		}
14558088699fSJohn Baldwin 	}
14561931cf94SJohn Baldwin }
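
/*
 * The it_need protocol used above pairs atomic_store_rel_int() on the
 * producer side (swi_sched() and intr_event_schedule_thread()) with
 * atomic_cmpset_acq_int() in the loop above, so that anything written
 * before the release store (ih_need, IH_DEAD) is visible once the
 * acquire succeeds.  A generic sketch of that pairing with hypothetical
 * example_* names:
 */
#if 0	/* illustrative only, not compiled */
static volatile u_int example_need;	/* 0 or 1, like it_need */
static u_int example_data;		/* published before example_need */

/* Producer: publish the data, then request service with release order. */
static void
example_post(u_int value)
{

	example_data = value;
	atomic_store_rel_int(&example_need, 1);
}

/* Consumer: the acquire cmpset orders the later read of example_data. */
static void
example_service(void)
{

	while (atomic_cmpset_acq_int(&example_need, 1, 0) != 0)
		printf("servicing request, data %u\n", example_data);
}
#endif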
14571ee1b687SJohn Baldwin 
14581ee1b687SJohn Baldwin /*
14591ee1b687SJohn Baldwin  * Main interrupt handling body.
14601ee1b687SJohn Baldwin  *
14611ee1b687SJohn Baldwin  * Input:
14621ee1b687SJohn Baldwin  * o ie:                        the event connected to this interrupt.
14631ee1b687SJohn Baldwin  * o frame:                     some archs (e.g. i386) pass a frame to some
14641ee1b687SJohn Baldwin  *                              handlers as their main argument.
14651ee1b687SJohn Baldwin  * Return value:
14661ee1b687SJohn Baldwin  * o 0:                         everything ok.
14671ee1b687SJohn Baldwin  * o EINVAL:                    stray interrupt.
14681ee1b687SJohn Baldwin  */
14691ee1b687SJohn Baldwin int
14701ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame)
14711ee1b687SJohn Baldwin {
14721ee1b687SJohn Baldwin 	struct intr_handler *ih;
14731f255bd3SAlexander Motin 	struct trapframe *oldframe;
14741ee1b687SJohn Baldwin 	struct thread *td;
1475*ba3f7276SMatt Macy 	int ret, thread;
14761ee1b687SJohn Baldwin 
14771ee1b687SJohn Baldwin 	td = curthread;
14781ee1b687SJohn Baldwin 
1479b7627840SKonstantin Belousov #ifdef KSTACK_USAGE_PROF
1480b7627840SKonstantin Belousov 	intr_prof_stack_use(td, frame);
1481b7627840SKonstantin Belousov #endif
1482b7627840SKonstantin Belousov 
14831ee1b687SJohn Baldwin 	/* An interrupt with no event or handlers is a stray interrupt. */
14841ee1b687SJohn Baldwin 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
14851ee1b687SJohn Baldwin 		return (EINVAL);
14861ee1b687SJohn Baldwin 
14871ee1b687SJohn Baldwin 	/*
14881ee1b687SJohn Baldwin 	 * Execute fast interrupt handlers directly.
14891ee1b687SJohn Baldwin 	 * To support clock handlers, if a handler registers
14901ee1b687SJohn Baldwin 	 * with a NULL argument, then we pass it a pointer to
14911ee1b687SJohn Baldwin 	 * a trapframe as its argument.
14921ee1b687SJohn Baldwin 	 */
14931ee1b687SJohn Baldwin 	td->td_intr_nesting_level++;
14941ee1b687SJohn Baldwin 	thread = 0;
14951ee1b687SJohn Baldwin 	ret = 0;
14961ee1b687SJohn Baldwin 	critical_enter();
14971f255bd3SAlexander Motin 	oldframe = td->td_intr_frame;
14981f255bd3SAlexander Motin 	td->td_intr_frame = frame;
14991ee1b687SJohn Baldwin 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
15001ee1b687SJohn Baldwin 		if (ih->ih_filter == NULL) {
15011ee1b687SJohn Baldwin 			thread = 1;
15021ee1b687SJohn Baldwin 			continue;
15031ee1b687SJohn Baldwin 		}
15041ee1b687SJohn Baldwin 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
15051ee1b687SJohn Baldwin 		    ih->ih_filter, ih->ih_argument == NULL ? frame :
15061ee1b687SJohn Baldwin 		    ih->ih_argument, ih->ih_name);
15071ee1b687SJohn Baldwin 		if (ih->ih_argument == NULL)
15081ee1b687SJohn Baldwin 			ret = ih->ih_filter(frame);
15091ee1b687SJohn Baldwin 		else
15101ee1b687SJohn Baldwin 			ret = ih->ih_filter(ih->ih_argument);
151189fc20ccSAndriy Gapon 		KASSERT(ret == FILTER_STRAY ||
151289fc20ccSAndriy Gapon 		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
151389fc20ccSAndriy Gapon 		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
151489fc20ccSAndriy Gapon 		    ("%s: incorrect return value %#x from %s", __func__, ret,
151589fc20ccSAndriy Gapon 		    ih->ih_name));
151689fc20ccSAndriy Gapon 
15171ee1b687SJohn Baldwin 		/*
15181ee1b687SJohn Baldwin 		 * Wrapper handler special handling:
15191ee1b687SJohn Baldwin 		 *
15201ee1b687SJohn Baldwin 		 * in some particular cases (like pccard and pccbb),
15211ee1b687SJohn Baldwin 		 * the _real_ device handler is wrapped in a couple of
15221ee1b687SJohn Baldwin 		 * functions - a filter wrapper and an ithread wrapper.
15231ee1b687SJohn Baldwin 		 * In this case (and just in this case), the filter wrapper
15241ee1b687SJohn Baldwin 		 * could ask the system to schedule the ithread and mask
15251ee1b687SJohn Baldwin 		 * the interrupt source if the wrapped handler is composed
15261ee1b687SJohn Baldwin 		 * of just an ithread handler.
15271ee1b687SJohn Baldwin 		 *
15281ee1b687SJohn Baldwin 		 * TODO: write a generic wrapper to avoid people rolling
15291ee1b687SJohn Baldwin 		 * their own
15301ee1b687SJohn Baldwin 		 */
15311ee1b687SJohn Baldwin 		if (!thread) {
15321ee1b687SJohn Baldwin 			if (ret == FILTER_SCHEDULE_THREAD)
15331ee1b687SJohn Baldwin 				thread = 1;
15341ee1b687SJohn Baldwin 		}
15351ee1b687SJohn Baldwin 	}
15361f255bd3SAlexander Motin 	td->td_intr_frame = oldframe;
15371ee1b687SJohn Baldwin 
15381ee1b687SJohn Baldwin 	if (thread) {
15391ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
15401ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
15411ee1b687SJohn Baldwin 	} else {
15421ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
15431ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
15441ee1b687SJohn Baldwin 	}
15451ee1b687SJohn Baldwin 
15461ee1b687SJohn Baldwin 	/* Schedule the ithread if needed. */
15471ee1b687SJohn Baldwin 	if (thread) {
1548*ba3f7276SMatt Macy 		int error __unused;
1549*ba3f7276SMatt Macy 
15501ee1b687SJohn Baldwin 		error = intr_event_schedule_thread(ie);
15511ee1b687SJohn Baldwin 		KASSERT(error == 0, ("bad stray interrupt"));
15521ee1b687SJohn Baldwin 	}
15531ee1b687SJohn Baldwin 	critical_exit();
15541ee1b687SJohn Baldwin 	td->td_intr_nesting_level--;
15551ee1b687SJohn Baldwin 	return (0);
15561ee1b687SJohn Baldwin }
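
/*
 * From a driver's perspective, the filters walked above are the ones
 * registered through bus_setup_intr(9); returning FILTER_SCHEDULE_THREAD
 * (or registering a handler with no filter at all) is what makes this
 * function schedule the ithread.  A rough registration sketch with a
 * hypothetical softc and example_device_*() helpers:
 */
#if 0	/* illustrative only, not compiled */
struct example_softc {
	device_t	 sc_dev;
	struct resource	*sc_irq;
	void		*sc_intrhand;
};

static int
example_filter(void *arg)
{
	struct example_softc *sc = arg;

	/* Primary interrupt context: no sleeping, keep this short. */
	if (!example_device_asserted(sc))
		return (FILTER_STRAY);
	example_device_mask(sc);
	return (FILTER_SCHEDULE_THREAD);
}

static void
example_ithread(void *arg)
{
	struct example_softc *sc = arg;

	/*
	 * Runs in the ithread; regular mutexes are fine, but sleeping
	 * is still forbidden for hardware interrupt handlers (see
	 * THREAD_NO_SLEEPING in ithread_execute_handlers() above).
	 */
	example_device_service(sc);
}

static int
example_setup_intr(struct example_softc *sc)
{

	return (bus_setup_intr(sc->sc_dev, sc->sc_irq,
	    INTR_TYPE_MISC | INTR_MPSAFE, example_filter, example_ithread,
	    sc, &sc->sc_intrhand));
}
#endif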
1557bafe5a31SPaolo Pisati #else
1558bafe5a31SPaolo Pisati /*
1559bafe5a31SPaolo Pisati  * This is the main code for interrupt threads.
1560bafe5a31SPaolo Pisati  */
1561bafe5a31SPaolo Pisati static void
1562bafe5a31SPaolo Pisati ithread_loop(void *arg)
1563bafe5a31SPaolo Pisati {
1564bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
1565bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1566bafe5a31SPaolo Pisati 	struct intr_event *ie;
1567bafe5a31SPaolo Pisati 	struct thread *td;
1568bafe5a31SPaolo Pisati 	struct proc *p;
1569bafe5a31SPaolo Pisati 	int priv;
1570e4cd31ddSJeff Roberson 	int wake;
1571bafe5a31SPaolo Pisati 
1572bafe5a31SPaolo Pisati 	td = curthread;
1573bafe5a31SPaolo Pisati 	p = td->td_proc;
1574bafe5a31SPaolo Pisati 	ih = (struct intr_handler *)arg;
1575bafe5a31SPaolo Pisati 	priv = (ih->ih_thread != NULL) ? 1 : 0;
1576bafe5a31SPaolo Pisati 	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
1577bafe5a31SPaolo Pisati 	KASSERT(ithd->it_thread == td,
1578bafe5a31SPaolo Pisati 	    ("%s: ithread and proc linkage out of sync", __func__));
1579bafe5a31SPaolo Pisati 	ie = ithd->it_event;
1580bafe5a31SPaolo Pisati 	ie->ie_count = 0;
1581e4cd31ddSJeff Roberson 	wake = 0;
1582bafe5a31SPaolo Pisati 
1583bafe5a31SPaolo Pisati 	/*
1584bafe5a31SPaolo Pisati 	 * As long as we have interrupts outstanding, go through the
1585bafe5a31SPaolo Pisati 	 * list of handlers, giving each one a go at it.
1586bafe5a31SPaolo Pisati 	 */
1587bafe5a31SPaolo Pisati 	for (;;) {
1588bafe5a31SPaolo Pisati 		/*
1589bafe5a31SPaolo Pisati 		 * If we are an orphaned thread, then just die.
1590bafe5a31SPaolo Pisati 		 */
1591bafe5a31SPaolo Pisati 		if (ithd->it_flags & IT_DEAD) {
1592bafe5a31SPaolo Pisati 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
15937ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
1594bafe5a31SPaolo Pisati 			free(ithd, M_ITHREAD);
1595ca9a0ddfSJulian Elischer 			kthread_exit();
1596bafe5a31SPaolo Pisati 		}
1597bafe5a31SPaolo Pisati 
1598bafe5a31SPaolo Pisati 		/*
1599bafe5a31SPaolo Pisati 		 * Service interrupts.  If another interrupt arrives while
1600bafe5a31SPaolo Pisati 		 * we are running, it will set it_need to note that we
1601bafe5a31SPaolo Pisati 		 * should make another pass.
1602283dfee9SKonstantin Belousov 		 *
1603283dfee9SKonstantin Belousov 		 * The load_acq part of the following cmpset ensures
1604283dfee9SKonstantin Belousov 		 * that the load of ih_need in ithread_execute_handlers()
1605283dfee9SKonstantin Belousov 		 * is ordered after the load of it_need here.
1606bafe5a31SPaolo Pisati 		 */
1607283dfee9SKonstantin Belousov 		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
1608bafe5a31SPaolo Pisati 			if (priv)
1609bafe5a31SPaolo Pisati 				priv_ithread_execute_handler(p, ih);
1610bafe5a31SPaolo Pisati 			else
1611bafe5a31SPaolo Pisati 				ithread_execute_handlers(p, ie);
1612bafe5a31SPaolo Pisati 		}
1613bafe5a31SPaolo Pisati 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1614bafe5a31SPaolo Pisati 		mtx_assert(&Giant, MA_NOTOWNED);
1615bafe5a31SPaolo Pisati 
1616bafe5a31SPaolo Pisati 		/*
1617bafe5a31SPaolo Pisati 		 * Processed all our interrupts.  Now acquire the thread
1618bafe5a31SPaolo Pisati 		 * lock.  This may take a while and it_need may get
1619bafe5a31SPaolo Pisati 		 * set again, so we have to check it again.
1620bafe5a31SPaolo Pisati 		 */
1621982d11f8SJeff Roberson 		thread_lock(td);
162203bbcb2fSKonstantin Belousov 		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
162303bbcb2fSKonstantin Belousov 		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
1624bafe5a31SPaolo Pisati 			TD_SET_IWAIT(td);
1625bafe5a31SPaolo Pisati 			ie->ie_count = 0;
16268df78c41SJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT, NULL);
1627bafe5a31SPaolo Pisati 		}
1628e4cd31ddSJeff Roberson 		if (ithd->it_flags & IT_WAIT) {
1629e4cd31ddSJeff Roberson 			wake = 1;
1630e4cd31ddSJeff Roberson 			ithd->it_flags &= ~IT_WAIT;
1631e4cd31ddSJeff Roberson 		}
1632982d11f8SJeff Roberson 		thread_unlock(td);
1633e4cd31ddSJeff Roberson 		if (wake) {
1634e4cd31ddSJeff Roberson 			wakeup(ithd);
1635e4cd31ddSJeff Roberson 			wake = 0;
1636e4cd31ddSJeff Roberson 		}
1637bafe5a31SPaolo Pisati 	}
1638bafe5a31SPaolo Pisati }
1639bafe5a31SPaolo Pisati 
1640bafe5a31SPaolo Pisati /*
1641bafe5a31SPaolo Pisati  * Main loop for interrupt filter.
1642bafe5a31SPaolo Pisati  *
1643bafe5a31SPaolo Pisati  * Some architectures (i386, amd64 and arm) require the optional frame
1644bafe5a31SPaolo Pisati  * parameter, and use it as the main argument for fast handler execution
1645bafe5a31SPaolo Pisati  * when ih_argument == NULL.
1646bafe5a31SPaolo Pisati  *
1647bafe5a31SPaolo Pisati  * Return value:
1648bafe5a31SPaolo Pisati  * o FILTER_STRAY:              No filter recognized the event, and no
1649bafe5a31SPaolo Pisati  *                              filter-less handler is registered on this
1650bafe5a31SPaolo Pisati  *                              line.
1651bafe5a31SPaolo Pisati  * o FILTER_HANDLED:            A filter claimed the event and served it.
1652bafe5a31SPaolo Pisati  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
1653bafe5a31SPaolo Pisati  *                              least one filter-less handler on this line.
1654bafe5a31SPaolo Pisati  * o FILTER_HANDLED |
1655bafe5a31SPaolo Pisati  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
1656bafe5a31SPaolo Pisati  *                              scheduling the per-handler ithread.
1657bafe5a31SPaolo Pisati  *
1658bafe5a31SPaolo Pisati  * In case an ithread has to be scheduled, in *ithd there will be a
1659bafe5a31SPaolo Pisati  * pointer to a struct intr_thread containing the thread to be
1660bafe5a31SPaolo Pisati  * scheduled.
1661bafe5a31SPaolo Pisati  */
1662bafe5a31SPaolo Pisati 
16631ee1b687SJohn Baldwin static int
1664bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1665bafe5a31SPaolo Pisati 		 struct intr_thread **ithd)
1666bafe5a31SPaolo Pisati {
1667bafe5a31SPaolo Pisati 	struct intr_handler *ih;
1668bafe5a31SPaolo Pisati 	void *arg;
1669bafe5a31SPaolo Pisati 	int ret, thread_only;
1670bafe5a31SPaolo Pisati 
1671bafe5a31SPaolo Pisati 	ret = 0;
1672bafe5a31SPaolo Pisati 	thread_only = 0;
1673bafe5a31SPaolo Pisati 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1674bafe5a31SPaolo Pisati 		/*
1675bafe5a31SPaolo Pisati 		 * Execute fast interrupt handlers directly.
1676bafe5a31SPaolo Pisati 		 * To support clock handlers, if a handler registers
1677bafe5a31SPaolo Pisati 		 * with a NULL argument, then we pass it a pointer to
1678bafe5a31SPaolo Pisati 		 * a trapframe as its argument.
1679bafe5a31SPaolo Pisati 		 */
1680bafe5a31SPaolo Pisati 		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1681bafe5a31SPaolo Pisati 
1682bafe5a31SPaolo Pisati 		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1683bafe5a31SPaolo Pisati 		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1684bafe5a31SPaolo Pisati 
1685bafe5a31SPaolo Pisati 		if (ih->ih_filter != NULL)
1686bafe5a31SPaolo Pisati 			ret = ih->ih_filter(arg);
1687bafe5a31SPaolo Pisati 		else {
1688bafe5a31SPaolo Pisati 			thread_only = 1;
1689bafe5a31SPaolo Pisati 			continue;
1690bafe5a31SPaolo Pisati 		}
169189fc20ccSAndriy Gapon 		KASSERT(ret == FILTER_STRAY ||
169289fc20ccSAndriy Gapon 		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
169389fc20ccSAndriy Gapon 		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
169489fc20ccSAndriy Gapon 		    ("%s: incorrect return value %#x from %s", __func__, ret,
169589fc20ccSAndriy Gapon 		    ih->ih_name));
1696bafe5a31SPaolo Pisati 		if (ret & FILTER_STRAY)
1697bafe5a31SPaolo Pisati 			continue;
1698bafe5a31SPaolo Pisati 		else {
1699bafe5a31SPaolo Pisati 			*ithd = ih->ih_thread;
1700bafe5a31SPaolo Pisati 			return (ret);
1701bafe5a31SPaolo Pisati 		}
1702bafe5a31SPaolo Pisati 	}
1703bafe5a31SPaolo Pisati 
1704bafe5a31SPaolo Pisati 	/*
1705bafe5a31SPaolo Pisati 	 * No filters handled the interrupt and we have at least
1706bafe5a31SPaolo Pisati 	 * one handler without a filter.  In this case, we schedule
1707bafe5a31SPaolo Pisati 	 * all of the filter-less handlers to run in the ithread.
1708bafe5a31SPaolo Pisati 	 */
1709bafe5a31SPaolo Pisati 	if (thread_only) {
1710bafe5a31SPaolo Pisati 		*ithd = ie->ie_thread;
1711bafe5a31SPaolo Pisati 		return (FILTER_SCHEDULE_THREAD);
1712bafe5a31SPaolo Pisati 	}
1713bafe5a31SPaolo Pisati 	return (FILTER_STRAY);
1714bafe5a31SPaolo Pisati }
1715bafe5a31SPaolo Pisati 
1716bafe5a31SPaolo Pisati /*
1717bafe5a31SPaolo Pisati  * Main interrupt handling body.
1718bafe5a31SPaolo Pisati  *
1719bafe5a31SPaolo Pisati  * Input:
1720bafe5a31SPaolo Pisati  * o ie:                        the event connected to this interrupt.
1721bafe5a31SPaolo Pisati  * o frame:                     some archs (e.g. i386) pass a frame to some
1722bafe5a31SPaolo Pisati  *                              handlers as their main argument.
1723bafe5a31SPaolo Pisati  * Return value:
1724bafe5a31SPaolo Pisati  * o 0:                         everything ok.
1725bafe5a31SPaolo Pisati  * o EINVAL:                    stray interrupt.
1726bafe5a31SPaolo Pisati  */
1727bafe5a31SPaolo Pisati int
1728bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1729bafe5a31SPaolo Pisati {
1730bafe5a31SPaolo Pisati 	struct intr_thread *ithd;
17311f255bd3SAlexander Motin 	struct trapframe *oldframe;
1732bafe5a31SPaolo Pisati 	struct thread *td;
1733bafe5a31SPaolo Pisati 	int thread;
1734bafe5a31SPaolo Pisati 
1735bafe5a31SPaolo Pisati 	ithd = NULL;
1736bafe5a31SPaolo Pisati 	td = curthread;
1737bafe5a31SPaolo Pisati 
1738bafe5a31SPaolo Pisati 	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1739bafe5a31SPaolo Pisati 		return (EINVAL);
1740bafe5a31SPaolo Pisati 
1741bafe5a31SPaolo Pisati 	td->td_intr_nesting_level++;
1742bafe5a31SPaolo Pisati 	thread = 0;
1743bafe5a31SPaolo Pisati 	critical_enter();
17441f255bd3SAlexander Motin 	oldframe = td->td_intr_frame;
17451f255bd3SAlexander Motin 	td->td_intr_frame = frame;
1746bafe5a31SPaolo Pisati 	thread = intr_filter_loop(ie, frame, &ithd);
1747bafe5a31SPaolo Pisati 	if (thread & FILTER_HANDLED) {
17481ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
17491ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
1750bafe5a31SPaolo Pisati 	} else {
17511ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
17521ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
1753bafe5a31SPaolo Pisati 	}
17541f255bd3SAlexander Motin 	td->td_intr_frame = oldframe;
1755bafe5a31SPaolo Pisati 	critical_exit();
1756bafe5a31SPaolo Pisati 
1757bafe5a31SPaolo Pisati 	/* Interrupt storm logic */
1758bafe5a31SPaolo Pisati 	if (thread & FILTER_STRAY) {
1759bafe5a31SPaolo Pisati 		ie->ie_count++;
1760bafe5a31SPaolo Pisati 		if (ie->ie_count < intr_storm_threshold)
1761bafe5a31SPaolo Pisati 			printf("Interrupt stray detection not present\n");
1762bafe5a31SPaolo Pisati 	}
1763bafe5a31SPaolo Pisati 
1764bafe5a31SPaolo Pisati 	/* Schedule an ithread if needed. */
1765bafe5a31SPaolo Pisati 	if (thread & FILTER_SCHEDULE_THREAD) {
1766bafe5a31SPaolo Pisati 		if (intr_event_schedule_thread(ie, ithd) != 0)
1767bafe5a31SPaolo Pisati 			panic("%s: impossible stray interrupt", __func__);
1768bafe5a31SPaolo Pisati 	}
1769bafe5a31SPaolo Pisati 	td->td_intr_nesting_level--;
1770bafe5a31SPaolo Pisati 	return (0);
1771bafe5a31SPaolo Pisati }
1772bafe5a31SPaolo Pisati #endif
17731931cf94SJohn Baldwin 
17748b201c42SJohn Baldwin #ifdef DDB
17758b201c42SJohn Baldwin /*
17768b201c42SJohn Baldwin  * Dump details about an interrupt handler
17778b201c42SJohn Baldwin  */
17788b201c42SJohn Baldwin static void
1779e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih)
17808b201c42SJohn Baldwin {
17818b201c42SJohn Baldwin 	int comma;
17828b201c42SJohn Baldwin 
17838b201c42SJohn Baldwin 	db_printf("\t%-10s ", ih->ih_name);
17848b201c42SJohn Baldwin 	switch (ih->ih_pri) {
17858b201c42SJohn Baldwin 	case PI_REALTIME:
17868b201c42SJohn Baldwin 		db_printf("CLK ");
17878b201c42SJohn Baldwin 		break;
17888b201c42SJohn Baldwin 	case PI_AV:
17898b201c42SJohn Baldwin 		db_printf("AV  ");
17908b201c42SJohn Baldwin 		break;
1791d3305205SJohn Baldwin 	case PI_TTY:
17928b201c42SJohn Baldwin 		db_printf("TTY ");
17938b201c42SJohn Baldwin 		break;
17948b201c42SJohn Baldwin 	case PI_NET:
17958b201c42SJohn Baldwin 		db_printf("NET ");
17968b201c42SJohn Baldwin 		break;
17978b201c42SJohn Baldwin 	case PI_DISK:
17988b201c42SJohn Baldwin 		db_printf("DISK");
17998b201c42SJohn Baldwin 		break;
18008b201c42SJohn Baldwin 	case PI_DULL:
18018b201c42SJohn Baldwin 		db_printf("DULL");
18028b201c42SJohn Baldwin 		break;
18038b201c42SJohn Baldwin 	default:
18048b201c42SJohn Baldwin 		if (ih->ih_pri >= PI_SOFT)
18058b201c42SJohn Baldwin 			db_printf("SWI ");
18068b201c42SJohn Baldwin 		else
18078b201c42SJohn Baldwin 			db_printf("%4u", ih->ih_pri);
18088b201c42SJohn Baldwin 		break;
18098b201c42SJohn Baldwin 	}
18108b201c42SJohn Baldwin 	db_printf(" ");
1811b887a155SKonstantin Belousov 	if (ih->ih_filter != NULL) {
1812b887a155SKonstantin Belousov 		db_printf("[F]");
1813b887a155SKonstantin Belousov 		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1814b887a155SKonstantin Belousov 	}
1815b887a155SKonstantin Belousov 	if (ih->ih_handler != NULL) {
1816b887a155SKonstantin Belousov 		if (ih->ih_filter != NULL)
1817b887a155SKonstantin Belousov 			db_printf(",");
1818b887a155SKonstantin Belousov 		db_printf("[H]");
18198b201c42SJohn Baldwin 		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1820b887a155SKonstantin Belousov 	}
18218b201c42SJohn Baldwin 	db_printf("(%p)", ih->ih_argument);
18228b201c42SJohn Baldwin 	if (ih->ih_need ||
1823ef544f63SPaolo Pisati 	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
18248b201c42SJohn Baldwin 	    IH_MPSAFE)) != 0) {
18258b201c42SJohn Baldwin 		db_printf(" {");
18268b201c42SJohn Baldwin 		comma = 0;
18278b201c42SJohn Baldwin 		if (ih->ih_flags & IH_EXCLUSIVE) {
18288b201c42SJohn Baldwin 			if (comma)
18298b201c42SJohn Baldwin 				db_printf(", ");
18308b201c42SJohn Baldwin 			db_printf("EXCL");
18318b201c42SJohn Baldwin 			comma = 1;
18328b201c42SJohn Baldwin 		}
18338b201c42SJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY) {
18348b201c42SJohn Baldwin 			if (comma)
18358b201c42SJohn Baldwin 				db_printf(", ");
18368b201c42SJohn Baldwin 			db_printf("ENTROPY");
18378b201c42SJohn Baldwin 			comma = 1;
18388b201c42SJohn Baldwin 		}
18398b201c42SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
18408b201c42SJohn Baldwin 			if (comma)
18418b201c42SJohn Baldwin 				db_printf(", ");
18428b201c42SJohn Baldwin 			db_printf("DEAD");
18438b201c42SJohn Baldwin 			comma = 1;
18448b201c42SJohn Baldwin 		}
18458b201c42SJohn Baldwin 		if (ih->ih_flags & IH_MPSAFE) {
18468b201c42SJohn Baldwin 			if (comma)
18478b201c42SJohn Baldwin 				db_printf(", ");
18488b201c42SJohn Baldwin 			db_printf("MPSAFE");
18498b201c42SJohn Baldwin 			comma = 1;
18508b201c42SJohn Baldwin 		}
18518b201c42SJohn Baldwin 		if (ih->ih_need) {
18528b201c42SJohn Baldwin 			if (comma)
18538b201c42SJohn Baldwin 				db_printf(", ");
18548b201c42SJohn Baldwin 			db_printf("NEED");
18558b201c42SJohn Baldwin 		}
18568b201c42SJohn Baldwin 		db_printf("}");
18578b201c42SJohn Baldwin 	}
18588b201c42SJohn Baldwin 	db_printf("\n");
18598b201c42SJohn Baldwin }
18608b201c42SJohn Baldwin 
18618b201c42SJohn Baldwin /*
1862e0f66ef8SJohn Baldwin  * Dump details about an interrupt event.
18638b201c42SJohn Baldwin  */
18648b201c42SJohn Baldwin void
1865e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers)
18668b201c42SJohn Baldwin {
1867e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
1868e0f66ef8SJohn Baldwin 	struct intr_thread *it;
18698b201c42SJohn Baldwin 	int comma;
18708b201c42SJohn Baldwin 
1871e0f66ef8SJohn Baldwin 	db_printf("%s ", ie->ie_fullname);
1872e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
1873e0f66ef8SJohn Baldwin 	if (it != NULL)
1874e0f66ef8SJohn Baldwin 		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1875e0f66ef8SJohn Baldwin 	else
1876e0f66ef8SJohn Baldwin 		db_printf("(no thread)");
1877e0f66ef8SJohn Baldwin 	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1878e0f66ef8SJohn Baldwin 	    (it != NULL && it->it_need)) {
18798b201c42SJohn Baldwin 		db_printf(" {");
18808b201c42SJohn Baldwin 		comma = 0;
1881e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_SOFT) {
18828b201c42SJohn Baldwin 			db_printf("SOFT");
18838b201c42SJohn Baldwin 			comma = 1;
18848b201c42SJohn Baldwin 		}
1885e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ENTROPY) {
18868b201c42SJohn Baldwin 			if (comma)
18878b201c42SJohn Baldwin 				db_printf(", ");
18888b201c42SJohn Baldwin 			db_printf("ENTROPY");
18898b201c42SJohn Baldwin 			comma = 1;
18908b201c42SJohn Baldwin 		}
1891e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD) {
18928b201c42SJohn Baldwin 			if (comma)
18938b201c42SJohn Baldwin 				db_printf(", ");
1894e0f66ef8SJohn Baldwin 			db_printf("ADDING_THREAD");
18958b201c42SJohn Baldwin 			comma = 1;
18968b201c42SJohn Baldwin 		}
1897e0f66ef8SJohn Baldwin 		if (it != NULL && it->it_need) {
18988b201c42SJohn Baldwin 			if (comma)
18998b201c42SJohn Baldwin 				db_printf(", ");
19008b201c42SJohn Baldwin 			db_printf("NEED");
19018b201c42SJohn Baldwin 		}
19028b201c42SJohn Baldwin 		db_printf("}");
19038b201c42SJohn Baldwin 	}
19048b201c42SJohn Baldwin 	db_printf("\n");
19058b201c42SJohn Baldwin 
19068b201c42SJohn Baldwin 	if (handlers)
1907e0f66ef8SJohn Baldwin 		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
19088b201c42SJohn Baldwin 		    db_dump_intrhand(ih);
19098b201c42SJohn Baldwin }
1910e0f66ef8SJohn Baldwin 
1911e0f66ef8SJohn Baldwin /*
1912e0f66ef8SJohn Baldwin  * Dump data about interrupt handlers
1913e0f66ef8SJohn Baldwin  */
1914e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr)
1915e0f66ef8SJohn Baldwin {
1916e0f66ef8SJohn Baldwin 	struct intr_event *ie;
191719e9205aSJohn Baldwin 	int all, verbose;
1918e0f66ef8SJohn Baldwin 
1919dc15eac0SEd Schouten 	verbose = strchr(modif, 'v') != NULL;
1920dc15eac0SEd Schouten 	all = strchr(modif, 'a') != NULL;
1921e0f66ef8SJohn Baldwin 	TAILQ_FOREACH(ie, &event_list, ie_list) {
1922e0f66ef8SJohn Baldwin 		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1923e0f66ef8SJohn Baldwin 			continue;
1924e0f66ef8SJohn Baldwin 		db_dump_intr_event(ie, verbose);
192519e9205aSJohn Baldwin 		if (db_pager_quit)
192619e9205aSJohn Baldwin 			break;
1927e0f66ef8SJohn Baldwin 	}
1928e0f66ef8SJohn Baldwin }
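
/*
 * From the ddb(4) prompt, "show intr" lists events that have handlers,
 * "show intr/a" includes empty ones, and "show intr/v" additionally
 * dumps each handler via db_dump_intrhand() (modifiers may be combined,
 * e.g. "show intr/av").
 */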
19298b201c42SJohn Baldwin #endif /* DDB */
19308b201c42SJohn Baldwin 
1931b4151f71SJohn Baldwin /*
19328088699fSJohn Baldwin  * Start standard software interrupt threads
19331931cf94SJohn Baldwin  */
19341931cf94SJohn Baldwin static void
1935b4151f71SJohn Baldwin start_softintr(void *dummy)
19361931cf94SJohn Baldwin {
1937b4151f71SJohn Baldwin 
19388d809d50SJeff Roberson 	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
19398d809d50SJeff Roberson 		panic("died while creating vm swi ithread");
19401931cf94SJohn Baldwin }
1941237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1942237fdd78SRobert Watson     NULL);
19431931cf94SJohn Baldwin 
1944d279178dSThomas Moestl /*
1945d279178dSThomas Moestl  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1946d279178dSThomas Moestl  * The data for this is machine dependent, and the declarations are in
1947d279178dSThomas Moestl  * machine-dependent code.  The layout of intrnames and intrcnt, however,
1948d279178dSThomas Moestl  * is machine independent.
1949d279178dSThomas Moestl  *
1950d279178dSThomas Moestl  * We do not know the length of intrcnt and intrnames at compile time, so
1951d279178dSThomas Moestl  * calculate things at run time.
1952d279178dSThomas Moestl  */
1953d279178dSThomas Moestl static int
1954d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1955d279178dSThomas Moestl {
1956521ea19dSAttilio Rao 	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
1957d279178dSThomas Moestl }
1958d279178dSThomas Moestl 
1959d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
1960d279178dSThomas Moestl     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
1961d279178dSThomas Moestl 
1962d279178dSThomas Moestl static int
1963d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1964d279178dSThomas Moestl {
196585729c2cSJuli Mallett #ifdef SCTL_MASK32
196685729c2cSJuli Mallett 	uint32_t *intrcnt32;
196785729c2cSJuli Mallett 	unsigned i;
196885729c2cSJuli Mallett 	int error;
196985729c2cSJuli Mallett 
197085729c2cSJuli Mallett 	if (req->flags & SCTL_MASK32) {
197185729c2cSJuli Mallett 		if (!req->oldptr)
197285729c2cSJuli Mallett 			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
197385729c2cSJuli Mallett 		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
197485729c2cSJuli Mallett 		if (intrcnt32 == NULL)
197585729c2cSJuli Mallett 			return (ENOMEM);
197685729c2cSJuli Mallett 		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
197785729c2cSJuli Mallett 			intrcnt32[i] = intrcnt[i];
197885729c2cSJuli Mallett 		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
197985729c2cSJuli Mallett 		free(intrcnt32, M_TEMP);
198085729c2cSJuli Mallett 		return (error);
198185729c2cSJuli Mallett 	}
198285729c2cSJuli Mallett #endif
1983521ea19dSAttilio Rao 	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
1984d279178dSThomas Moestl }
1985d279178dSThomas Moestl 
1986d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
1987d279178dSThomas Moestl     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
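
/*
 * These two sysctls are what systat(1) and similar tools read:
 * hw.intrnames is a packed sequence of NUL-terminated names and
 * hw.intrcnt a parallel array of u_long counters.  A small userland
 * sketch of walking them (error handling omitted); this is an
 * illustration, not part of the kernel source.
 */
#if 0	/* illustrative userland program, not compiled here */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	u_long *cnt;
	char *names, *cp;
	size_t cntlen, nameslen, i;

	sysctlbyname("hw.intrcnt", NULL, &cntlen, NULL, 0);
	sysctlbyname("hw.intrnames", NULL, &nameslen, NULL, 0);
	cnt = malloc(cntlen);
	names = malloc(nameslen);
	sysctlbyname("hw.intrcnt", cnt, &cntlen, NULL, 0);
	sysctlbyname("hw.intrnames", names, &nameslen, NULL, 0);

	/* Names are packed back to back, one per counter slot. */
	cp = names;
	for (i = 0; i < cntlen / sizeof(u_long); i++) {
		if (*cp == '\0')
			break;
		if (cnt[i] != 0)
			printf("%-20s %lu\n", cp, cnt[i]);
		cp += strlen(cp) + 1;
	}
	free(cnt);
	free(names);
	return (0);
}
#endif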
19888b201c42SJohn Baldwin 
19898b201c42SJohn Baldwin #ifdef DDB
19908b201c42SJohn Baldwin /*
19918b201c42SJohn Baldwin  * DDB command to dump the interrupt statistics.
19928b201c42SJohn Baldwin  */
19938b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
19948b201c42SJohn Baldwin {
19958b201c42SJohn Baldwin 	u_long *i;
19968b201c42SJohn Baldwin 	char *cp;
1997521ea19dSAttilio Rao 	u_int j;
19988b201c42SJohn Baldwin 
19998b201c42SJohn Baldwin 	cp = intrnames;
2000521ea19dSAttilio Rao 	j = 0;
2001521ea19dSAttilio Rao 	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
2002521ea19dSAttilio Rao 	    i++, j++) {
20038b201c42SJohn Baldwin 		if (*cp == '\0')
20048b201c42SJohn Baldwin 			break;
20058b201c42SJohn Baldwin 		if (*i != 0)
20068b201c42SJohn Baldwin 			db_printf("%s\t%lu\n", cp, *i);
20078b201c42SJohn Baldwin 		cp += strlen(cp) + 1;
20088b201c42SJohn Baldwin 	}
20098b201c42SJohn Baldwin }
20108b201c42SJohn Baldwin #endif
2011