xref: /freebsd/sys/kern/kern_intr.c (revision f02c783757365340763739e58ef0e68ec13a0f8f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
	int	it_waiting;		/* Waiting in the runq. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct	intr_entropy {
	struct	thread *td;
	uintptr_t event;
};

struct	intr_event *clk_intr_event;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
#ifdef HWPMC_HOOKS
static int intr_hwpmc_waiting_report_threshold = 1;
SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
    &intr_hwpmc_waiting_report_threshold, 1,
    "Threshold for reporting number of events in a workq");
#define	PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL)
#endif
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct trapframe *frame);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , intr, all);
PMC_SOFT_DEFINE( , , intr, ithread);
PMC_SOFT_DEFINE( , , intr, filter);
PMC_SOFT_DEFINE( , , intr, stray);
PMC_SOFT_DEFINE( , , intr, schedule);
PMC_SOFT_DEFINE( , , intr, waiting);

#define PMC_SOFT_CALL_INTR_HLPR(event, frame)			\
do {								\
	if (frame != NULL)					\
		PMC_SOFT_CALL_TF( , , intr, event, frame);	\
	else							\
		PMC_SOFT_CALL( , , intr, event);		\
} while (0)
#endif

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;          /* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}
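
/*
 * Example (informal sketch; the "foo" driver names are hypothetical):
 * consumers normally do not call intr_priority() directly.  The interrupt
 * type flags given to bus_setup_intr(9) are translated here when the
 * handler is registered, so a network driver requesting
 *
 *	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    foo_filter, foo_intr, sc, &sc->intrhand);
 *
 * ends up with an ithread running at PI_NET via the INTR_TYPE_NET case
 * above.
 */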

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_ithread_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space, flags;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	flags = 0;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		flags |= ih->ih_flags;
	}
	ie->ie_hflags = flags;

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
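
/*
 * Worked illustration (informal): if ie_fullname has room for
 * "irq11: em0 em1" but two more handlers are registered whose names do
 * not fit, the loop above appends one '+' per missing name, yielding
 * "irq11: em0 em1++".  If the buffer fills while '+'s remain to be added,
 * the final '+' is turned into '*' to flag further, uncounted handlers.
 */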

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
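
/*
 * Example (hedged sketch; the "mypic" callbacks and isrc structure are
 * hypothetical): machine-dependent code typically creates one event per
 * interrupt source at registration time, supplying its PIC callbacks:
 *
 *	error = intr_event_create(&isrc->is_event, isrc, 0, irq,
 *	    mypic_pre_ithread, mypic_post_ithread, mypic_post_filter,
 *	    mypic_assign_cpu, "irq%d:", irq);
 *	if (error != 0)
 *		return (error);
 */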

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}
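
/*
 * Example (hedged sketch): pinning a previously created event, both the
 * hardware interrupt and its ithread, to CPU 2.  EOPNOTSUPP indicates that
 * the platform cannot steer this interrupt and is usually not fatal:
 *
 *	error = intr_event_bind(ie, 2);
 *	if (error != 0 && error != EOPNOTSUPP)
 *		return (error);
 */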

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
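
/*
 * Example (userland sketch, assuming an IRQ numbered 16 exists): the modes
 * above back the cpuset(2) interface, so pinning an interrupt from a
 * program looks like:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, 16,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */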

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
		    &td, RFSTOPPED | RFHIGHPID,
		    0, "intr", "%s", name);
	if (error)
		panic("kproc_kthread_add() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_wakeup(td, SRQ_INTR);
	} else
		thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;
	if (flags & INTR_TYPE_NET)
		ih->ih_flags |= IH_NET;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
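
/*
 * Example (hedged sketch; the "foo" names are hypothetical): registering a
 * filter plus ithread handler pair on an existing event:
 *
 *	void *cookie;
 *
 *	error = intr_event_add_handler(ie, "foo0", foo_filter, foo_intr, sc,
 *	    intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *	    &cookie);
 *
 * The returned cookie is later passed to intr_event_remove_handler() to
 * tear the handler down.
 */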

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
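
/*
 * Example (hedged sketch): this function backs bus_describe_intr(9), which
 * drivers with several vectors use to tell their handlers apart, e.g.:
 *
 *	bus_describe_intr(dev, sc->irq_res[i], sc->intrhand[i], "rxq%d", i);
 *
 * turning a handler named "foo0" into "foo0:rxq0" in interrupt listings.
 */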

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in ISR context at the time of the
 * call, then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}
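
/*
 * Illustration (informal): suppose ie_phase is 0.  A concurrent
 * intr_event_handle() increments ie_active[0] before walking ie_handlers.
 * The barrier flips ie_phase to 1, so later interrupts are counted in
 * ie_active[1], and then spins until ie_active[0] drains to zero.  After
 * that, no CPU can still be iterating the old view of the handler list.
 */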

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie, NULL);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers. This is intended for LinuxKPI drivers only.
 * Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie, NULL);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() not only acts as a barrier, it also
	 * triggers a check for any interrupts left pending while the
	 * handler was suspended.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
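
/*
 * Example (hedged sketch): a driver quiescing its interrupt handler around
 * a hardware reset might bracket the operation like this; foo_reset_hw()
 * is a hypothetical reset routine:
 *
 *	intr_event_suspend_handler(sc->cookie);
 *	foo_reset_hw(sc);
 *	intr_event_resume_handler(sc->cookie);
 */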

static int
intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_hflags & IH_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy),
		    RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process",
	    ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
#ifdef HWPMC_HOOKS
		it->it_waiting = 0;
		if (PMC_HOOK_INSTALLED_ANY())
			PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
#endif
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
		    td->td_proc->p_pid, td->td_name);
		TD_CLR_IWAIT(td);
		sched_wakeup(td, SRQ_INTR);
	} else {
#ifdef HWPMC_HOOKS
		it->it_waiting++;
		if (PMC_HOOK_INSTALLED_ANY() &&
		    (it->it_waiting >= intr_hwpmc_waiting_report_threshold))
			PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
#endif
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
		    TD_GET_STATE(td));
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
	    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error = 0;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	if (handler != NULL) {
		error = intr_event_add_handler(ie, name, NULL, handler, arg,
		    PI_SWI(pri), flags, cookiep);
	}
	return (error);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	if ((flags & SWI_FROMNMI) == 0) {
		entropy.event = (uintptr_t)ih;
		entropy.td = curthread;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
	}

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (flags & SWI_DELAY)
		return;

	if (flags & SWI_FROMNMI) {
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
		KASSERT(ie == clk_intr_event,
		    ("SWI_FROMNMI used with an event other than clk_intr_event"));
		ipi_self_from_nmi(IPI_SWI);
#endif
	} else {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie, NULL);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
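
/*
 * Example (hedged sketch; the "foo" names are hypothetical): creating a
 * private soft interrupt at boot:
 *
 *	static void *foo_swi_cookie;
 *
 *	error = swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_CLOCK,
 *	    INTR_MPSAFE, &foo_swi_cookie);
 *
 * and later triggering a run of foo_swi_handler(), e.g. from a timeout:
 *
 *	swi_sched(foo_swi_cookie, 0);
 */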

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be
		 * removed, update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter-only handlers. */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers. */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back-to-back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct epoch_tracker et;
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake, epoch_count;
	bool needs_epoch;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		needs_epoch =
		    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
		if (needs_epoch) {
			epoch_count = 0;
			NET_EPOCH_ENTER(et);
		}
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
			ithread_execute_handlers(p, ie);
			if (needs_epoch &&
			    ++epoch_count >= intr_epoch_batch) {
				NET_EPOCH_EXIT(et);
				epoch_count = 0;
				NET_EPOCH_ENTER(et);
			}
		}
		if (needs_epoch)
			NET_EPOCH_EXIT(et);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT);
		} else {
			if (ithd->it_flags & IT_WAIT) {
				wake = 1;
				ithd->it_flags &= ~IT_WAIT;
			}
			thread_unlock(td);
		}
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL_TF( , , intr, all, frame);
#endif
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;
#ifdef HWPMC_HOOKS
		if (ret & FILTER_SCHEDULE_THREAD)
			PMC_SOFT_CALL_TF( , , intr, ithread, frame);
		else if (ret & FILTER_HANDLED)
			PMC_SOFT_CALL_TF( , , intr, filter, frame);
		else if (ret == FILTER_STRAY)
			PMC_SOFT_CALL_TF( , , intr, stray, frame);
#endif

		/*
		 * Wrapper handler special handling:
		 *
		 * In some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie, frame);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}
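
/*
 * Example (hedged sketch; the "mypic" names are hypothetical): a
 * machine-dependent interrupt dispatcher is the expected caller:
 *
 *	void
 *	mypic_dispatch(u_int irq, struct trapframe *tf)
 *	{
 *		struct intr_event *ie;
 *
 *		ie = mypic_events[irq];
 *		if (intr_event_handle(ie, tf) != 0)
 *			mypic_stray(irq);
 *	}
 *
 * where mypic_stray() would typically mask and log the stray source.
 */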

#ifdef DDB
/*
 * Dump details about an interrupt handler.
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_INTR:
		db_printf("INTR");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an interrupt event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
		    db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers.
 */
DB_SHOW_COMMAND_FLAGS(intr, db_show_intr, DB_CMD_MEMSAFE)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads.
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK,
	    INTR_MPSAFE, NULL))
		panic("died while creating clk swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrnames, "",
    "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrcnt, "",
    "Interrupt Counts");
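
/*
 * Example (userland sketch): consumers such as systat(1) and vmstat(8)
 * read these sysctls with sysctlbyname(3), sizing the buffer first:
 *
 *	size_t len;
 *	u_long *cnt;
 *
 *	if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) != 0)
 *		err(1, "hw.intrcnt");
 *	cnt = malloc(len);
 *	if (sysctlbyname("hw.intrcnt", cnt, &len, NULL, 0) != 0)
 *		err(1, "hw.intrcnt");
 */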

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND_FLAGS(intrcnt, db_show_intrcnt, DB_CMD_MEMSAFE)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif