xref: /freebsd/sys/kern/kern_intr.c (revision d7d962ead0b6e5e8a39202d0590022082bf5bfb6)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_ddb.h"
33 #include "opt_hwpmc_hooks.h"
34 #include "opt_kstack_usage_prof.h"
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/cpuset.h>
40 #include <sys/rtprio.h>
41 #include <sys/systm.h>
42 #include <sys/interrupt.h>
43 #include <sys/kernel.h>
44 #include <sys/kthread.h>
45 #include <sys/ktr.h>
46 #include <sys/limits.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/priv.h>
51 #include <sys/proc.h>
52 #include <sys/epoch.h>
53 #include <sys/random.h>
54 #include <sys/resourcevar.h>
55 #include <sys/sched.h>
56 #include <sys/smp.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/unistd.h>
60 #include <sys/vmmeter.h>
61 #include <machine/atomic.h>
62 #include <machine/cpu.h>
63 #include <machine/md_var.h>
64 #include <machine/smp.h>
65 #include <machine/stdarg.h>
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #include <ddb/db_sym.h>
69 #endif
70 
71 /*
72  * Describe an interrupt thread.  There is one of these per interrupt event.
73  */
74 struct intr_thread {
75 	struct intr_event *it_event;
76 	struct thread *it_thread;	/* Kernel thread. */
77 	int	it_flags;		/* (j) IT_* flags. */
78 	int	it_need;		/* Needs service. */
79 	int	it_waiting;		/* Waiting in the runq. */
80 };
81 
82 /* Interrupt thread flags kept in it_flags */
83 #define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
84 #define	IT_WAIT		0x000002	/* Thread is waiting for completion. */
85 
86 struct	intr_entropy {
87 	struct	thread *td;
88 	uintptr_t event;
89 };
90 
91 struct	intr_event *clk_intr_event;
92 struct	intr_event *tty_intr_event;
93 void	*vm_ih;
94 struct proc *intrproc;
95 
96 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
97 
98 static int intr_storm_threshold = 0;
99 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
100     &intr_storm_threshold, 0,
101     "Number of consecutive interrupts before storm protection is enabled");
102 static int intr_epoch_batch = 1000;
103 SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
104     0, "Maximum interrupt handler executions without re-entering epoch(9)");
105 #ifdef HWPMC_HOOKS
106 static int intr_hwpmc_waiting_report_threshold = 1;
107 SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
108     &intr_hwpmc_waiting_report_threshold, 1,
109     "Threshold for reporting number of events in a workq");
110 #endif
111 static TAILQ_HEAD(, intr_event) event_list =
112     TAILQ_HEAD_INITIALIZER(event_list);
113 static struct mtx event_lock;
114 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
115 
116 static void	intr_event_update(struct intr_event *ie);
117 static int	intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame);
118 static struct intr_thread *ithread_create(const char *name);
119 static void	ithread_destroy(struct intr_thread *ithread);
120 static void	ithread_execute_handlers(struct proc *p,
121 		    struct intr_event *ie);
122 static void	ithread_loop(void *);
123 static void	ithread_update(struct intr_thread *ithd);
124 static void	start_softintr(void *);
125 
126 #ifdef HWPMC_HOOKS
127 #include <sys/pmckern.h>
128 PMC_SOFT_DEFINE( , , intr, all);
129 PMC_SOFT_DEFINE( , , intr, ithread);
130 PMC_SOFT_DEFINE( , , intr, filter);
131 PMC_SOFT_DEFINE( , , intr, stray);
132 PMC_SOFT_DEFINE( , , intr, schedule);
133 PMC_SOFT_DEFINE( , , intr, waiting);
134 #endif
135 
136 /* Map an interrupt type to an ithread priority. */
137 u_char
138 intr_priority(enum intr_type flags)
139 {
140 	u_char pri;
141 
142 	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
143 	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
144 	switch (flags) {
145 	case INTR_TYPE_TTY:
146 		pri = PI_TTY;
147 		break;
148 	case INTR_TYPE_BIO:
149 		pri = PI_DISK;
150 		break;
151 	case INTR_TYPE_NET:
152 		pri = PI_NET;
153 		break;
154 	case INTR_TYPE_CAM:
155 		pri = PI_DISK;
156 		break;
157 	case INTR_TYPE_AV:
158 		pri = PI_AV;
159 		break;
160 	case INTR_TYPE_CLK:
161 		pri = PI_REALTIME;
162 		break;
163 	case INTR_TYPE_MISC:
164 		pri = PI_DULL;          /* don't care */
165 		break;
166 	default:
167 		/* We didn't specify an interrupt level. */
168 		panic("intr_priority: no interrupt type in flags");
169 	}
170 
171 	return pri;
172 }
173 
174 /*
175  * Update an ithread based on the associated intr_event.
176  */
177 static void
178 ithread_update(struct intr_thread *ithd)
179 {
180 	struct intr_event *ie;
181 	struct thread *td;
182 	u_char pri;
183 
184 	ie = ithd->it_event;
185 	td = ithd->it_thread;
186 	mtx_assert(&ie->ie_lock, MA_OWNED);
187 
188 	/* Determine the overall priority of this event. */
189 	if (CK_SLIST_EMPTY(&ie->ie_handlers))
190 		pri = PRI_MAX_ITHD;
191 	else
192 		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;
193 
194 	/* Update name and priority. */
195 	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
196 #ifdef KTR
197 	sched_clear_tdname(td);
198 #endif
199 	thread_lock(td);
200 	sched_prio(td, pri);
201 	thread_unlock(td);
202 }
203 
204 /*
205  * Regenerate the full name of an interrupt event and update its priority.
206  */
207 static void
208 intr_event_update(struct intr_event *ie)
209 {
210 	struct intr_handler *ih;
211 	char *last;
212 	int missed, space, flags;
213 
214 	/* Start off with no entropy and just the name of the event. */
215 	mtx_assert(&ie->ie_lock, MA_OWNED);
216 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
217 	flags = 0;
218 	missed = 0;
219 	space = 1;
220 
221 	/* Run through all the handlers updating values. */
222 	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
223 		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
224 		    sizeof(ie->ie_fullname)) {
225 			strcat(ie->ie_fullname, " ");
226 			strcat(ie->ie_fullname, ih->ih_name);
227 			space = 0;
228 		} else
229 			missed++;
230 		flags |= ih->ih_flags;
231 	}
232 	ie->ie_hflags = flags;
233 
234 	/*
235 	 * If there is only one handler and its name is too long, just copy in
236 	 * as much of the end of the name (includes the unit number) as will
237 	 * fit.  Otherwise, we have multiple handlers and not all of the names
238 	 * will fit.  Add +'s to indicate missing names.  If we run out of room
239 	 * and still have +'s to add, change the last character from a + to a *.
240 	 */
241 	if (missed == 1 && space == 1) {
242 		ih = CK_SLIST_FIRST(&ie->ie_handlers);
243 		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
244 		    sizeof(ie->ie_fullname);
245 		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
246 		strcat(ie->ie_fullname, &ih->ih_name[missed]);
247 		missed = 0;
248 	}
249 	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
250 	while (missed-- > 0) {
251 		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
252 			if (*last == '+') {
253 				*last = '*';
254 				break;
255 			} else
256 				*last = '+';
257 		} else if (space) {
258 			strcat(ie->ie_fullname, " +");
259 			space = 0;
260 		} else
261 			strcat(ie->ie_fullname, "+");
262 	}
263 
264 	/*
265 	 * If this event has an ithread, update its priority and
266 	 * name.
267 	 */
268 	if (ie->ie_thread != NULL)
269 		ithread_update(ie->ie_thread);
270 	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
271 }
272 
273 int
274 intr_event_create(struct intr_event **event, void *source, int flags, int irq,
275     void (*pre_ithread)(void *), void (*post_ithread)(void *),
276     void (*post_filter)(void *), int (*assign_cpu)(void *, int),
277     const char *fmt, ...)
278 {
279 	struct intr_event *ie;
280 	va_list ap;
281 
282 	/* The only valid flag during creation is IE_SOFT. */
283 	if ((flags & ~IE_SOFT) != 0)
284 		return (EINVAL);
285 	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
286 	ie->ie_source = source;
287 	ie->ie_pre_ithread = pre_ithread;
288 	ie->ie_post_ithread = post_ithread;
289 	ie->ie_post_filter = post_filter;
290 	ie->ie_assign_cpu = assign_cpu;
291 	ie->ie_flags = flags;
292 	ie->ie_irq = irq;
293 	ie->ie_cpu = NOCPU;
294 	CK_SLIST_INIT(&ie->ie_handlers);
295 	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
296 
297 	va_start(ap, fmt);
298 	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
299 	va_end(ap);
300 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
301 	mtx_lock(&event_lock);
302 	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
303 	mtx_unlock(&event_lock);
304 	if (event != NULL)
305 		*event = ie;
306 	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
307 	return (0);
308 }
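
/*
 * Example (illustrative sketch only, not compiled): a machine-dependent
 * interrupt layer typically creates one event per hardware vector at
 * boot, passing callbacks that drive its interrupt controller.  All
 * "xpic" names below are invented for illustration.
 */
#if 0
#define	XPIC_NIRQS	64
static struct intr_event *xpic_events[XPIC_NIRQS];
static struct xpic_softc { int xs_unit; } xpic_sc;

static void	xpic_mask(void *);		/* pre_ithread: mask source */
static void	xpic_unmask(void *);		/* post_ithread: unmask */
static void	xpic_eoi(void *);		/* post_filter: EOI only */
static int	xpic_assign_cpu(void *, int);

static int
xpic_setup_vector(int irq)
{
	return (intr_event_create(&xpic_events[irq], &xpic_sc, 0, irq,
	    xpic_mask, xpic_unmask, xpic_eoi, xpic_assign_cpu,
	    "irq%d:", irq));
}
#endif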
309 
310 /*
311  * Bind an interrupt event to the specified CPU.  Note that not all
312  * platforms support binding an interrupt to a CPU.  For those
313  * platforms this request will fail.  Using a cpu id of NOCPU unbinds
314  * the interrupt event.
315  */
316 static int
317 _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
318 {
319 	lwpid_t id;
320 	int error;
321 
322 	/* Need a CPU to bind to. */
323 	if (cpu != NOCPU && CPU_ABSENT(cpu))
324 		return (EINVAL);
325 
326 	if (ie->ie_assign_cpu == NULL)
327 		return (EOPNOTSUPP);
328 
329 	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
330 	if (error)
331 		return (error);
332 
333 	/*
334 	 * If we have any ithreads, try to set their mask first to verify
335 	 * permissions, etc.
336 	 */
337 	if (bindithread) {
338 		mtx_lock(&ie->ie_lock);
339 		if (ie->ie_thread != NULL) {
340 			id = ie->ie_thread->it_thread->td_tid;
341 			mtx_unlock(&ie->ie_lock);
342 			error = cpuset_setithread(id, cpu);
343 			if (error)
344 				return (error);
345 		} else
346 			mtx_unlock(&ie->ie_lock);
347 	}
348 	if (bindirq)
349 		error = ie->ie_assign_cpu(ie->ie_source, cpu);
350 	if (error) {
351 		if (bindithread) {
352 			mtx_lock(&ie->ie_lock);
353 			if (ie->ie_thread != NULL) {
354 				cpu = ie->ie_cpu;
355 				id = ie->ie_thread->it_thread->td_tid;
356 				mtx_unlock(&ie->ie_lock);
357 				(void)cpuset_setithread(id, cpu);
358 			} else
359 				mtx_unlock(&ie->ie_lock);
360 		}
361 		return (error);
362 	}
363 
364 	if (bindirq) {
365 		mtx_lock(&ie->ie_lock);
366 		ie->ie_cpu = cpu;
367 		mtx_unlock(&ie->ie_lock);
368 	}
369 
370 	return (error);
371 }
372 
373 /*
374  * Bind an interrupt event to the specified CPU.  For supported platforms, any
375  * associated ithreads as well as the primary interrupt context will be bound
376  * to the specified CPU.
377  */
378 int
379 intr_event_bind(struct intr_event *ie, int cpu)
380 {
381 
382 	return (_intr_event_bind(ie, cpu, true, true));
383 }
384 
385 /*
386  * Bind an interrupt event to the specified CPU, but do not bind associated
387  * ithreads.
388  */
389 int
390 intr_event_bind_irqonly(struct intr_event *ie, int cpu)
391 {
392 
393 	return (_intr_event_bind(ie, cpu, true, false));
394 }
395 
396 /*
397  * Bind an interrupt event's ithread to the specified CPU.
398  */
399 int
400 intr_event_bind_ithread(struct intr_event *ie, int cpu)
401 {
402 
403 	return (_intr_event_bind(ie, cpu, false, true));
404 }
405 
406 /*
407  * Bind an interrupt event's ithread to the specified cpuset.
408  */
409 int
410 intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
411 {
412 	lwpid_t id;
413 
414 	mtx_lock(&ie->ie_lock);
415 	if (ie->ie_thread != NULL) {
416 		id = ie->ie_thread->it_thread->td_tid;
417 		mtx_unlock(&ie->ie_lock);
418 		return (cpuset_setthread(id, cs));
419 	} else {
420 		mtx_unlock(&ie->ie_lock);
421 	}
422 	return (ENODEV);
423 }
424 
425 static struct intr_event *
426 intr_lookup(int irq)
427 {
428 	struct intr_event *ie;
429 
430 	mtx_lock(&event_lock);
431 	TAILQ_FOREACH(ie, &event_list, ie_list)
432 		if (ie->ie_irq == irq &&
433 		    (ie->ie_flags & IE_SOFT) == 0 &&
434 		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
435 			break;
436 	mtx_unlock(&event_lock);
437 	return (ie);
438 }
439 
440 int
441 intr_setaffinity(int irq, int mode, void *m)
442 {
443 	struct intr_event *ie;
444 	cpuset_t *mask;
445 	int cpu, n;
446 
447 	mask = m;
448 	cpu = NOCPU;
449 	/*
450 	 * If we're setting all cpus we can unbind.  Otherwise make sure
451 	 * only one cpu is in the set.
452 	 */
453 	if (CPU_CMP(cpuset_root, mask)) {
454 		for (n = 0; n < CPU_SETSIZE; n++) {
455 			if (!CPU_ISSET(n, mask))
456 				continue;
457 			if (cpu != NOCPU)
458 				return (EINVAL);
459 			cpu = n;
460 		}
461 	}
462 	ie = intr_lookup(irq);
463 	if (ie == NULL)
464 		return (ESRCH);
465 	switch (mode) {
466 	case CPU_WHICH_IRQ:
467 		return (intr_event_bind(ie, cpu));
468 	case CPU_WHICH_INTRHANDLER:
469 		return (intr_event_bind_irqonly(ie, cpu));
470 	case CPU_WHICH_ITHREAD:
471 		return (intr_event_bind_ithread(ie, cpu));
472 	default:
473 		return (EINVAL);
474 	}
475 }
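
/*
 * Example (sketch): the cpuset(2) CPU_WHICH_IRQ path lands here, which is
 * how cpuset(1) pins an interrupt.  Doing the same from kernel code for a
 * hypothetical IRQ might look roughly like this:
 */
#if 0
static int
xpic_pin_irq_example(void)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	CPU_SET(2, &mask);	/* exactly one CPU set, so cpu resolves to 2 */
	return (intr_setaffinity(24, CPU_WHICH_IRQ, &mask));
}
#endif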
476 
477 int
478 intr_getaffinity(int irq, int mode, void *m)
479 {
480 	struct intr_event *ie;
481 	struct thread *td;
482 	struct proc *p;
483 	cpuset_t *mask;
484 	lwpid_t id;
485 	int error;
486 
487 	mask = m;
488 	ie = intr_lookup(irq);
489 	if (ie == NULL)
490 		return (ESRCH);
491 
492 	error = 0;
493 	CPU_ZERO(mask);
494 	switch (mode) {
495 	case CPU_WHICH_IRQ:
496 	case CPU_WHICH_INTRHANDLER:
497 		mtx_lock(&ie->ie_lock);
498 		if (ie->ie_cpu == NOCPU)
499 			CPU_COPY(cpuset_root, mask);
500 		else
501 			CPU_SET(ie->ie_cpu, mask);
502 		mtx_unlock(&ie->ie_lock);
503 		break;
504 	case CPU_WHICH_ITHREAD:
505 		mtx_lock(&ie->ie_lock);
506 		if (ie->ie_thread == NULL) {
507 			mtx_unlock(&ie->ie_lock);
508 			CPU_COPY(cpuset_root, mask);
509 		} else {
510 			id = ie->ie_thread->it_thread->td_tid;
511 			mtx_unlock(&ie->ie_lock);
512 			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
513 			if (error != 0)
514 				return (error);
515 			CPU_COPY(&td->td_cpuset->cs_mask, mask);
516 			PROC_UNLOCK(p);
517 		}
		break;
518 	default:
519 		return (EINVAL);
520 	}
521 	return (0);
522 }
523 
524 int
525 intr_event_destroy(struct intr_event *ie)
526 {
527 
528 	mtx_lock(&event_lock);
529 	mtx_lock(&ie->ie_lock);
530 	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
531 		mtx_unlock(&ie->ie_lock);
532 		mtx_unlock(&event_lock);
533 		return (EBUSY);
534 	}
535 	TAILQ_REMOVE(&event_list, ie, ie_list);
536 #ifndef notyet
537 	if (ie->ie_thread != NULL) {
538 		ithread_destroy(ie->ie_thread);
539 		ie->ie_thread = NULL;
540 	}
541 #endif
542 	mtx_unlock(&ie->ie_lock);
543 	mtx_unlock(&event_lock);
544 	mtx_destroy(&ie->ie_lock);
545 	free(ie, M_ITHREAD);
546 	return (0);
547 }
548 
549 static struct intr_thread *
550 ithread_create(const char *name)
551 {
552 	struct intr_thread *ithd;
553 	struct thread *td;
554 	int error;
555 
556 	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
557 
558 	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
559 		    &td, RFSTOPPED | RFHIGHPID,
560 		    0, "intr", "%s", name);
561 	if (error)
562 		panic("kproc_kthread_add() failed with %d", error);
563 	thread_lock(td);
564 	sched_class(td, PRI_ITHD);
565 	TD_SET_IWAIT(td);
566 	thread_unlock(td);
567 	td->td_pflags |= TDP_ITHREAD;
568 	ithd->it_thread = td;
569 	CTR2(KTR_INTR, "%s: created %s", __func__, name);
570 	return (ithd);
571 }
572 
573 static void
574 ithread_destroy(struct intr_thread *ithread)
575 {
576 	struct thread *td;
577 
578 	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
579 	td = ithread->it_thread;
580 	thread_lock(td);
581 	ithread->it_flags |= IT_DEAD;
582 	if (TD_AWAITING_INTR(td)) {
583 		TD_CLR_IWAIT(td);
584 		sched_add(td, SRQ_INTR);
585 	} else
586 		thread_unlock(td);
587 }
588 
589 int
590 intr_event_add_handler(struct intr_event *ie, const char *name,
591     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
592     enum intr_type flags, void **cookiep)
593 {
594 	struct intr_handler *ih, *temp_ih;
595 	struct intr_handler **prevptr;
596 	struct intr_thread *it;
597 
598 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
599 		return (EINVAL);
600 
601 	/* Allocate and populate an interrupt handler structure. */
602 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
603 	ih->ih_filter = filter;
604 	ih->ih_handler = handler;
605 	ih->ih_argument = arg;
606 	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
607 	ih->ih_event = ie;
608 	ih->ih_pri = pri;
609 	if (flags & INTR_EXCL)
610 		ih->ih_flags = IH_EXCLUSIVE;
611 	if (flags & INTR_MPSAFE)
612 		ih->ih_flags |= IH_MPSAFE;
613 	if (flags & INTR_ENTROPY)
614 		ih->ih_flags |= IH_ENTROPY;
615 	if (flags & INTR_TYPE_NET)
616 		ih->ih_flags |= IH_NET;
617 
618 	/* We can only have one exclusive handler in an event. */
619 	mtx_lock(&ie->ie_lock);
620 	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
621 		if ((flags & INTR_EXCL) ||
622 		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
623 			mtx_unlock(&ie->ie_lock);
624 			free(ih, M_ITHREAD);
625 			return (EINVAL);
626 		}
627 	}
628 
629 	/* Create a thread if we need one. */
630 	while (ie->ie_thread == NULL && handler != NULL) {
631 		if (ie->ie_flags & IE_ADDING_THREAD)
632 			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
633 		else {
634 			ie->ie_flags |= IE_ADDING_THREAD;
635 			mtx_unlock(&ie->ie_lock);
636 			it = ithread_create("intr: newborn");
637 			mtx_lock(&ie->ie_lock);
638 			ie->ie_flags &= ~IE_ADDING_THREAD;
639 			ie->ie_thread = it;
640 			it->it_event = ie;
641 			ithread_update(it);
642 			wakeup(ie);
643 		}
644 	}
645 
646 	/* Add the new handler to the event in priority order. */
647 	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
648 		if (temp_ih->ih_pri > ih->ih_pri)
649 			break;
650 	}
651 	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);
652 
653 	intr_event_update(ie);
654 
655 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
656 	    ie->ie_name);
657 	mtx_unlock(&ie->ie_lock);
658 
659 	if (cookiep != NULL)
660 		*cookiep = ih;
661 	return (0);
662 }
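
/*
 * Example (sketch): drivers normally reach this function indirectly via
 * bus_setup_intr(9).  A direct registration of a filter/ithread pair for
 * an invented "xdev" driver might look like this; the softc layout is an
 * assumption for illustration.
 */
#if 0
struct xdev_softc {
	void	*sc_intrhand;		/* cookie returned via cookiep */
	/* ... device state ... */
};

static int
xdev_filter(void *arg)
{
	/* Primary interrupt context: no sleeping, return FILTER_* only. */
	return (FILTER_SCHEDULE_THREAD);
}

static void
xdev_intr(void *arg)
{
	/* Runs in the ithread; may block on mutexes but must not sleep. */
}

static int
xdev_register(struct intr_event *ie, struct xdev_softc *sc)
{
	return (intr_event_add_handler(ie, "xdev", xdev_filter, xdev_intr,
	    sc, intr_priority(INTR_TYPE_MISC), INTR_TYPE_MISC | INTR_MPSAFE,
	    &sc->sc_intrhand));
}
#endif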
663 
664 /*
665  * Append a description preceded by a ':' to the name of the specified
666  * interrupt handler.
667  */
668 int
669 intr_event_describe_handler(struct intr_event *ie, void *cookie,
670     const char *descr)
671 {
672 	struct intr_handler *ih;
673 	size_t space;
674 	char *start;
675 
676 	mtx_lock(&ie->ie_lock);
677 #ifdef INVARIANTS
678 	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
679 		if (ih == cookie)
680 			break;
681 	}
682 	if (ih == NULL) {
683 		mtx_unlock(&ie->ie_lock);
684 		panic("handler %p not found in interrupt event %p", cookie, ie);
685 	}
686 #endif
687 	ih = cookie;
688 
689 	/*
690 	 * Look for an existing description by checking for an
691 	 * existing ":".  This assumes device names do not include
692 	 * colons.  If one is found, prepare to insert the new
693 	 * description at that point.  If one is not found, find the
694 	 * end of the name to use as the insertion point.
695 	 */
696 	start = strchr(ih->ih_name, ':');
697 	if (start == NULL)
698 		start = strchr(ih->ih_name, 0);
699 
700 	/*
701 	 * See if there is enough remaining room in the string for the
702 	 * description + ":".  The "- 1" leaves room for the trailing
703 	 * '\0'.  The "+ 1" accounts for the colon.
704 	 */
705 	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
706 	if (strlen(descr) + 1 > space) {
707 		mtx_unlock(&ie->ie_lock);
708 		return (ENOSPC);
709 	}
710 
711 	/* Append a colon followed by the description. */
712 	*start = ':';
713 	strcpy(start + 1, descr);
714 	intr_event_update(ie);
715 	mtx_unlock(&ie->ie_lock);
716 	return (0);
717 }
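
/*
 * Example (sketch): the usual consumer is bus_describe_intr(9), e.g. to
 * tag per-queue MSI-X vectors.  The "xdev" softc fields below are
 * assumptions for illustration.
 */
#if 0
static int
xdev_describe_queue(device_t dev, struct xdev_softc *sc, int qid)
{
	/* The handler then appears as e.g. "xdev0:rxq0" in hw.intrnames. */
	return (bus_describe_intr(dev, sc->sc_irq_res, sc->sc_intrhand,
	    "rxq%d", qid));
}
#endif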
718 
719 /*
720  * Return the ie_source field from the intr_event an intr_handler is
721  * associated with.
722  */
723 void *
724 intr_handler_source(void *cookie)
725 {
726 	struct intr_handler *ih;
727 	struct intr_event *ie;
728 
729 	ih = (struct intr_handler *)cookie;
730 	if (ih == NULL)
731 		return (NULL);
732 	ie = ih->ih_event;
733 	KASSERT(ie != NULL,
734 	    ("interrupt handler \"%s\" has a NULL interrupt event",
735 	    ih->ih_name));
736 	return (ie->ie_source);
737 }
738 
739 /*
740  * If intr_event_handle() is running in the ISR context at the time of the call,
741  * then wait for it to complete.
742  */
743 static void
744 intr_event_barrier(struct intr_event *ie)
745 {
746 	int phase;
747 
748 	mtx_assert(&ie->ie_lock, MA_OWNED);
749 	phase = ie->ie_phase;
750 
751 	/*
752 	 * Switch phase to direct future interrupts to the other active counter.
753 	 * Make sure that any preceding stores are visible before the switch.
754 	 */
755 	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
756 	atomic_store_rel_int(&ie->ie_phase, !phase);
757 
758 	/*
759 	 * This code cooperates with wait-free iteration of ie_handlers
760 	 * in intr_event_handle.
761 	 * Make sure that the removal and the phase update are not reordered
762 	 * with the active count check.
763 	 * Note that no combination of acquire and release fences can provide
764 	 * that guarantee as Store->Load sequences can always be reordered.
765 	 */
766 	atomic_thread_fence_seq_cst();
767 
768 	/*
769 	 * Now wait on the inactive phase.
770 	 * The acquire fence is needed so that all post-barrier accesses
771 	 * are after the check.
772 	 */
773 	while (ie->ie_active[phase] > 0)
774 		cpu_spinwait();
775 	atomic_thread_fence_acq();
776 }
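
/*
 * Illustrative interleaving (a sketch of the Store->Load hazard the
 * seq_cst fence above rules out): without it, the phase store could
 * become visible only after the ie_active check, so that
 *
 *	barrier CPU			interrupt CPU
 *	-----------			-------------
 *	ie_phase = 1 (still buffered)
 *	sees ie_active[0] == 0		reads ie_phase == 0
 *	returns "quiescent"		ie_active[0]++, runs handlers
 *
 * i.e. a handler could still be running on the phase the barrier just
 * declared idle.  intr_event_handle() issues the matching fence after
 * incrementing ie_active.
 */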
777 
778 static void
779 intr_handler_barrier(struct intr_handler *handler)
780 {
781 	struct intr_event *ie;
782 
783 	ie = handler->ih_event;
784 	mtx_assert(&ie->ie_lock, MA_OWNED);
785 	KASSERT((handler->ih_flags & IH_DEAD) == 0,
786 	    ("update for a removed handler"));
787 
788 	if (ie->ie_thread == NULL) {
789 		intr_event_barrier(ie);
790 		return;
791 	}
792 	if ((handler->ih_flags & IH_CHANGED) == 0) {
793 		handler->ih_flags |= IH_CHANGED;
794 		intr_event_schedule_thread(ie, NULL);
795 	}
796 	while ((handler->ih_flags & IH_CHANGED) != 0)
797 		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
798 }
799 
800 /*
801  * Sleep until an ithread finishes executing an interrupt handler.
802  *
803  * XXX Doesn't currently handle interrupt filters or fast interrupt
804  * handlers. This is intended for LinuxKPI drivers only.
805  * Do not use in BSD code.
806  */
807 void
808 _intr_drain(int irq)
809 {
810 	struct intr_event *ie;
811 	struct intr_thread *ithd;
812 	struct thread *td;
813 
814 	ie = intr_lookup(irq);
815 	if (ie == NULL)
816 		return;
817 	if (ie->ie_thread == NULL)
818 		return;
819 	ithd = ie->ie_thread;
820 	td = ithd->it_thread;
821 	/*
822 	 * We set the flag and wait for it to be cleared to avoid
823 	 * long delays with potentially busy interrupt handlers
824 	 * were we to only sample TD_AWAITING_INTR() every tick.
825 	 */
826 	thread_lock(td);
827 	if (!TD_AWAITING_INTR(td)) {
828 		ithd->it_flags |= IT_WAIT;
829 		while (ithd->it_flags & IT_WAIT) {
830 			thread_unlock(td);
831 			pause("idrain", 1);
832 			thread_lock(td);
833 		}
834 	}
835 	thread_unlock(td);
836 	return;
837 }
838 
839 int
840 intr_event_remove_handler(void *cookie)
841 {
842 	struct intr_handler *handler = (struct intr_handler *)cookie;
843 	struct intr_event *ie;
844 	struct intr_handler *ih;
845 	struct intr_handler **prevptr;
846 #ifdef notyet
847 	int dead;
848 #endif
849 
850 	if (handler == NULL)
851 		return (EINVAL);
852 	ie = handler->ih_event;
853 	KASSERT(ie != NULL,
854 	    ("interrupt handler \"%s\" has a NULL interrupt event",
855 	    handler->ih_name));
856 
857 	mtx_lock(&ie->ie_lock);
858 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
859 	    ie->ie_name);
860 	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
861 		if (ih == handler)
862 			break;
863 	}
864 	if (ih == NULL) {
865 		panic("interrupt handler \"%s\" not found in "
866 		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
867 	}
868 
869 	/*
870 	 * If there is no ithread, then directly remove the handler.  Note that
871 	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
872 	 * care needs to be taken to keep ie_handlers consistent and to free
873 	 * the removed handler only when ie_handlers is quiescent.
874 	 */
875 	if (ie->ie_thread == NULL) {
876 		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
877 		intr_event_barrier(ie);
878 		intr_event_update(ie);
879 		mtx_unlock(&ie->ie_lock);
880 		free(handler, M_ITHREAD);
881 		return (0);
882 	}
883 
884 	/*
885 	 * Let the interrupt thread do the job.
886 	 * The interrupt source is disabled when the interrupt thread is
887 	 * running, so it does not have to worry about interaction with
888 	 * intr_event_handle().
889 	 */
890 	KASSERT((handler->ih_flags & IH_DEAD) == 0,
891 	    ("duplicate handle remove"));
892 	handler->ih_flags |= IH_DEAD;
893 	intr_event_schedule_thread(ie, NULL);
894 	while (handler->ih_flags & IH_DEAD)
895 		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
896 	intr_event_update(ie);
897 
898 #ifdef notyet
899 	/*
900 	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
901 	 * this could lead to races of stale data when servicing an
902 	 * interrupt.
903 	 */
904 	dead = 1;
905 	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
906 		if (ih->ih_handler != NULL) {
907 			dead = 0;
908 			break;
909 		}
910 	}
911 	if (dead) {
912 		ithread_destroy(ie->ie_thread);
913 		ie->ie_thread = NULL;
914 	}
915 #endif
916 	mtx_unlock(&ie->ie_lock);
917 	free(handler, M_ITHREAD);
918 	return (0);
919 }
920 
921 int
922 intr_event_suspend_handler(void *cookie)
923 {
924 	struct intr_handler *handler = (struct intr_handler *)cookie;
925 	struct intr_event *ie;
926 
927 	if (handler == NULL)
928 		return (EINVAL);
929 	ie = handler->ih_event;
930 	KASSERT(ie != NULL,
931 	    ("interrupt handler \"%s\" has a NULL interrupt event",
932 	    handler->ih_name));
933 	mtx_lock(&ie->ie_lock);
934 	handler->ih_flags |= IH_SUSP;
935 	intr_handler_barrier(handler);
936 	mtx_unlock(&ie->ie_lock);
937 	return (0);
938 }
939 
940 int
941 intr_event_resume_handler(void *cookie)
942 {
943 	struct intr_handler *handler = (struct intr_handler *)cookie;
944 	struct intr_event *ie;
945 
946 	if (handler == NULL)
947 		return (EINVAL);
948 	ie = handler->ih_event;
949 	KASSERT(ie != NULL,
950 	    ("interrupt handler \"%s\" has a NULL interrupt event",
951 	    handler->ih_name));
952 
953 	/*
954 	 * intr_handler_barrier() acts not only as a barrier,
955 	 * it also allows checking for any pending interrupts.
956 	 */
957 	mtx_lock(&ie->ie_lock);
958 	handler->ih_flags &= ~IH_SUSP;
959 	intr_handler_barrier(handler);
960 	mtx_unlock(&ie->ie_lock);
961 	return (0);
962 }
963 
964 static int
965 intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
966 {
967 	struct intr_entropy entropy;
968 	struct intr_thread *it;
969 	struct thread *td;
970 	struct thread *ctd;
971 
972 	/*
973 	 * If no ithread or no handlers, then we have a stray interrupt.
974 	 */
975 	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
976 	    ie->ie_thread == NULL)
977 		return (EINVAL);
978 
979 	ctd = curthread;
980 	it = ie->ie_thread;
981 	td = it->it_thread;
982 
983 	/*
984 	 * If any of the handlers for this ithread claim to be good
985 	 * sources of entropy, then gather some.
986 	 */
987 	if (ie->ie_hflags & IH_ENTROPY) {
988 		entropy.event = (uintptr_t)ie;
989 		entropy.td = ctd;
990 		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
991 	}
992 
993 	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
994 
995 	/*
996 	 * Set it_need to tell the thread to keep running if it is already
997 	 * running.  Then, lock the thread and see if we actually need to
998 	 * put it on the runqueue.
999 	 *
1000 	 * Use store_rel to arrange that the store to ih_need in
1001 	 * swi_sched() is before the store to it_need and prepare for
1002 	 * transfer of this order to loads in the ithread.
1003 	 */
1004 	atomic_store_rel_int(&it->it_need, 1);
1005 	thread_lock(td);
1006 	if (TD_AWAITING_INTR(td)) {
1007 #ifdef HWPMC_HOOKS
1008 		atomic_set_int(&it->it_waiting, 0);
1009 		if (frame != NULL)
1010 			PMC_SOFT_CALL_TF( , , intr, schedule, frame);
1011 		else
1012 			PMC_SOFT_CALL( , , intr, schedule);
1013 #endif
1014 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
1015 		    td->td_name);
1016 		TD_CLR_IWAIT(td);
1017 		sched_add(td, SRQ_INTR);
1018 	} else {
1019 #ifdef HWPMC_HOOKS
1020 		atomic_add_int(&it->it_waiting, 1);
1021 
1022 		if (atomic_load_int(&it->it_waiting) >= intr_hwpmc_waiting_report_threshold) {
1023 			if (frame != NULL)
1024 				PMC_SOFT_CALL_TF( , , intr, waiting, frame);
1025 			else
1026 				PMC_SOFT_CALL( , , intr, waiting);
1027 		}
1028 #endif
1029 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
1030 		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
1031 		thread_unlock(td);
1032 	}
1033 
1034 	return (0);
1035 }
1036 
1037 /*
1038  * Allow interrupt event binding for software interrupt handlers -- a no-op,
1039  * since interrupts are generated in software rather than being directed by
1040  * a PIC.
1041  */
1042 static int
1043 swi_assign_cpu(void *arg, int cpu)
1044 {
1045 
1046 	return (0);
1047 }
1048 
1049 /*
1050  * Add a software interrupt handler to a specified event.  If a given event
1051  * is not specified, then a new event is created.
1052  */
1053 int
1054 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
1055 	    void *arg, int pri, enum intr_type flags, void **cookiep)
1056 {
1057 	struct intr_event *ie;
1058 	int error = 0;
1059 
1060 	if (flags & INTR_ENTROPY)
1061 		return (EINVAL);
1062 
1063 	ie = (eventp != NULL) ? *eventp : NULL;
1064 
1065 	if (ie != NULL) {
1066 		if (!(ie->ie_flags & IE_SOFT))
1067 			return (EINVAL);
1068 	} else {
1069 		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1070 		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
1071 		if (error)
1072 			return (error);
1073 		if (eventp != NULL)
1074 			*eventp = ie;
1075 	}
1076 	if (handler != NULL) {
1077 		error = intr_event_add_handler(ie, name, NULL, handler, arg,
1078 		    PI_SWI(pri), flags, cookiep);
1079 	}
1080 	return (error);
1081 }
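
/*
 * Example (sketch, after the style of swi(9)): create a private software
 * interrupt handler at boot and kick it later.  The "xswi" names are
 * invented for illustration.
 */
#if 0
static void *xswi_cookie;

static void
xswi_handler(void *arg)
{
	/* Runs in a swi ithread at PI_SWI(SWI_TQ) priority. */
}

static void
xswi_init(void *dummy)
{
	if (swi_add(NULL, "xswi", xswi_handler, NULL, SWI_TQ,
	    INTR_MPSAFE, &xswi_cookie) != 0)
		panic("could not create xswi");
}
SYSINIT(xswi_setup, SI_SUB_SOFTINTR, SI_ORDER_ANY, xswi_init, NULL);

static void
xswi_kick(void)
{
	/* Sets ih_need and schedules the ithread unless SWI_DELAY given. */
	swi_sched(xswi_cookie, 0);
}
#endif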
1082 
1083 /*
1084  * Schedule a software interrupt thread.
1085  */
1086 void
1087 swi_sched(void *cookie, int flags)
1088 {
1089 	struct intr_handler *ih = (struct intr_handler *)cookie;
1090 	struct intr_event *ie = ih->ih_event;
1091 	struct intr_entropy entropy;
1092 	int error __unused;
1093 
1094 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1095 	    ih->ih_need);
1096 
1097 	if ((flags & SWI_FROMNMI) == 0) {
1098 		entropy.event = (uintptr_t)ih;
1099 		entropy.td = curthread;
1100 		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
1101 	}
1102 
1103 	/*
1104 	 * Set ih_need for this handler so that if the ithread is already
1105 	 * running it will execute this handler on the next pass.  Otherwise,
1106 	 * it will execute it the next time it runs.
1107 	 */
1108 	ih->ih_need = 1;
1109 
1110 	if (flags & SWI_DELAY)
1111 		return;
1112 
1113 	if (flags & SWI_FROMNMI) {
1114 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
1115 		KASSERT(ie == clk_intr_event,
1116 		    ("SWI_FROMNMI used with an event other than clk_intr_event"));
1117 		ipi_self_from_nmi(IPI_SWI);
1118 #endif
1119 	} else {
1120 		VM_CNT_INC(v_soft);
1121 		error = intr_event_schedule_thread(ie, NULL);
1122 		KASSERT(error == 0, ("stray software interrupt"));
1123 	}
1124 }
1125 
1126 /*
1127  * Remove a software interrupt handler.  Currently this code does not
1128  * remove the associated interrupt event if it becomes empty.  Calling code
1129  * may do so manually via intr_event_destroy(), but that's not really
1130  * an optimal interface.
1131  */
1132 int
1133 swi_remove(void *cookie)
1134 {
1135 
1136 	return (intr_event_remove_handler(cookie));
1137 }
1138 
1139 static void
1140 intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1141 {
1142 	struct intr_handler *ih, *ihn, *ihp;
1143 
1144 	ihp = NULL;
1145 	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1146 		/*
1147 		 * If this handler is marked for death, remove it from
1148 		 * the list of handlers and wake up the sleeper.
1149 		 */
1150 		if (ih->ih_flags & IH_DEAD) {
1151 			mtx_lock(&ie->ie_lock);
1152 			if (ihp == NULL)
1153 				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
1154 			else
1155 				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
1156 			ih->ih_flags &= ~IH_DEAD;
1157 			wakeup(ih);
1158 			mtx_unlock(&ie->ie_lock);
1159 			continue;
1160 		}
1161 
1162 		/*
1163 		 * Now that we know that the current element won't be removed,
1164 		 * update the previous element.
1165 		 */
1166 		ihp = ih;
1167 
1168 		if ((ih->ih_flags & IH_CHANGED) != 0) {
1169 			mtx_lock(&ie->ie_lock);
1170 			ih->ih_flags &= ~IH_CHANGED;
1171 			wakeup(ih);
1172 			mtx_unlock(&ie->ie_lock);
1173 		}
1174 
1175 		/* Skip filter only handlers */
1176 		if (ih->ih_handler == NULL)
1177 			continue;
1178 
1179 		/* Skip suspended handlers */
1180 		if ((ih->ih_flags & IH_SUSP) != 0)
1181 			continue;
1182 
1183 		/*
1184 		 * For software interrupt threads, we only execute
1185 		 * handlers that have their need flag set.  Hardware
1186 		 * interrupt threads always invoke all of their handlers.
1187 		 *
1188 		 * ih_need can only be 0 or 1.  Failed cmpset below
1189 		 * means that there is no request to execute handlers,
1190 		 * so a retry of the cmpset is not needed.
1191 		 */
1192 		if ((ie->ie_flags & IE_SOFT) != 0 &&
1193 		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
1194 			continue;
1195 
1196 		/* Execute this handler. */
1197 		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1198 		    __func__, p->p_pid, (void *)ih->ih_handler,
1199 		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1200 
1201 		if (!(ih->ih_flags & IH_MPSAFE))
1202 			mtx_lock(&Giant);
1203 		ih->ih_handler(ih->ih_argument);
1204 		if (!(ih->ih_flags & IH_MPSAFE))
1205 			mtx_unlock(&Giant);
1206 	}
1207 }
1208 
1209 static void
1210 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
1211 {
1212 
1213 	/* Interrupt handlers should not sleep. */
1214 	if (!(ie->ie_flags & IE_SOFT))
1215 		THREAD_NO_SLEEPING();
1216 	intr_event_execute_handlers(p, ie);
1217 	if (!(ie->ie_flags & IE_SOFT))
1218 		THREAD_SLEEPING_OK();
1219 
1220 	/*
1221 	 * Interrupt storm handling:
1222 	 *
1223 	 * If this interrupt source is currently storming, then throttle
1224 	 * it to fire the handler only once per clock tick.
1225 	 *
1226 	 * If this interrupt source is not currently storming, but the
1227 	 * number of back to back interrupts exceeds the storm threshold,
1228 	 * then enter storming mode.
1229 	 */
1230 	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1231 	    !(ie->ie_flags & IE_SOFT)) {
1232 		/* Report the message only once every second. */
1233 		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1234 			printf(
1235 	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1236 			    ie->ie_name);
1237 		}
1238 		pause("istorm", 1);
1239 	} else
1240 		ie->ie_count++;
1241 
1242 	/*
1243 	 * Now that all the handlers have had a chance to run, reenable
1244 	 * the interrupt source.
1245 	 */
1246 	if (ie->ie_post_ithread != NULL)
1247 		ie->ie_post_ithread(ie->ie_source);
1248 }
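
/*
 * Note that intr_storm_threshold is 0 (throttling disabled) by default;
 * an administrator can re-enable the throttle above via the loader
 * tunable or sysctl, e.g. hw.intr_storm_threshold=1000.
 */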
1249 
1250 /*
1251  * This is the main code for interrupt threads.
1252  */
1253 static void
1254 ithread_loop(void *arg)
1255 {
1256 	struct epoch_tracker et;
1257 	struct intr_thread *ithd;
1258 	struct intr_event *ie;
1259 	struct thread *td;
1260 	struct proc *p;
1261 	int wake, epoch_count;
1262 	bool needs_epoch;
1263 
1264 	td = curthread;
1265 	p = td->td_proc;
1266 	ithd = (struct intr_thread *)arg;
1267 	KASSERT(ithd->it_thread == td,
1268 	    ("%s: ithread and proc linkage out of sync", __func__));
1269 	ie = ithd->it_event;
1270 	ie->ie_count = 0;
1271 	wake = 0;
1272 
1273 	/*
1274 	 * As long as we have interrupts outstanding, go through the
1275 	 * list of handlers, giving each one a go at it.
1276 	 */
1277 	for (;;) {
1278 		/*
1279 		 * If we are an orphaned thread, then just die.
1280 		 */
1281 		if (ithd->it_flags & IT_DEAD) {
1282 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1283 			    p->p_pid, td->td_name);
1284 			free(ithd, M_ITHREAD);
1285 			kthread_exit();
1286 		}
1287 
1288 		/*
1289 		 * Service interrupts.  If another interrupt arrives while
1290 		 * we are running, it will set it_need to note that we
1291 		 * should make another pass.
1292 		 *
1293 		 * The load_acq part of the following cmpset ensures
1294 		 * that the load of ih_need in ithread_execute_handlers()
1295 		 * is ordered after the load of it_need here.
1296 		 */
1297 		needs_epoch =
1298 		    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
1299 		if (needs_epoch) {
1300 			epoch_count = 0;
1301 			NET_EPOCH_ENTER(et);
1302 		}
1303 		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
1304 			ithread_execute_handlers(p, ie);
1305 			if (needs_epoch &&
1306 			    ++epoch_count >= intr_epoch_batch) {
1307 				NET_EPOCH_EXIT(et);
1308 				epoch_count = 0;
1309 				NET_EPOCH_ENTER(et);
1310 			}
1311 		}
1312 		if (needs_epoch)
1313 			NET_EPOCH_EXIT(et);
1314 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1315 		mtx_assert(&Giant, MA_NOTOWNED);
1316 
1317 		/*
1318 		 * Processed all our interrupts.  Now get the sched
1319 		 * lock.  This may take a while and it_need may get
1320 		 * set again, so we have to check it again.
1321 		 */
1322 		thread_lock(td);
1323 		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
1324 		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
1325 			TD_SET_IWAIT(td);
1326 			ie->ie_count = 0;
1327 			mi_switch(SW_VOL | SWT_IWAIT);
1328 		} else {
1329 			if (ithd->it_flags & IT_WAIT) {
1330 				wake = 1;
1331 				ithd->it_flags &= ~IT_WAIT;
1332 			}
1333 			thread_unlock(td);
1334 		}
1335 		if (wake) {
1336 			wakeup(ithd);
1337 			wake = 0;
1338 		}
1339 	}
1340 }
1341 
1342 /*
1343  * Main interrupt handling body.
1344  *
1345  * Input:
1346  * o ie:                        the event connected to this interrupt.
1347  * o frame:                     some archs (e.g. i386) pass a frame to some
1348  *                              handlers as their main argument.
1349  * Return value:
1350  * o 0:                         everything ok.
1351  * o EINVAL:                    stray interrupt.
1352  */
1353 int
1354 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1355 {
1356 	struct intr_handler *ih;
1357 	struct trapframe *oldframe;
1358 	struct thread *td;
1359 	int phase;
1360 	int ret;
1361 	bool filter, thread;
1362 
1363 	td = curthread;
1364 
1365 #ifdef KSTACK_USAGE_PROF
1366 	intr_prof_stack_use(td, frame);
1367 #endif
1368 
1369 	/* An interrupt with no event or handlers is a stray interrupt. */
1370 	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
1371 		return (EINVAL);
1372 
1373 	/*
1374 	 * Execute fast interrupt handlers directly.
1375 	 * To support clock handlers, if a handler registers
1376 	 * with a NULL argument, then we pass it a pointer to
1377 	 * a trapframe as its argument.
1378 	 */
1379 	td->td_intr_nesting_level++;
1380 	filter = false;
1381 	thread = false;
1382 	ret = 0;
1383 	critical_enter();
1384 	oldframe = td->td_intr_frame;
1385 	td->td_intr_frame = frame;
1386 
1387 	phase = ie->ie_phase;
1388 	atomic_add_int(&ie->ie_active[phase], 1);
1389 
1390 	/*
1391 	 * This fence is required to ensure that no later loads are
1392 	 * re-ordered before the ie_active store.
1393 	 */
1394 	atomic_thread_fence_seq_cst();
1395 
1396 	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
1397 		if ((ih->ih_flags & IH_SUSP) != 0)
1398 			continue;
1399 		if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
1400 			continue;
1401 		if (ih->ih_filter == NULL) {
1402 			thread = true;
1403 			continue;
1404 		}
1405 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
1406 		    ih->ih_filter, ih->ih_argument == NULL ? frame :
1407 		    ih->ih_argument, ih->ih_name);
1408 		if (ih->ih_argument == NULL)
1409 			ret = ih->ih_filter(frame);
1410 		else
1411 			ret = ih->ih_filter(ih->ih_argument);
1412 #ifdef HWPMC_HOOKS
1413 		PMC_SOFT_CALL_TF( , , intr, all, frame);
1414 #endif
1415 		KASSERT(ret == FILTER_STRAY ||
1416 		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1417 		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1418 		    ("%s: incorrect return value %#x from %s", __func__, ret,
1419 		    ih->ih_name));
1420 		filter = filter || ret == FILTER_HANDLED;
1421 #ifdef HWPMC_HOOKS
1422 		if (ret & FILTER_SCHEDULE_THREAD)
1423 			PMC_SOFT_CALL_TF( , , intr, ithread, frame);
1424 		else if (ret & FILTER_HANDLED)
1425 			PMC_SOFT_CALL_TF( , , intr, filter, frame);
1426 		else if (ret == FILTER_STRAY)
1427 			PMC_SOFT_CALL_TF( , , intr, stray, frame);
1428 #endif
1429 
1430 		/*
1431 		 * Wrapper handler special handling:
1432 		 *
1433 		 * in some particular cases (like pccard and pccbb),
1434 		 * the _real_ device handler is wrapped in a couple of
1435 		 * functions - a filter wrapper and an ithread wrapper.
1436 		 * In this case (and just in this case), the filter wrapper
1437 		 * could ask the system to schedule the ithread and mask
1438 		 * the interrupt source if the wrapped handler is composed
1439 		 * of just an ithread handler.
1440 		 *
1441 		 * TODO: write a generic wrapper to avoid people rolling
1442 		 * their own.
1443 		 */
1444 		if (!thread) {
1445 			if (ret == FILTER_SCHEDULE_THREAD)
1446 				thread = true;
1447 		}
1448 	}
1449 	atomic_add_rel_int(&ie->ie_active[phase], -1);
1450 
1451 	td->td_intr_frame = oldframe;
1452 
1453 	if (thread) {
1454 		if (ie->ie_pre_ithread != NULL)
1455 			ie->ie_pre_ithread(ie->ie_source);
1456 	} else {
1457 		if (ie->ie_post_filter != NULL)
1458 			ie->ie_post_filter(ie->ie_source);
1459 	}
1460 
1461 	/* Schedule the ithread if needed. */
1462 	if (thread) {
1463 		int error __unused;
1464 
1465 		error = intr_event_schedule_thread(ie, frame);
1466 		KASSERT(error == 0, ("bad stray interrupt"));
1467 	}
1468 	critical_exit();
1469 	td->td_intr_nesting_level--;
1470 #ifdef notyet
1471 	/* The interrupt is not acknowledged by any filter and has no ithread. */
1472 	if (!thread && !filter)
1473 		return (EINVAL);
1474 #endif
1475 	return (0);
1476 }
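
/*
 * Example (sketch): each platform's low-level interrupt code funnels into
 * intr_event_handle(), roughly as in the invented dispatcher below (see
 * the "xpic" sketch after intr_event_create() above).
 */
#if 0
void
xpic_dispatch(u_int irq, struct trapframe *frame)
{
	if (intr_event_handle(xpic_events[irq], frame) != 0)
		log(LOG_ERR, "stray xpic irq %u\n", irq);
}
#endif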
1477 
1478 #ifdef DDB
1479 /*
1480  * Dump details about an interrupt handler
1481  */
1482 static void
1483 db_dump_intrhand(struct intr_handler *ih)
1484 {
1485 	int comma;
1486 
1487 	db_printf("\t%-10s ", ih->ih_name);
1488 	switch (ih->ih_pri) {
1489 	case PI_REALTIME:
1490 		db_printf("CLK ");
1491 		break;
1492 	case PI_AV:
1493 		db_printf("AV  ");
1494 		break;
1495 	case PI_TTY:
1496 		db_printf("TTY ");
1497 		break;
1498 	case PI_NET:
1499 		db_printf("NET ");
1500 		break;
1501 	case PI_DISK:
1502 		db_printf("DISK");
1503 		break;
1504 	case PI_DULL:
1505 		db_printf("DULL");
1506 		break;
1507 	default:
1508 		if (ih->ih_pri >= PI_SOFT)
1509 			db_printf("SWI ");
1510 		else
1511 			db_printf("%4u", ih->ih_pri);
1512 		break;
1513 	}
1514 	db_printf(" ");
1515 	if (ih->ih_filter != NULL) {
1516 		db_printf("[F]");
1517 		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1518 	}
1519 	if (ih->ih_handler != NULL) {
1520 		if (ih->ih_filter != NULL)
1521 			db_printf(",");
1522 		db_printf("[H]");
1523 		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1524 	}
1525 	db_printf("(%p)", ih->ih_argument);
1526 	if (ih->ih_need ||
1527 	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1528 	    IH_MPSAFE)) != 0) {
1529 		db_printf(" {");
1530 		comma = 0;
1531 		if (ih->ih_flags & IH_EXCLUSIVE) {
1532 			if (comma)
1533 				db_printf(", ");
1534 			db_printf("EXCL");
1535 			comma = 1;
1536 		}
1537 		if (ih->ih_flags & IH_ENTROPY) {
1538 			if (comma)
1539 				db_printf(", ");
1540 			db_printf("ENTROPY");
1541 			comma = 1;
1542 		}
1543 		if (ih->ih_flags & IH_DEAD) {
1544 			if (comma)
1545 				db_printf(", ");
1546 			db_printf("DEAD");
1547 			comma = 1;
1548 		}
1549 		if (ih->ih_flags & IH_MPSAFE) {
1550 			if (comma)
1551 				db_printf(", ");
1552 			db_printf("MPSAFE");
1553 			comma = 1;
1554 		}
1555 		if (ih->ih_need) {
1556 			if (comma)
1557 				db_printf(", ");
1558 			db_printf("NEED");
1559 		}
1560 		db_printf("}");
1561 	}
1562 	db_printf("\n");
1563 }
1564 
1565 /*
1566  * Dump details about an event.
1567  */
1568 void
1569 db_dump_intr_event(struct intr_event *ie, int handlers)
1570 {
1571 	struct intr_handler *ih;
1572 	struct intr_thread *it;
1573 	int comma;
1574 
1575 	db_printf("%s ", ie->ie_fullname);
1576 	it = ie->ie_thread;
1577 	if (it != NULL)
1578 		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1579 	else
1580 		db_printf("(no thread)");
1581 	if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
1582 	    (it != NULL && it->it_need)) {
1583 		db_printf(" {");
1584 		comma = 0;
1585 		if (ie->ie_flags & IE_SOFT) {
1586 			db_printf("SOFT");
1587 			comma = 1;
1588 		}
1589 		if (ie->ie_flags & IE_ADDING_THREAD) {
1590 			if (comma)
1591 				db_printf(", ");
1592 			db_printf("ADDING_THREAD");
1593 			comma = 1;
1594 		}
1595 		if (it != NULL && it->it_need) {
1596 			if (comma)
1597 				db_printf(", ");
1598 			db_printf("NEED");
1599 		}
1600 		db_printf("}");
1601 	}
1602 	db_printf("\n");
1603 
1604 	if (handlers)
1605 		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
1606 		    db_dump_intrhand(ih);
1607 }
1608 
1609 /*
1610  * Dump data about interrupt handlers
1611  */
1612 DB_SHOW_COMMAND(intr, db_show_intr)
1613 {
1614 	struct intr_event *ie;
1615 	int all, verbose;
1616 
1617 	verbose = strchr(modif, 'v') != NULL;
1618 	all = strchr(modif, 'a') != NULL;
1619 	TAILQ_FOREACH(ie, &event_list, ie_list) {
1620 		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
1621 			continue;
1622 		db_dump_intr_event(ie, verbose);
1623 		if (db_pager_quit)
1624 			break;
1625 	}
1626 }
1627 #endif /* DDB */
1628 
1629 /*
1630  * Start standard software interrupt threads
1631  */
1632 static void
1633 start_softintr(void *dummy)
1634 {
1635 
1636 	if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK,
1637 	    INTR_MPSAFE, NULL))
1638 		panic("died while creating clk swi ithread");
1639 	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
1640 		panic("died while creating vm swi ithread");
1641 }
1642 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1643     NULL);
1644 
1645 /*
1646  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1647  * The data for this is machine dependent, and the declarations are in machine
1648  * dependent code.  The layout of intrnames and intrcnt however is machine
1649  * independent.
1650  *
1651  * We do not know the length of intrcnt and intrnames at compile time, so
1652  * calculate things at run time.
1653  */
1654 static int
1655 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1656 {
1657 	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
1658 }
1659 
1660 SYSCTL_PROC(_hw, OID_AUTO, intrnames,
1661     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1662     sysctl_intrnames, "",
1663     "Interrupt Names");
1664 
1665 static int
1666 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1667 {
1668 #ifdef SCTL_MASK32
1669 	uint32_t *intrcnt32;
1670 	unsigned i;
1671 	int error;
1672 
1673 	if (req->flags & SCTL_MASK32) {
1674 		if (!req->oldptr)
1675 			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
1676 		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
1677 		if (intrcnt32 == NULL)
1678 			return (ENOMEM);
1679 		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
1680 			intrcnt32[i] = intrcnt[i];
1681 		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
1682 		free(intrcnt32, M_TEMP);
1683 		return (error);
1684 	}
1685 #endif
1686 	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
1687 }
1688 
1689 SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
1690     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1691     sysctl_intrcnt, "",
1692     "Interrupt Counts");
1693 
1694 #ifdef DDB
1695 /*
1696  * DDB command to dump the interrupt statistics.
1697  */
1698 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1699 {
1700 	u_long *i;
1701 	char *cp;
1702 	u_int j;
1703 
1704 	cp = intrnames;
1705 	j = 0;
1706 	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
1707 	    i++, j++) {
1708 		if (*cp == '\0')
1709 			break;
1710 		if (*i != 0)
1711 			db_printf("%s\t%lu\n", cp, *i);
1712 		cp += strlen(cp) + 1;
1713 	}
1714 }
1715 #endif
1716