xref: /freebsd/sys/kern/subr_intr.c (revision ee5cf11617a9b7f034d95c639bd4d27d1f09e848)
1 /*-
2  * Copyright (c) 2015-2016 Svatopluk Kraus
3  * Copyright (c) 2015-2016 Michal Meloun
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  *	New-style Interrupt Framework
33  *
34  *  TODO: - to support IPI (PPI) enabling on other CPUs if already started
35  *        - to complete things for removable PICs
36  */
37 
38 #include "opt_acpi.h"
39 #include "opt_ddb.h"
40 #include "opt_hwpmc_hooks.h"
41 #include "opt_platform.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/syslog.h>
47 #include <sys/malloc.h>
48 #include <sys/proc.h>
49 #include <sys/queue.h>
50 #include <sys/bus.h>
51 #include <sys/interrupt.h>
52 #include <sys/conf.h>
53 #include <sys/cpuset.h>
54 #include <sys/rman.h>
55 #include <sys/sched.h>
56 #include <sys/smp.h>
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 #endif
60 
61 #include <machine/atomic.h>
62 #include <machine/intr.h>
63 #include <machine/cpu.h>
64 #include <machine/smp.h>
65 #include <machine/stdarg.h>
66 
67 #ifdef FDT
68 #include <dev/ofw/openfirm.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 #endif
72 
73 #ifdef DDB
74 #include <ddb/ddb.h>
75 #endif
76 
77 #include "pic_if.h"
78 #include "msi_if.h"
79 
80 #define	INTRNAME_LEN	(2*MAXCOMLEN + 1)
81 
82 #ifdef DEBUG
83 #define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
84     printf(fmt,##args); } while (0)
85 #else
86 #define debugf(fmt, args...)
87 #endif
88 
/* Malloc type for the interrupt framework's own allocations. */
MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;	/* set by intr_pic_claim_root() */
static void *irq_root_arg;			/* argument passed to the filter */
static u_int irq_root_ipicount;			/* IPI count from claim_root */
100 
/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;
/* Flags for pic_flags below; FLAG_PIC is set by intr_pic_register(). */
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
	u_int			pic_flags;
};

/* Lock protecting pic_list; every list walk must hold it. */
static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);
115 
/* Interrupt source definition. */
static struct mtx isrc_table_lock;
/* Global table of registered sources, indexed by interrupt number. */
static struct intr_irqsrc *irq_sources[NIRQ];
/* Rotating allocation hint for isrc_alloc_irq(); NIRQ means table is full. */
u_int irq_next_free;
120 
121 /*
122  *  XXX - All stuff around struct intr_dev_data is considered as temporary
 *  until a better place for storing struct intr_map_data is found.
124  *
125  *  For now, there are two global interrupt numbers spaces:
126  *  <0, NIRQ)                      ... interrupts without config data
127  *                                     managed in irq_sources[]
128  *  IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
129  *                                     managed in intr_ddata_tab[]
130  *
131  *  Read intr_ddata_lookup() to see how these spaces are worked with.
132  *  Note that each interrupt number from second space duplicates some number
133  *  from first space at this moment. An interrupt number from first space can
134  *  be duplicated even multiple times in second space.
135  */
136 struct intr_dev_data {
137 	device_t		idd_dev;
138 	intptr_t		idd_xref;
139 	u_int			idd_irq;
140 	struct intr_map_data *	idd_data;
141 	struct intr_irqsrc *	idd_isrc;
142 };
143 
144 static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];
145 static u_int intr_ddata_first_unused;
146 
147 #define IRQ_DDATA_BASE	10000
148 CTASSERT(IRQ_DDATA_BASE > nitems(irq_sources));
149 
#ifdef SMP
/* TRUE once CPU binding is allowed; checked in intr_isrc_assign_cpu(). */
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI counters for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT   (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT   (NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
/* First unallocated slot in intrcnt[]/intrnames[]; only ever grows. */
static u_int intrcnt_index;
170 
171 /*
172  *  Interrupt framework initialization routine.
173  */
174 static void
175 intr_irq_init(void *dummy __unused)
176 {
177 
178 	SLIST_INIT(&pic_list);
179 	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
180 
181 	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
182 }
183 SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
184 
185 static void
186 intrcnt_setname(const char *name, int index)
187 {
188 
189 	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
190 	    INTRNAME_LEN - 1, name);
191 }
192 
193 /*
194  *  Update name for interrupt source with interrupt event.
195  */
196 static void
197 intrcnt_updatename(struct intr_irqsrc *isrc)
198 {
199 
200 	/* QQQ: What about stray counter name? */
201 	mtx_assert(&isrc_table_lock, MA_OWNED);
202 	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
203 }
204 
205 /*
206  *  Virtualization for interrupt source interrupt counter increment.
207  */
208 static inline void
209 isrc_increment_count(struct intr_irqsrc *isrc)
210 {
211 
212 	if (isrc->isrc_flags & INTR_ISRCF_PPI)
213 		atomic_add_long(&isrc->isrc_count[0], 1);
214 	else
215 		isrc->isrc_count[0]++;
216 }
217 
218 /*
219  *  Virtualization for interrupt source interrupt stray counter increment.
220  */
221 static inline void
222 isrc_increment_straycount(struct intr_irqsrc *isrc)
223 {
224 
225 	isrc->isrc_count[1]++;
226 }
227 
228 /*
229  *  Virtualization for interrupt source interrupt name update.
230  */
231 static void
232 isrc_update_name(struct intr_irqsrc *isrc, const char *name)
233 {
234 	char str[INTRNAME_LEN];
235 
236 	mtx_assert(&isrc_table_lock, MA_OWNED);
237 
238 	if (name != NULL) {
239 		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
240 		intrcnt_setname(str, isrc->isrc_index);
241 		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
242 		    name);
243 		intrcnt_setname(str, isrc->isrc_index + 1);
244 	} else {
245 		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
246 		intrcnt_setname(str, isrc->isrc_index);
247 		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
248 		intrcnt_setname(str, isrc->isrc_index + 1);
249 	}
250 }
251 
252 /*
253  *  Virtualization for interrupt source interrupt counters setup.
254  */
255 static void
256 isrc_setup_counters(struct intr_irqsrc *isrc)
257 {
258 	u_int index;
259 
260 	/*
261 	 *  XXX - it does not work well with removable controllers and
262 	 *        interrupt sources !!!
263 	 */
264 	index = atomic_fetchadd_int(&intrcnt_index, 2);
265 	isrc->isrc_index = index;
266 	isrc->isrc_count = &intrcnt[index];
267 	isrc_update_name(isrc, NULL);
268 }
269 
270 /*
271  *  Virtualization for interrupt source interrupt counters release.
272  */
273 static void
274 isrc_release_counters(struct intr_irqsrc *isrc)
275 {
276 
277 	panic("%s: not implemented", __func__);
278 }
279 
280 #ifdef SMP
281 /*
282  *  Virtualization for interrupt source IPI counters setup.
283  */
284 u_long *
285 intr_ipi_setup_counters(const char *name)
286 {
287 	u_int index, i;
288 	char str[INTRNAME_LEN];
289 
290 	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
291 	for (i = 0; i < MAXCPU; i++) {
292 		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
293 		intrcnt_setname(str, index + i);
294 	}
295 	return (&intrcnt[index]);
296 }
297 #endif
298 
299 /*
300  *  Main interrupt dispatch handler. It's called straight
301  *  from the assembler, where CPU interrupt is served.
302  */
303 void
304 intr_irq_handler(struct trapframe *tf)
305 {
306 	struct trapframe * oldframe;
307 	struct thread * td;
308 
309 	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
310 
311 	PCPU_INC(cnt.v_intr);
312 	critical_enter();
313 	td = curthread;
314 	oldframe = td->td_intr_frame;
315 	td->td_intr_frame = tf;
316 	irq_root_filter(irq_root_arg);
317 	td->td_intr_frame = oldframe;
318 	critical_exit();
319 #ifdef HWPMC_HOOKS
320 	if (pmc_hook && TRAPF_USERMODE(tf) &&
321 	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
322 		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
323 #endif
324 }
325 
326 /*
327  *  interrupt controller dispatch function for interrupts. It should
328  *  be called straight from the interrupt controller, when associated interrupt
329  *  source is learned.
330  */
331 int
332 intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
333 {
334 
335 	KASSERT(isrc != NULL, ("%s: no source", __func__));
336 
337 	isrc_increment_count(isrc);
338 
339 #ifdef INTR_SOLO
340 	if (isrc->isrc_filter != NULL) {
341 		int error;
342 		error = isrc->isrc_filter(isrc->isrc_arg, tf);
343 		PIC_POST_FILTER(isrc->isrc_dev, isrc);
344 		if (error == FILTER_HANDLED)
345 			return (0);
346 	} else
347 #endif
348 	if (isrc->isrc_event != NULL) {
349 		if (intr_event_handle(isrc->isrc_event, tf) == 0)
350 			return (0);
351 	}
352 
353 	isrc_increment_straycount(isrc);
354 	return (EINVAL);
355 }
356 
357 /*
358  *  Alloc unique interrupt number (resource handle) for interrupt source.
359  *
360  *  There could be various strategies how to allocate free interrupt number
361  *  (resource handle) for new interrupt source.
362  *
363  *  1. Handles are always allocated forward, so handles are not recycled
364  *     immediately. However, if only one free handle left which is reused
365  *     constantly...
366  */
367 static inline int
368 isrc_alloc_irq(struct intr_irqsrc *isrc)
369 {
370 	u_int maxirqs, irq;
371 
372 	mtx_assert(&isrc_table_lock, MA_OWNED);
373 
374 	maxirqs = nitems(irq_sources);
375 	if (irq_next_free >= maxirqs)
376 		return (ENOSPC);
377 
378 	for (irq = irq_next_free; irq < maxirqs; irq++) {
379 		if (irq_sources[irq] == NULL)
380 			goto found;
381 	}
382 	for (irq = 0; irq < irq_next_free; irq++) {
383 		if (irq_sources[irq] == NULL)
384 			goto found;
385 	}
386 
387 	irq_next_free = maxirqs;
388 	return (ENOSPC);
389 
390 found:
391 	isrc->isrc_irq = irq;
392 	irq_sources[irq] = isrc;
393 
394 	irq_next_free = irq + 1;
395 	if (irq_next_free >= maxirqs)
396 		irq_next_free = 0;
397 	return (0);
398 }
399 
400 /*
401  *  Free unique interrupt number (resource handle) from interrupt source.
402  */
403 static inline int
404 isrc_free_irq(struct intr_irqsrc *isrc)
405 {
406 
407 	mtx_assert(&isrc_table_lock, MA_OWNED);
408 
409 	if (isrc->isrc_irq >= nitems(irq_sources))
410 		return (EINVAL);
411 	if (irq_sources[isrc->isrc_irq] != isrc)
412 		return (EINVAL);
413 
414 	irq_sources[isrc->isrc_irq] = NULL;
415 	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
416 	return (0);
417 }
418 
419 /*
420  *  Lookup interrupt source by interrupt number (resource handle).
421  */
422 static inline struct intr_irqsrc *
423 isrc_lookup(u_int irq)
424 {
425 
426 	if (irq < nitems(irq_sources))
427 		return (irq_sources[irq]);
428 	return (NULL);
429 }
430 
431 /*
432  *  Initialize interrupt source and register it into global interrupt table.
433  */
434 int
435 intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
436     const char *fmt, ...)
437 {
438 	int error;
439 	va_list ap;
440 
441 	bzero(isrc, sizeof(struct intr_irqsrc));
442 	isrc->isrc_dev = dev;
443 	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
444 	isrc->isrc_flags = flags;
445 
446 	va_start(ap, fmt);
447 	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
448 	va_end(ap);
449 
450 	mtx_lock(&isrc_table_lock);
451 	error = isrc_alloc_irq(isrc);
452 	if (error != 0) {
453 		mtx_unlock(&isrc_table_lock);
454 		return (error);
455 	}
456 	/*
457 	 * Setup interrupt counters, but not for IPI sources. Those are setup
458 	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
459 	 * our counter pool.
460 	 */
461 	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
462 		isrc_setup_counters(isrc);
463 	mtx_unlock(&isrc_table_lock);
464 	return (0);
465 }
466 
467 /*
468  *  Deregister interrupt source from global interrupt table.
469  */
470 int
471 intr_isrc_deregister(struct intr_irqsrc *isrc)
472 {
473 	int error;
474 
475 	mtx_lock(&isrc_table_lock);
476 	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
477 		isrc_release_counters(isrc);
478 	error = isrc_free_irq(isrc);
479 	mtx_unlock(&isrc_table_lock);
480 	return (error);
481 }
482 
483 #ifdef SMP
484 /*
485  *  A support function for a PIC to decide if provided ISRC should be inited
486  *  on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
487  *  struct intr_irqsrc is the following:
488  *
489  *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
490  *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
491  *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
492  */
493 bool
494 intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
495 {
496 
497 	if (isrc->isrc_handlers == 0)
498 		return (false);
499 	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
500 		return (false);
501 	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
502 		return (CPU_ISSET(cpu, &isrc->isrc_cpu));
503 
504 	CPU_SET(cpu, &isrc->isrc_cpu);
505 	return (true);
506 }
507 #endif
508 
/*
 *  Allocate a struct intr_dev_data with extsize bytes of trailing space
 *  for the embedded struct intr_map_data, and claim the next slot in the
 *  second interrupt number space. Returns NULL when the table is full.
 *  May sleep (M_WAITOK).
 */
static struct intr_dev_data *
intr_ddata_alloc(u_int extsize)
{
	struct intr_dev_data *ddata;
	size_t size;

	size = sizeof(*ddata);
	ddata = malloc(size + extsize, M_INTRNG, M_WAITOK | M_ZERO);

	mtx_lock(&isrc_table_lock);
	if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
		mtx_unlock(&isrc_table_lock);
		free(ddata, M_INTRNG);
		return (NULL);
	}
	/* Publish the slot and bind its handle in the second space. */
	intr_ddata_tab[intr_ddata_first_unused] = ddata;
	ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
	mtx_unlock(&isrc_table_lock);

	/* The map data lives directly behind the struct in one allocation. */
	ddata->idd_data = (struct intr_map_data *)((uintptr_t)ddata + size);
	ddata->idd_data->size = extsize;
	return (ddata);
}
532 
533 static struct intr_irqsrc *
534 intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
535 {
536 	int error;
537 	struct intr_irqsrc *isrc;
538 	struct intr_dev_data *ddata;
539 
540 	isrc = isrc_lookup(irq);
541 	if (isrc != NULL) {
542 		if (datap != NULL)
543 			*datap = NULL;
544 		return (isrc);
545 	}
546 
547 	if (irq < IRQ_DDATA_BASE)
548 		return (NULL);
549 
550 	irq -= IRQ_DDATA_BASE;
551 	if (irq >= nitems(intr_ddata_tab))
552 		return (NULL);
553 
554 	ddata = intr_ddata_tab[irq];
555 	if (ddata->idd_isrc == NULL) {
556 		error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
557 		    ddata->idd_data, &irq);
558 		if (error != 0)
559 			return (NULL);
560 		ddata->idd_isrc = isrc_lookup(irq);
561 	}
562 	if (datap != NULL)
563 		*datap = ddata->idd_data;
564 	return (ddata->idd_isrc);
565 }
566 
567 #ifdef DEV_ACPI
568 /*
569  *  Map interrupt source according to ACPI info into framework. If such mapping
570  *  does not exist, create it. Return unique interrupt number (resource handle)
571  *  associated with mapped interrupt source.
572  */
573 u_int
574 intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
575     enum intr_trigger trig)
576 {
577 	struct intr_map_data_acpi *daa;
578 	struct intr_dev_data *ddata;
579 
580 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_acpi));
581 	if (ddata == NULL)
582 		return (INTR_IRQ_INVALID);	/* no space left */
583 
584 	ddata->idd_dev = dev;
585 	ddata->idd_data->type = INTR_MAP_DATA_ACPI;
586 
587 	daa = (struct intr_map_data_acpi *)ddata->idd_data;
588 	daa->irq = irq;
589 	daa->pol = pol;
590 	daa->trig = trig;
591 
592 	return (ddata->idd_irq);
593 }
594 #endif
595 #ifdef FDT
596 /*
597  *  Map interrupt source according to FDT data into framework. If such mapping
598  *  does not exist, create it. Return unique interrupt number (resource handle)
599  *  associated with mapped interrupt source.
600  */
601 u_int
602 intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
603 {
604 	size_t cellsize;
605 	struct intr_dev_data *ddata;
606 	struct intr_map_data_fdt *daf;
607 
608 	cellsize = ncells * sizeof(*cells);
609 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_fdt) + cellsize);
610 	if (ddata == NULL)
611 		return (INTR_IRQ_INVALID);	/* no space left */
612 
613 	ddata->idd_xref = (intptr_t)node;
614 	ddata->idd_data->type = INTR_MAP_DATA_FDT;
615 
616 	daf = (struct intr_map_data_fdt *)ddata->idd_data;
617 	daf->ncells = ncells;
618 	memcpy(daf->cells, cells, cellsize);
619 	return (ddata->idd_irq);
620 }
621 #endif
622 
623 /*
 *  Store GPIO interrupt description in framework and return unique interrupt
625  *  number (resource handle) associated with it.
626  */
627 u_int
628 intr_gpio_map_irq(device_t dev, u_int pin_num, u_int pin_flags, u_int intr_mode)
629 {
630 	struct intr_dev_data *ddata;
631 	struct intr_map_data_gpio *dag;
632 
633 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_gpio));
634 	if (ddata == NULL)
635 		return (INTR_IRQ_INVALID);	/* no space left */
636 
637 	ddata->idd_dev = dev;
638 	ddata->idd_data->type = INTR_MAP_DATA_GPIO;
639 
640 	dag = (struct intr_map_data_gpio *)ddata->idd_data;
641 	dag->gpio_pin_num = pin_num;
642 	dag->gpio_pin_flags = pin_flags;
643 	dag->gpio_intr_mode = intr_mode;
644 	return (ddata->idd_irq);
645 }
646 
647 #ifdef INTR_SOLO
648 /*
649  *  Setup filter into interrupt source.
650  */
/*
 *  Install a solo filter on an interrupt source; the source then bypasses
 *  the MI interrupt event machinery (see intr_isrc_dispatch()). Fails with
 *  EBUSY when a filter or an event is already attached.
 *
 *  NOTE(review): "iscr" in the name is a historical typo for "isrc",
 *  kept as-is since this is the established (INTR_SOLO-only) spelling.
 */
static int
iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	/* The isrc itself serves as the teardown cookie. */
	*cookiep = isrc;
	return (0);
}
676 #endif
677 
678 /*
679  *  Interrupt source pre_ithread method for MI interrupt framework.
680  */
681 static void
682 intr_isrc_pre_ithread(void *arg)
683 {
684 	struct intr_irqsrc *isrc = arg;
685 
686 	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
687 }
688 
689 /*
690  *  Interrupt source post_ithread method for MI interrupt framework.
691  */
692 static void
693 intr_isrc_post_ithread(void *arg)
694 {
695 	struct intr_irqsrc *isrc = arg;
696 
697 	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
698 }
699 
700 /*
701  *  Interrupt source post_filter method for MI interrupt framework.
702  */
703 static void
704 intr_isrc_post_filter(void *arg)
705 {
706 	struct intr_irqsrc *isrc = arg;
707 
708 	PIC_POST_FILTER(isrc->isrc_dev, isrc);
709 }
710 
711 /*
712  *  Interrupt source assign_cpu method for MI interrupt framework.
713  */
714 static int
715 intr_isrc_assign_cpu(void *arg, int cpu)
716 {
717 #ifdef SMP
718 	struct intr_irqsrc *isrc = arg;
719 	int error;
720 
721 	if (isrc->isrc_dev != intr_irq_root_dev)
722 		return (EINVAL);
723 
724 	mtx_lock(&isrc_table_lock);
725 	if (cpu == NOCPU) {
726 		CPU_ZERO(&isrc->isrc_cpu);
727 		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
728 	} else {
729 		CPU_SETOF(cpu, &isrc->isrc_cpu);
730 		isrc->isrc_flags |= INTR_ISRCF_BOUND;
731 	}
732 
733 	/*
734 	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
735 	 * re-balance it to another CPU or enable it on more CPUs. However,
736 	 * PIC is expected to change isrc_cpu appropriately to keep us well
737 	 * informed if the call is successful.
738 	 */
739 	if (irq_assign_cpu) {
740 		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
741 		if (error) {
742 			CPU_ZERO(&isrc->isrc_cpu);
743 			mtx_unlock(&isrc_table_lock);
744 			return (error);
745 		}
746 	}
747 	mtx_unlock(&isrc_table_lock);
748 	return (0);
749 #else
750 	return (EOPNOTSUPP);
751 #endif
752 }
753 
754 /*
755  *  Create interrupt event for interrupt source.
756  */
757 static int
758 isrc_event_create(struct intr_irqsrc *isrc)
759 {
760 	struct intr_event *ie;
761 	int error;
762 
763 	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
764 	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
765 	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
766 	if (error)
767 		return (error);
768 
769 	mtx_lock(&isrc_table_lock);
770 	/*
771 	 * Make sure that we do not mix the two ways
772 	 * how we handle interrupt sources. Let contested event wins.
773 	 */
774 #ifdef INTR_SOLO
775 	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
776 #else
777 	if (isrc->isrc_event != NULL) {
778 #endif
779 		mtx_unlock(&isrc_table_lock);
780 		intr_event_destroy(ie);
781 		return (isrc->isrc_event != NULL ? EBUSY : 0);
782 	}
783 	isrc->isrc_event = ie;
784 	mtx_unlock(&isrc_table_lock);
785 
786 	return (0);
787 }
788 #ifdef notyet
789 /*
790  *  Destroy interrupt event for interrupt source.
791  */
792 static void
793 isrc_event_destroy(struct intr_irqsrc *isrc)
794 {
795 	struct intr_event *ie;
796 
797 	mtx_lock(&isrc_table_lock);
798 	ie = isrc->isrc_event;
799 	isrc->isrc_event = NULL;
800 	mtx_unlock(&isrc_table_lock);
801 
802 	if (ie != NULL)
803 		intr_event_destroy(ie);
804 }
805 #endif
806 /*
807  *  Add handler to interrupt source.
808  */
809 static int
810 isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
811     driver_filter_t filter, driver_intr_t handler, void *arg,
812     enum intr_type flags, void **cookiep)
813 {
814 	int error;
815 
816 	if (isrc->isrc_event == NULL) {
817 		error = isrc_event_create(isrc);
818 		if (error)
819 			return (error);
820 	}
821 
822 	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
823 	    arg, intr_priority(flags), flags, cookiep);
824 	if (error == 0) {
825 		mtx_lock(&isrc_table_lock);
826 		intrcnt_updatename(isrc);
827 		mtx_unlock(&isrc_table_lock);
828 	}
829 
830 	return (error);
831 }
832 
833 /*
834  *  Lookup interrupt controller locked.
835  */
836 static inline struct intr_pic *
837 pic_lookup_locked(device_t dev, intptr_t xref)
838 {
839 	struct intr_pic *pic;
840 
841 	mtx_assert(&pic_list_lock, MA_OWNED);
842 
843 	if (dev == NULL && xref == 0)
844 		return (NULL);
845 
846 	/* Note that pic->pic_dev is never NULL on registered PIC. */
847 	SLIST_FOREACH(pic, &pic_list, pic_next) {
848 		if (dev == NULL) {
849 			if (xref == pic->pic_xref)
850 				return (pic);
851 		} else if (xref == 0 || pic->pic_xref == 0) {
852 			if (dev == pic->pic_dev)
853 				return (pic);
854 		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
855 				return (pic);
856 	}
857 	return (NULL);
858 }
859 
860 /*
861  *  Lookup interrupt controller.
862  */
863 static struct intr_pic *
864 pic_lookup(device_t dev, intptr_t xref)
865 {
866 	struct intr_pic *pic;
867 
868 	mtx_lock(&pic_list_lock);
869 	pic = pic_lookup_locked(dev, xref);
870 	mtx_unlock(&pic_list_lock);
871 	return (pic);
872 }
873 
874 /*
875  *  Create interrupt controller.
876  */
877 static struct intr_pic *
878 pic_create(device_t dev, intptr_t xref)
879 {
880 	struct intr_pic *pic;
881 
882 	mtx_lock(&pic_list_lock);
883 	pic = pic_lookup_locked(dev, xref);
884 	if (pic != NULL) {
885 		mtx_unlock(&pic_list_lock);
886 		return (pic);
887 	}
888 	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
889 	if (pic == NULL) {
890 		mtx_unlock(&pic_list_lock);
891 		return (NULL);
892 	}
893 	pic->pic_xref = xref;
894 	pic->pic_dev = dev;
895 	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
896 	mtx_unlock(&pic_list_lock);
897 
898 	return (pic);
899 }
900 #ifdef notyet
901 /*
902  *  Destroy interrupt controller.
903  */
904 static void
905 pic_destroy(device_t dev, intptr_t xref)
906 {
907 	struct intr_pic *pic;
908 
909 	mtx_lock(&pic_list_lock);
910 	pic = pic_lookup_locked(dev, xref);
911 	if (pic == NULL) {
912 		mtx_unlock(&pic_list_lock);
913 		return;
914 	}
915 	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
916 	mtx_unlock(&pic_list_lock);
917 
918 	free(pic, M_INTRNG);
919 }
920 #endif
921 /*
922  *  Register interrupt controller.
923  */
924 struct intr_pic *
925 intr_pic_register(device_t dev, intptr_t xref)
926 {
927 	struct intr_pic *pic;
928 
929 	if (dev == NULL)
930 		return (NULL);
931 	pic = pic_create(dev, xref);
932 	if (pic == NULL)
933 		return (NULL);
934 
935 	pic->pic_flags |= FLAG_PIC;
936 
937 	debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic,
938 	    device_get_nameunit(dev), dev, xref);
939 	return (pic);
940 }
941 
942 /*
943  *  Unregister interrupt controller.
944  */
945 int
946 intr_pic_deregister(device_t dev, intptr_t xref)
947 {
948 
949 	panic("%s: not implemented", __func__);
950 }
951 
952 /*
953  *  Mark interrupt controller (itself) as a root one.
954  *
955  *  Note that only an interrupt controller can really know its position
956  *  in interrupt controller's tree. So root PIC must claim itself as a root.
957  *
958  *  In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
959  *  page 30:
960  *    "The root of the interrupt tree is determined when traversal
961  *     of the interrupt tree reaches an interrupt controller node without
962  *     an interrupts property and thus no explicit interrupt parent."
963  */
964 int
965 intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
966     void *arg, u_int ipicount)
967 {
968 	struct intr_pic *pic;
969 
970 	pic = pic_lookup(dev, xref);
971 	if (pic == NULL) {
972 		device_printf(dev, "not registered\n");
973 		return (EINVAL);
974 	}
975 
976 	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
977 	    ("%s: Found a non-PIC controller: %s", __func__,
978 	     device_get_name(pic->pic_dev)));
979 
980 	if (filter == NULL) {
981 		device_printf(dev, "filter missing\n");
982 		return (EINVAL);
983 	}
984 
985 	/*
986 	 * Only one interrupt controllers could be on the root for now.
987 	 * Note that we further suppose that there is not threaded interrupt
988 	 * routine (handler) on the root. See intr_irq_handler().
989 	 */
990 	if (intr_irq_root_dev != NULL) {
991 		device_printf(dev, "another root already set\n");
992 		return (EBUSY);
993 	}
994 
995 	intr_irq_root_dev = dev;
996 	irq_root_filter = filter;
997 	irq_root_arg = arg;
998 	irq_root_ipicount = ipicount;
999 
1000 	debugf("irq root set to %s\n", device_get_nameunit(dev));
1001 	return (0);
1002 }
1003 
/*
 *  Map config data onto an interrupt source of the given controller and
 *  return its interrupt number (resource handle) in *irqp.
 *
 *  Returns EINVAL for missing data, ESRCH when no such controller is
 *  registered, or the PIC's own mapping error.
 */
int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    u_int *irqp)
{
	int error;
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
	    ("%s: Found a non-PIC controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	/* The controller translates the data into one of its sources. */
	error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
	if (error == 0)
		*irqp = isrc->isrc_irq;
	return (error);
}
1028 
1029 int
1030 intr_alloc_irq(device_t dev, struct resource *res)
1031 {
1032 	struct intr_map_data *data;
1033 	struct intr_irqsrc *isrc;
1034 
1035 	KASSERT(rman_get_start(res) == rman_get_end(res),
1036 	    ("%s: more interrupts in resource", __func__));
1037 
1038 	isrc = intr_ddata_lookup(rman_get_start(res), &data);
1039 	if (isrc == NULL)
1040 		return (EINVAL);
1041 
1042 	return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
1043 }
1044 
1045 int
1046 intr_release_irq(device_t dev, struct resource *res)
1047 {
1048 	struct intr_map_data *data;
1049 	struct intr_irqsrc *isrc;
1050 
1051 	KASSERT(rman_get_start(res) == rman_get_end(res),
1052 	    ("%s: more interrupts in resource", __func__));
1053 
1054 	isrc = intr_ddata_lookup(rman_get_start(res), &data);
1055 	if (isrc == NULL)
1056 		return (EINVAL);
1057 
1058 	return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
1059 }
1060 
1061 int
1062 intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
1063     driver_intr_t hand, void *arg, int flags, void **cookiep)
1064 {
1065 	int error;
1066 	struct intr_map_data *data;
1067 	struct intr_irqsrc *isrc;
1068 	const char *name;
1069 
1070 	KASSERT(rman_get_start(res) == rman_get_end(res),
1071 	    ("%s: more interrupts in resource", __func__));
1072 
1073 	isrc = intr_ddata_lookup(rman_get_start(res), &data);
1074 	if (isrc == NULL)
1075 		return (EINVAL);
1076 
1077 	name = device_get_nameunit(dev);
1078 
1079 #ifdef INTR_SOLO
1080 	/*
1081 	 * Standard handling is done through MI interrupt framework. However,
1082 	 * some interrupts could request solely own special handling. This
1083 	 * non standard handling can be used for interrupt controllers without
1084 	 * handler (filter only), so in case that interrupt controllers are
1085 	 * chained, MI interrupt framework is called only in leaf controller.
1086 	 *
1087 	 * Note that root interrupt controller routine is served as well,
1088 	 * however in intr_irq_handler(), i.e. main system dispatch routine.
1089 	 */
1090 	if (flags & INTR_SOLO && hand != NULL) {
1091 		debugf("irq %u cannot solo on %s\n", irq, name);
1092 		return (EINVAL);
1093 	}
1094 
1095 	if (flags & INTR_SOLO) {
1096 		error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1097 		    arg, cookiep);
1098 		debugf("irq %u setup filter error %d on %s\n", irq, error,
1099 		    name);
1100 	} else
1101 #endif
1102 		{
1103 		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1104 		    cookiep);
1105 		debugf("irq %u add handler error %d on %s\n", irq, error, name);
1106 	}
1107 	if (error != 0)
1108 		return (error);
1109 
1110 	mtx_lock(&isrc_table_lock);
1111 	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1112 	if (error == 0) {
1113 		isrc->isrc_handlers++;
1114 		if (isrc->isrc_handlers == 1)
1115 			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1116 	}
1117 	mtx_unlock(&isrc_table_lock);
1118 	if (error != 0)
1119 		intr_event_remove_handler(*cookiep);
1120 	return (error);
1121 }
1122 
/*
 *  Teardown the handler (or INTR_SOLO filter) identified by cookie on the
 *  source behind res. The last handler disables the interrupt in the PIC.
 */
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

#ifdef INTR_SOLO
	/* Solo filter: cookie is the isrc itself (see iscr_setup_filter()). */
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	/* Regular handler: the cookie must belong to this source's event. */
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		/* Disable the source in hardware when the last handler goes. */
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1168 
1169 int
1170 intr_describe_irq(device_t dev, struct resource *res, void *cookie,
1171     const char *descr)
1172 {
1173 	int error;
1174 	struct intr_irqsrc *isrc;
1175 
1176 	KASSERT(rman_get_start(res) == rman_get_end(res),
1177 	    ("%s: more interrupts in resource", __func__));
1178 
1179 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1180 	if (isrc == NULL || isrc->isrc_handlers == 0)
1181 		return (EINVAL);
1182 #ifdef INTR_SOLO
1183 	if (isrc->isrc_filter != NULL) {
1184 		if (isrc != cookie)
1185 			return (EINVAL);
1186 
1187 		mtx_lock(&isrc_table_lock);
1188 		isrc_update_name(isrc, descr);
1189 		mtx_unlock(&isrc_table_lock);
1190 		return (0);
1191 	}
1192 #endif
1193 	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
1194 	if (error == 0) {
1195 		mtx_lock(&isrc_table_lock);
1196 		intrcnt_updatename(isrc);
1197 		mtx_unlock(&isrc_table_lock);
1198 	}
1199 	return (error);
1200 }
1201 
1202 #ifdef SMP
1203 int
1204 intr_bind_irq(device_t dev, struct resource *res, int cpu)
1205 {
1206 	struct intr_irqsrc *isrc;
1207 
1208 	KASSERT(rman_get_start(res) == rman_get_end(res),
1209 	    ("%s: more interrupts in resource", __func__));
1210 
1211 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1212 	if (isrc == NULL || isrc->isrc_handlers == 0)
1213 		return (EINVAL);
1214 #ifdef INTR_SOLO
1215 	if (isrc->isrc_filter != NULL)
1216 		return (intr_isrc_assign_cpu(isrc, cpu));
1217 #endif
1218 	return (intr_event_bind(isrc->isrc_event, cpu));
1219 }
1220 
1221 /*
1222  * Return the CPU that the next interrupt source should use.
1223  * For now just returns the next CPU according to round-robin.
1224  */
1225 u_int
1226 intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
1227 {
1228 
1229 	if (!irq_assign_cpu || mp_ncpus == 1)
1230 		return (PCPU_GET(cpuid));
1231 
1232 	do {
1233 		last_cpu++;
1234 		if (last_cpu > mp_maxid)
1235 			last_cpu = 0;
1236 	} while (!CPU_ISSET(last_cpu, cpumask));
1237 	return (last_cpu);
1238 }
1239 
1240 /*
1241  *  Distribute all the interrupt sources among the available
1242  *  CPUs once the AP's have been launched.
1243  */
1244 static void
1245 intr_irq_shuffle(void *arg __unused)
1246 {
1247 	struct intr_irqsrc *isrc;
1248 	u_int i;
1249 
1250 	if (mp_ncpus == 1)
1251 		return;
1252 
1253 	mtx_lock(&isrc_table_lock);
1254 	irq_assign_cpu = TRUE;
1255 	for (i = 0; i < NIRQ; i++) {
1256 		isrc = irq_sources[i];
1257 		if (isrc == NULL || isrc->isrc_handlers == 0 ||
1258 		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
1259 			continue;
1260 
1261 		if (isrc->isrc_event != NULL &&
1262 		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
1263 		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
1264 			panic("%s: CPU inconsistency", __func__);
1265 
1266 		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
1267 			CPU_ZERO(&isrc->isrc_cpu); /* start again */
1268 
1269 		/*
1270 		 * We are in wicked position here if the following call fails
1271 		 * for bound ISRC. The best thing we can do is to clear
1272 		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
1273 		 */
1274 		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
1275 			CPU_ZERO(&isrc->isrc_cpu);
1276 	}
1277 	mtx_unlock(&isrc_table_lock);
1278 }
1279 SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1280 
1281 #else
/*
 *  UP variant: with a single CPU there is nothing to distribute, so every
 *  interrupt is serviced on the (only) current CPU.
 */
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
1288 #endif
1289 
1290 /*
1291  *  Register a MSI/MSI-X interrupt controller
1292  */
1293 int
1294 intr_msi_register(device_t dev, intptr_t xref)
1295 {
1296 	struct intr_pic *pic;
1297 
1298 	if (dev == NULL)
1299 		return (EINVAL);
1300 	pic = pic_create(dev, xref);
1301 	if (pic == NULL)
1302 		return (ENOMEM);
1303 
1304 	pic->pic_flags |= FLAG_MSI;
1305 
1306 	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
1307 	    device_get_nameunit(dev), dev, (uintmax_t)xref);
1308 	return (0);
1309 }
1310 
1311 int
1312 intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
1313     int maxcount, int *irqs)
1314 {
1315 	struct intr_irqsrc **isrc;
1316 	struct intr_pic *pic;
1317 	device_t pdev;
1318 	int err, i;
1319 
1320 	pic = pic_lookup(NULL, xref);
1321 	if (pic == NULL)
1322 		return (ESRCH);
1323 
1324 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1325 	    ("%s: Found a non-MSI controller: %s", __func__,
1326 	     device_get_name(pic->pic_dev)));
1327 
1328 	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1329 	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
1330 	if (err == 0) {
1331 		for (i = 0; i < count; i++) {
1332 			irqs[i] = isrc[i]->isrc_irq;
1333 		}
1334 	}
1335 
1336 	free(isrc, M_INTRNG);
1337 
1338 	return (err);
1339 }
1340 
1341 int
1342 intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
1343     int *irqs)
1344 {
1345 	struct intr_irqsrc **isrc;
1346 	struct intr_pic *pic;
1347 	int i, err;
1348 
1349 	pic = pic_lookup(NULL, xref);
1350 	if (pic == NULL)
1351 		return (ESRCH);
1352 
1353 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1354 	    ("%s: Found a non-MSI controller: %s", __func__,
1355 	     device_get_name(pic->pic_dev)));
1356 
1357 	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1358 
1359 	for (i = 0; i < count; i++) {
1360 		isrc[i] = isrc_lookup(irqs[i]);
1361 		if (isrc == NULL) {
1362 			free(isrc, M_INTRNG);
1363 			return (EINVAL);
1364 		}
1365 	}
1366 
1367 	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);
1368 	free(isrc, M_INTRNG);
1369 	return (err);
1370 }
1371 
1372 int
1373 intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
1374 {
1375 	struct intr_irqsrc *isrc;
1376 	struct intr_pic *pic;
1377 	device_t pdev;
1378 	int err;
1379 
1380 	pic = pic_lookup(NULL, xref);
1381 	if (pic == NULL)
1382 		return (ESRCH);
1383 
1384 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1385 	    ("%s: Found a non-MSI controller: %s", __func__,
1386 	     device_get_name(pic->pic_dev)));
1387 
1388 	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
1389 	if (err != 0)
1390 		return (err);
1391 
1392 	*irq = isrc->isrc_irq;
1393 	return (0);
1394 }
1395 
1396 int
1397 intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
1398 {
1399 	struct intr_irqsrc *isrc;
1400 	struct intr_pic *pic;
1401 	int err;
1402 
1403 	pic = pic_lookup(NULL, xref);
1404 	if (pic == NULL)
1405 		return (ESRCH);
1406 
1407 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1408 	    ("%s: Found a non-MSI controller: %s", __func__,
1409 	     device_get_name(pic->pic_dev)));
1410 
1411 	isrc = isrc_lookup(irq);
1412 	if (isrc == NULL)
1413 		return (EINVAL);
1414 
1415 	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
1416 	return (err);
1417 }
1418 
1419 int
1420 intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
1421     uint64_t *addr, uint32_t *data)
1422 {
1423 	struct intr_irqsrc *isrc;
1424 	struct intr_pic *pic;
1425 	int err;
1426 
1427 	pic = pic_lookup(NULL, xref);
1428 	if (pic == NULL)
1429 		return (ESRCH);
1430 
1431 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1432 	    ("%s: Found a non-MSI controller: %s", __func__,
1433 	     device_get_name(pic->pic_dev)));
1434 
1435 	isrc = isrc_lookup(irq);
1436 	if (isrc == NULL)
1437 		return (EINVAL);
1438 
1439 	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
1440 	return (err);
1441 }
1442 
1443 
/*
 * Deliberately empty stub; presumably kept so MD code that calls
 * dosoftints() still links — TODO confirm callers.
 */
void dosoftints(void);
void
dosoftints(void)
{
}
1449 
1450 #ifdef SMP
1451 /*
1452  *  Init interrupt controller on another CPU.
1453  */
1454 void
1455 intr_pic_init_secondary(void)
1456 {
1457 
1458 	/*
1459 	 * QQQ: Only root PIC is aware of other CPUs ???
1460 	 */
1461 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
1462 
1463 	//mtx_lock(&isrc_table_lock);
1464 	PIC_INIT_SECONDARY(intr_irq_root_dev);
1465 	//mtx_unlock(&isrc_table_lock);
1466 }
1467 #endif
1468 
1469 #ifdef DDB
/*
 *  DDB "show irqs" command: dump every registered interrupt source with
 *  its name, the low word of its CPU affinity mask, whether it is bound,
 *  and its interrupt count, followed by the grand total.
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		/* Only the first counter is shown — assumes it is the
		 * interesting one; TODO confirm for multi-counter sources. */
		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
1489 #endif
1490