xref: /freebsd/sys/kern/subr_intr.c (revision 3fc155dc64bd967aebcde25b51c4210d870718b9)
1 /*-
2  * Copyright (c) 2015-2016 Svatopluk Kraus
3  * Copyright (c) 2015-2016 Michal Meloun
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  *	New-style Interrupt Framework
33  *
34  *  TODO: - to support IPI (PPI) enabling on other CPUs if already started
35  *        - to complete things for removable PICs
36  */
37 
38 #include "opt_acpi.h"
39 #include "opt_ddb.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/syslog.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/queue.h>
49 #include <sys/bus.h>
50 #include <sys/interrupt.h>
51 #include <sys/conf.h>
52 #include <sys/cpuset.h>
53 #include <sys/rman.h>
54 #include <sys/sched.h>
55 #include <sys/smp.h>
56 #include <machine/atomic.h>
57 #include <machine/intr.h>
58 #include <machine/cpu.h>
59 #include <machine/smp.h>
60 #include <machine/stdarg.h>
61 
62 #ifdef FDT
63 #include <dev/ofw/openfirm.h>
64 #include <dev/ofw/ofw_bus.h>
65 #include <dev/ofw/ofw_bus_subr.h>
66 #endif
67 
68 #ifdef DDB
69 #include <ddb/ddb.h>
70 #endif
71 
72 #include "pic_if.h"
73 #include "msi_if.h"
74 
75 #define	INTRNAME_LEN	(2*MAXCOMLEN + 1)
76 
77 #ifdef DEBUG
78 #define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
79     printf(fmt,##args); } while (0)
80 #else
81 #define debugf(fmt, args...)
82 #endif
83 
84 MALLOC_DECLARE(M_INTRNG);
85 MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
86 
87 /* Main interrupt handler called from assembler -> 'hidden' for C code. */
88 void intr_irq_handler(struct trapframe *tf);
89 
90 /* Root interrupt controller stuff. */
91 device_t intr_irq_root_dev;
92 static intr_irq_filter_t *irq_root_filter;
93 static void *irq_root_arg;
94 static u_int irq_root_ipicount;
95 
96 /* Interrupt controller definition. */
97 struct intr_pic {
98 	SLIST_ENTRY(intr_pic)	pic_next;
99 	intptr_t		pic_xref;	/* hardware identification */
100 	device_t		pic_dev;
101 #define	FLAG_PIC	(1 << 0)
102 #define	FLAG_MSI	(1 << 1)
103 	u_int			pic_flags;
104 };
105 
106 static struct mtx pic_list_lock;
107 static SLIST_HEAD(, intr_pic) pic_list;
108 
109 static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);
110 
111 /* Interrupt source definition. */
112 static struct mtx isrc_table_lock;
113 static struct intr_irqsrc *irq_sources[NIRQ];
114 u_int irq_next_free;
115 
116 /*
117  *  XXX - All stuff around struct intr_dev_data is considered as temporary
 *  until better place for storing struct intr_map_data will be found.
119  *
120  *  For now, there are two global interrupt numbers spaces:
121  *  <0, NIRQ)                      ... interrupts without config data
122  *                                     managed in irq_sources[]
123  *  IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
124  *                                     managed in intr_ddata_tab[]
125  *
126  *  Read intr_ddata_lookup() to see how these spaces are worked with.
127  *  Note that each interrupt number from second space duplicates some number
128  *  from first space at this moment. An interrupt number from first space can
129  *  be duplicated even multiple times in second space.
130  */
131 struct intr_dev_data {
132 	device_t		idd_dev;
133 	intptr_t		idd_xref;
134 	u_int			idd_irq;
135 	struct intr_map_data *	idd_data;
136 	struct intr_irqsrc *	idd_isrc;
137 };
138 
139 static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];
140 static u_int intr_ddata_first_unused;
141 
142 #define IRQ_DDATA_BASE	10000
143 CTASSERT(IRQ_DDATA_BASE > nitems(irq_sources));
144 
145 #ifdef SMP
146 static boolean_t irq_assign_cpu = FALSE;
147 #endif
148 
149 /*
150  * - 2 counters for each I/O interrupt.
151  * - MAXCPU counters for each IPI counters for SMP.
152  */
153 #ifdef SMP
154 #define INTRCNT_COUNT   (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
155 #else
156 #define INTRCNT_COUNT   (NIRQ * 2)
157 #endif
158 
159 /* Data for MI statistics reporting. */
160 u_long intrcnt[INTRCNT_COUNT];
161 char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
162 size_t sintrcnt = sizeof(intrcnt);
163 size_t sintrnames = sizeof(intrnames);
164 static u_int intrcnt_index;
165 
166 /*
167  *  Interrupt framework initialization routine.
168  */
169 static void
170 intr_irq_init(void *dummy __unused)
171 {
172 
173 	SLIST_INIT(&pic_list);
174 	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
175 
176 	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
177 }
178 SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
179 
180 static void
181 intrcnt_setname(const char *name, int index)
182 {
183 
184 	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
185 	    INTRNAME_LEN - 1, name);
186 }
187 
188 /*
189  *  Update name for interrupt source with interrupt event.
190  */
191 static void
192 intrcnt_updatename(struct intr_irqsrc *isrc)
193 {
194 
195 	/* QQQ: What about stray counter name? */
196 	mtx_assert(&isrc_table_lock, MA_OWNED);
197 	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
198 }
199 
/*
 *  Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	/*
	 * A PPI source may be dispatched on several CPUs concurrently, so
	 * its counter is bumped atomically; other sources only count from
	 * a single delivery context and a plain increment suffices.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}
212 
/*
 *  Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	/* isrc_count[1] is the stray slot reserved by isrc_setup_counters(). */
	isrc->isrc_count[1]++;
}
222 
223 /*
224  *  Virtualization for interrupt source interrupt name update.
225  */
226 static void
227 isrc_update_name(struct intr_irqsrc *isrc, const char *name)
228 {
229 	char str[INTRNAME_LEN];
230 
231 	mtx_assert(&isrc_table_lock, MA_OWNED);
232 
233 	if (name != NULL) {
234 		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
235 		intrcnt_setname(str, isrc->isrc_index);
236 		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
237 		    name);
238 		intrcnt_setname(str, isrc->isrc_index + 1);
239 	} else {
240 		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
241 		intrcnt_setname(str, isrc->isrc_index);
242 		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
243 		intrcnt_setname(str, isrc->isrc_index + 1);
244 	}
245 }
246 
247 /*
248  *  Virtualization for interrupt source interrupt counters setup.
249  */
250 static void
251 isrc_setup_counters(struct intr_irqsrc *isrc)
252 {
253 	u_int index;
254 
255 	/*
256 	 *  XXX - it does not work well with removable controllers and
257 	 *        interrupt sources !!!
258 	 */
259 	index = atomic_fetchadd_int(&intrcnt_index, 2);
260 	isrc->isrc_index = index;
261 	isrc->isrc_count = &intrcnt[index];
262 	isrc_update_name(isrc, NULL);
263 }
264 
/*
 *  Virtualization for interrupt source interrupt counters release.
 *
 *  XXX Not implemented yet - counter slots are handed out by a
 *  monotonically increasing index and never recycled; see also the
 *  "removable PICs" item in the TODO at the top of this file.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}
274 
275 #ifdef SMP
276 /*
277  *  Virtualization for interrupt source IPI counters setup.
278  */
279 u_long *
280 intr_ipi_setup_counters(const char *name)
281 {
282 	u_int index, i;
283 	char str[INTRNAME_LEN];
284 
285 	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
286 	for (i = 0; i < MAXCPU; i++) {
287 		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
288 		intrcnt_setname(str, index + i);
289 	}
290 	return (&intrcnt[index]);
291 }
292 #endif
293 
/*
 *  Main interrupt dispatch handler. It's called straight
 *  from the assembler, where CPU interrupt is served.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	/* Account the interrupt in MI statistics. */
	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	/*
	 * Save and restore the thread's interrupt frame around the root
	 * filter so a nested interrupt does not lose the outer frame.
	 */
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
}
315 
/*
 *  Interrupt controller dispatch function for interrupts. It should
 *  be called straight from the interrupt controller, when associated
 *  interrupt source is learned. Returns 0 if the interrupt was handled,
 *  EINVAL otherwise (the stray counter is bumped in that case).
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	/* Solely handled source - run its private filter directly. */
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	/* Standard handling through the MI interrupt event. */
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	/* Nobody claimed the interrupt. */
	isrc_increment_straycount(isrc);
	return (EINVAL);
}
346 
347 /*
348  *  Alloc unique interrupt number (resource handle) for interrupt source.
349  *
350  *  There could be various strategies how to allocate free interrupt number
351  *  (resource handle) for new interrupt source.
352  *
353  *  1. Handles are always allocated forward, so handles are not recycled
354  *     immediately. However, if only one free handle left which is reused
355  *     constantly...
356  */
357 static inline int
358 isrc_alloc_irq(struct intr_irqsrc *isrc)
359 {
360 	u_int maxirqs, irq;
361 
362 	mtx_assert(&isrc_table_lock, MA_OWNED);
363 
364 	maxirqs = nitems(irq_sources);
365 	if (irq_next_free >= maxirqs)
366 		return (ENOSPC);
367 
368 	for (irq = irq_next_free; irq < maxirqs; irq++) {
369 		if (irq_sources[irq] == NULL)
370 			goto found;
371 	}
372 	for (irq = 0; irq < irq_next_free; irq++) {
373 		if (irq_sources[irq] == NULL)
374 			goto found;
375 	}
376 
377 	irq_next_free = maxirqs;
378 	return (ENOSPC);
379 
380 found:
381 	isrc->isrc_irq = irq;
382 	irq_sources[irq] = isrc;
383 
384 	irq_next_free = irq + 1;
385 	if (irq_next_free >= maxirqs)
386 		irq_next_free = 0;
387 	return (0);
388 }
389 
390 /*
391  *  Free unique interrupt number (resource handle) from interrupt source.
392  */
393 static inline int
394 isrc_free_irq(struct intr_irqsrc *isrc)
395 {
396 
397 	mtx_assert(&isrc_table_lock, MA_OWNED);
398 
399 	if (isrc->isrc_irq >= nitems(irq_sources))
400 		return (EINVAL);
401 	if (irq_sources[isrc->isrc_irq] != isrc)
402 		return (EINVAL);
403 
404 	irq_sources[isrc->isrc_irq] = NULL;
405 	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
406 	return (0);
407 }
408 
409 /*
410  *  Lookup interrupt source by interrupt number (resource handle).
411  */
412 static inline struct intr_irqsrc *
413 isrc_lookup(u_int irq)
414 {
415 
416 	if (irq < nitems(irq_sources))
417 		return (irq_sources[irq]);
418 	return (NULL);
419 }
420 
421 /*
422  *  Initialize interrupt source and register it into global interrupt table.
423  */
424 int
425 intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
426     const char *fmt, ...)
427 {
428 	int error;
429 	va_list ap;
430 
431 	bzero(isrc, sizeof(struct intr_irqsrc));
432 	isrc->isrc_dev = dev;
433 	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
434 	isrc->isrc_flags = flags;
435 
436 	va_start(ap, fmt);
437 	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
438 	va_end(ap);
439 
440 	mtx_lock(&isrc_table_lock);
441 	error = isrc_alloc_irq(isrc);
442 	if (error != 0) {
443 		mtx_unlock(&isrc_table_lock);
444 		return (error);
445 	}
446 	/*
447 	 * Setup interrupt counters, but not for IPI sources. Those are setup
448 	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
449 	 * our counter pool.
450 	 */
451 	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
452 		isrc_setup_counters(isrc);
453 	mtx_unlock(&isrc_table_lock);
454 	return (0);
455 }
456 
457 /*
458  *  Deregister interrupt source from global interrupt table.
459  */
460 int
461 intr_isrc_deregister(struct intr_irqsrc *isrc)
462 {
463 	int error;
464 
465 	mtx_lock(&isrc_table_lock);
466 	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
467 		isrc_release_counters(isrc);
468 	error = isrc_free_irq(isrc);
469 	mtx_unlock(&isrc_table_lock);
470 	return (error);
471 }
472 
473 #ifdef SMP
474 /*
475  *  A support function for a PIC to decide if provided ISRC should be inited
476  *  on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
477  *  struct intr_irqsrc is the following:
478  *
479  *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
480  *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
481  *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
482  */
483 bool
484 intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
485 {
486 
487 	if (isrc->isrc_handlers == 0)
488 		return (false);
489 	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
490 		return (false);
491 	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
492 		return (CPU_ISSET(cpu, &isrc->isrc_cpu));
493 
494 	CPU_SET(cpu, &isrc->isrc_cpu);
495 	return (true);
496 }
497 #endif
498 
499 static struct intr_dev_data *
500 intr_ddata_alloc(u_int extsize)
501 {
502 	struct intr_dev_data *ddata;
503 	size_t size;
504 
505 	size = sizeof(*ddata);
506 	ddata = malloc(size + extsize, M_INTRNG, M_WAITOK | M_ZERO);
507 
508 	mtx_lock(&isrc_table_lock);
509 	if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
510 		mtx_unlock(&isrc_table_lock);
511 		free(ddata, M_INTRNG);
512 		return (NULL);
513 	}
514 	intr_ddata_tab[intr_ddata_first_unused] = ddata;
515 	ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
516 	mtx_unlock(&isrc_table_lock);
517 
518 	ddata->idd_data = (struct intr_map_data *)((uintptr_t)ddata + size);
519 	ddata->idd_data->size = extsize;
520 	return (ddata);
521 }
522 
523 static struct intr_irqsrc *
524 intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
525 {
526 	int error;
527 	struct intr_irqsrc *isrc;
528 	struct intr_dev_data *ddata;
529 
530 	isrc = isrc_lookup(irq);
531 	if (isrc != NULL) {
532 		if (datap != NULL)
533 			*datap = NULL;
534 		return (isrc);
535 	}
536 
537 	if (irq < IRQ_DDATA_BASE)
538 		return (NULL);
539 
540 	irq -= IRQ_DDATA_BASE;
541 	if (irq >= nitems(intr_ddata_tab))
542 		return (NULL);
543 
544 	ddata = intr_ddata_tab[irq];
545 	if (ddata->idd_isrc == NULL) {
546 		error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
547 		    ddata->idd_data, &irq);
548 		if (error != 0)
549 			return (NULL);
550 		ddata->idd_isrc = isrc_lookup(irq);
551 	}
552 	if (datap != NULL)
553 		*datap = ddata->idd_data;
554 	return (ddata->idd_isrc);
555 }
556 
557 #ifdef DEV_ACPI
558 /*
559  *  Map interrupt source according to ACPI info into framework. If such mapping
560  *  does not exist, create it. Return unique interrupt number (resource handle)
561  *  associated with mapped interrupt source.
562  */
563 u_int
564 intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
565     enum intr_trigger trig)
566 {
567 	struct intr_map_data_acpi *daa;
568 	struct intr_dev_data *ddata;
569 
570 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_acpi));
571 	if (ddata == NULL)
572 		return (INTR_IRQ_INVALID);	/* no space left */
573 
574 	ddata->idd_dev = dev;
575 	ddata->idd_data->type = INTR_MAP_DATA_ACPI;
576 
577 	daa = (struct intr_map_data_acpi *)ddata->idd_data;
578 	daa->irq = irq;
579 	daa->pol = pol;
580 	daa->trig = trig;
581 
582 	return (ddata->idd_irq);
583 }
584 #endif
585 #ifdef FDT
586 /*
587  *  Map interrupt source according to FDT data into framework. If such mapping
588  *  does not exist, create it. Return unique interrupt number (resource handle)
589  *  associated with mapped interrupt source.
590  */
591 u_int
592 intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
593 {
594 	size_t cellsize;
595 	struct intr_dev_data *ddata;
596 	struct intr_map_data_fdt *daf;
597 
598 	cellsize = ncells * sizeof(*cells);
599 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_fdt) + cellsize);
600 	if (ddata == NULL)
601 		return (INTR_IRQ_INVALID);	/* no space left */
602 
603 	ddata->idd_xref = (intptr_t)node;
604 	ddata->idd_data->type = INTR_MAP_DATA_FDT;
605 
606 	daf = (struct intr_map_data_fdt *)ddata->idd_data;
607 	daf->ncells = ncells;
608 	memcpy(daf->cells, cells, cellsize);
609 	return (ddata->idd_irq);
610 }
611 #endif
612 
613 /*
614  *  Store GPIO interrupt decription in framework and return unique interrupt
615  *  number (resource handle) associated with it.
616  */
617 u_int
618 intr_gpio_map_irq(device_t dev, u_int pin_num, u_int pin_flags, u_int intr_mode)
619 {
620 	struct intr_dev_data *ddata;
621 	struct intr_map_data_gpio *dag;
622 
623 	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_gpio));
624 	if (ddata == NULL)
625 		return (INTR_IRQ_INVALID);	/* no space left */
626 
627 	ddata->idd_dev = dev;
628 	ddata->idd_data->type = INTR_MAP_DATA_GPIO;
629 
630 	dag = (struct intr_map_data_gpio *)ddata->idd_data;
631 	dag->gpio_pin_num = pin_num;
632 	dag->gpio_pin_flags = pin_flags;
633 	dag->gpio_intr_mode = intr_mode;
634 	return (ddata->idd_irq);
635 }
636 
637 #ifdef INTR_SOLO
638 /*
639  *  Setup filter into interrupt source.
640  */
641 static int
642 iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
643     intr_irq_filter_t *filter, void *arg, void **cookiep)
644 {
645 
646 	if (filter == NULL)
647 		return (EINVAL);
648 
649 	mtx_lock(&isrc_table_lock);
650 	/*
651 	 * Make sure that we do not mix the two ways
652 	 * how we handle interrupt sources.
653 	 */
654 	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
655 		mtx_unlock(&isrc_table_lock);
656 		return (EBUSY);
657 	}
658 	isrc->isrc_filter = filter;
659 	isrc->isrc_arg = arg;
660 	isrc_update_name(isrc, name);
661 	mtx_unlock(&isrc_table_lock);
662 
663 	*cookiep = isrc;
664 	return (0);
665 }
666 #endif
667 
/*
 *  Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate PIC-specific preparation before the ithread runs. */
	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}
678 
/*
 *  Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate PIC-specific cleanup after the ithread has run. */
	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}
689 
/*
 *  Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate PIC-specific handling after all filters have run. */
	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}
700 
/*
 *  Interrupt source assign_cpu method for MI interrupt framework.
 *
 *  Binds the source to the given CPU, or clears the binding when
 *  cpu == NOCPU. Returns EINVAL for sources that do not belong to the
 *  root controller and EOPNOTSUPP on non-SMP kernels.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	/* Only sources of the root controller can be (re)bound here. */
	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
	 * re-balance it to another CPU or enable it on more CPUs. However,
	 * PIC is expected to change isrc_cpu appropriately to keep us well
	 * informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			/* Binding failed - drop the requested affinity. */
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
743 
/*
 *  Create interrupt event for interrupt source.
 *
 *  The event is created without holding the table lock, so a concurrent
 *  creator may win the race; the loser destroys its own event.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources. Let the contested event win:
	 * losing to another event is EBUSY, losing to a filter returns 0.
	 *
	 * NOTE(review): the 0 return in the filter case leaves isrc_event
	 * NULL while the caller proceeds as if creation succeeded - confirm
	 * the INTR_SOLO path in isrc_add_handler() copes with that.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}
778 #ifdef notyet
/*
 *  Destroy interrupt event for interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	/* Detach the event under the lock; destroy it outside the lock. */
	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
795 #endif
796 /*
797  *  Add handler to interrupt source.
798  */
799 static int
800 isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
801     driver_filter_t filter, driver_intr_t handler, void *arg,
802     enum intr_type flags, void **cookiep)
803 {
804 	int error;
805 
806 	if (isrc->isrc_event == NULL) {
807 		error = isrc_event_create(isrc);
808 		if (error)
809 			return (error);
810 	}
811 
812 	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
813 	    arg, intr_priority(flags), flags, cookiep);
814 	if (error == 0) {
815 		mtx_lock(&isrc_table_lock);
816 		intrcnt_updatename(isrc);
817 		mtx_unlock(&isrc_table_lock);
818 	}
819 
820 	return (error);
821 }
822 
/*
 *  Lookup interrupt controller locked.
 *
 *  A controller may be looked up by device, by xref, or by both. A zero
 *  xref - whether requested or stored - acts as a wildcard, in which
 *  case matching falls back to the device alone.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	/* Refuse a fully wildcarded query. */
	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if (dev == NULL) {
			/* Match by xref only. */
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			/* Either side lacks an xref - match by device. */
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
				return (pic);
	}
	return (NULL);
}
849 
850 /*
851  *  Lookup interrupt controller.
852  */
853 static struct intr_pic *
854 pic_lookup(device_t dev, intptr_t xref)
855 {
856 	struct intr_pic *pic;
857 
858 	mtx_lock(&pic_list_lock);
859 	pic = pic_lookup_locked(dev, xref);
860 	mtx_unlock(&pic_list_lock);
861 	return (pic);
862 }
863 
864 /*
865  *  Create interrupt controller.
866  */
867 static struct intr_pic *
868 pic_create(device_t dev, intptr_t xref)
869 {
870 	struct intr_pic *pic;
871 
872 	mtx_lock(&pic_list_lock);
873 	pic = pic_lookup_locked(dev, xref);
874 	if (pic != NULL) {
875 		mtx_unlock(&pic_list_lock);
876 		return (pic);
877 	}
878 	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
879 	if (pic == NULL) {
880 		mtx_unlock(&pic_list_lock);
881 		return (NULL);
882 	}
883 	pic->pic_xref = xref;
884 	pic->pic_dev = dev;
885 	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
886 	mtx_unlock(&pic_list_lock);
887 
888 	return (pic);
889 }
890 #ifdef notyet
891 /*
892  *  Destroy interrupt controller.
893  */
894 static void
895 pic_destroy(device_t dev, intptr_t xref)
896 {
897 	struct intr_pic *pic;
898 
899 	mtx_lock(&pic_list_lock);
900 	pic = pic_lookup_locked(dev, xref);
901 	if (pic == NULL) {
902 		mtx_unlock(&pic_list_lock);
903 		return;
904 	}
905 	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
906 	mtx_unlock(&pic_list_lock);
907 
908 	free(pic, M_INTRNG);
909 }
910 #endif
911 /*
912  *  Register interrupt controller.
913  */
914 int
915 intr_pic_register(device_t dev, intptr_t xref)
916 {
917 	struct intr_pic *pic;
918 
919 	if (dev == NULL)
920 		return (EINVAL);
921 	pic = pic_create(dev, xref);
922 	if (pic == NULL)
923 		return (ENOMEM);
924 
925 	pic->pic_flags |= FLAG_PIC;
926 
927 	debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic,
928 	    device_get_nameunit(dev), dev, xref);
929 	return (0);
930 }
931 
/*
 *  Unregister interrupt controller.
 *
 *  XXX Not implemented yet; see the "removable PICs" item in the TODO
 *  at the top of this file (pic_destroy() under "notyet" is intended
 *  to do the actual teardown).
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}
941 
/*
 *  Mark interrupt controller (itself) as a root one.
 *
 *  Note that only an interrupt controller can really know its position
 *  in interrupt controller's tree. So root PIC must claim itself as a root.
 *
 *  In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
 *  page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 *
 *  The filter is what intr_irq_handler() will invoke for every CPU
 *  interrupt; ipicount is the number of IPIs the root PIC provides.
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	struct intr_pic *pic;

	/* The controller must have registered itself beforehand. */
	pic = pic_lookup(dev, xref);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
	    ("%s: Found a non-PIC controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
993 
994 int
995 intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
996     u_int *irqp)
997 {
998 	int error;
999 	struct intr_irqsrc *isrc;
1000 	struct intr_pic *pic;
1001 
1002 	if (data == NULL)
1003 		return (EINVAL);
1004 
1005 	pic = pic_lookup(dev, xref);
1006 	if (pic == NULL)
1007 		return (ESRCH);
1008 
1009 	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
1010 	    ("%s: Found a non-PIC controller: %s", __func__,
1011 	     device_get_name(pic->pic_dev)));
1012 
1013 	error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
1014 	if (error == 0)
1015 		*irqp = isrc->isrc_irq;
1016 	return (error);
1017 }
1018 
/*
 *  Pass the single interrupt behind the given resource to the owning
 *  PIC's alloc_intr method, together with any stored config data.
 */
int
intr_alloc_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	/* The handle may come from either interrupt number space. */
	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
}
1034 
/*
 *  Pass the single interrupt behind the given resource to the owning
 *  PIC's release_intr method; the counterpart of intr_alloc_irq().
 */
int
intr_release_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	/* The handle may come from either interrupt number space. */
	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
}
1050 
1051 int
1052 intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
1053     driver_intr_t hand, void *arg, int flags, void **cookiep)
1054 {
1055 	int error;
1056 	struct intr_map_data *data;
1057 	struct intr_irqsrc *isrc;
1058 	const char *name;
1059 
1060 	KASSERT(rman_get_start(res) == rman_get_end(res),
1061 	    ("%s: more interrupts in resource", __func__));
1062 
1063 	isrc = intr_ddata_lookup(rman_get_start(res), &data);
1064 	if (isrc == NULL)
1065 		return (EINVAL);
1066 
1067 	name = device_get_nameunit(dev);
1068 
1069 #ifdef INTR_SOLO
1070 	/*
1071 	 * Standard handling is done through MI interrupt framework. However,
1072 	 * some interrupts could request solely own special handling. This
1073 	 * non standard handling can be used for interrupt controllers without
1074 	 * handler (filter only), so in case that interrupt controllers are
1075 	 * chained, MI interrupt framework is called only in leaf controller.
1076 	 *
1077 	 * Note that root interrupt controller routine is served as well,
1078 	 * however in intr_irq_handler(), i.e. main system dispatch routine.
1079 	 */
1080 	if (flags & INTR_SOLO && hand != NULL) {
1081 		debugf("irq %u cannot solo on %s\n", irq, name);
1082 		return (EINVAL);
1083 	}
1084 
1085 	if (flags & INTR_SOLO) {
1086 		error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1087 		    arg, cookiep);
1088 		debugf("irq %u setup filter error %d on %s\n", irq, error,
1089 		    name);
1090 	} else
1091 #endif
1092 		{
1093 		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1094 		    cookiep);
1095 		debugf("irq %u add handler error %d on %s\n", irq, error, name);
1096 	}
1097 	if (error != 0)
1098 		return (error);
1099 
1100 	mtx_lock(&isrc_table_lock);
1101 	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1102 	if (error == 0) {
1103 		isrc->isrc_handlers++;
1104 		if (isrc->isrc_handlers == 1)
1105 			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1106 	}
1107 	mtx_unlock(&isrc_table_lock);
1108 	if (error != 0)
1109 		intr_event_remove_handler(*cookiep);
1110 	return (error);
1111 }
1112 
/*
 *  Teardown the handler previously installed by intr_setup_irq() on the
 *  single interrupt behind the given resource. The source is disabled
 *  in the PIC when its last handler goes away.
 */
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

#ifdef INTR_SOLO
	/* A solo filter's cookie is the interrupt source itself. */
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	/* Otherwise the cookie comes from intr_event_add_handler(). */
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		/* Last handler gone - quiesce the source in hardware. */
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1158 
1159 int
1160 intr_describe_irq(device_t dev, struct resource *res, void *cookie,
1161     const char *descr)
1162 {
1163 	int error;
1164 	struct intr_irqsrc *isrc;
1165 
1166 	KASSERT(rman_get_start(res) == rman_get_end(res),
1167 	    ("%s: more interrupts in resource", __func__));
1168 
1169 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1170 	if (isrc == NULL || isrc->isrc_handlers == 0)
1171 		return (EINVAL);
1172 #ifdef INTR_SOLO
1173 	if (isrc->isrc_filter != NULL) {
1174 		if (isrc != cookie)
1175 			return (EINVAL);
1176 
1177 		mtx_lock(&isrc_table_lock);
1178 		isrc_update_name(isrc, descr);
1179 		mtx_unlock(&isrc_table_lock);
1180 		return (0);
1181 	}
1182 #endif
1183 	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
1184 	if (error == 0) {
1185 		mtx_lock(&isrc_table_lock);
1186 		intrcnt_updatename(isrc);
1187 		mtx_unlock(&isrc_table_lock);
1188 	}
1189 	return (error);
1190 }
1191 
1192 #ifdef SMP
1193 int
1194 intr_bind_irq(device_t dev, struct resource *res, int cpu)
1195 {
1196 	struct intr_irqsrc *isrc;
1197 
1198 	KASSERT(rman_get_start(res) == rman_get_end(res),
1199 	    ("%s: more interrupts in resource", __func__));
1200 
1201 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1202 	if (isrc == NULL || isrc->isrc_handlers == 0)
1203 		return (EINVAL);
1204 #ifdef INTR_SOLO
1205 	if (isrc->isrc_filter != NULL)
1206 		return (intr_isrc_assign_cpu(isrc, cpu));
1207 #endif
1208 	return (intr_event_bind(isrc->isrc_event, cpu));
1209 }
1210 
1211 /*
1212  * Return the CPU that the next interrupt source should use.
1213  * For now just returns the next CPU according to round-robin.
1214  */
1215 u_int
1216 intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
1217 {
1218 
1219 	if (!irq_assign_cpu || mp_ncpus == 1)
1220 		return (PCPU_GET(cpuid));
1221 
1222 	do {
1223 		last_cpu++;
1224 		if (last_cpu > mp_maxid)
1225 			last_cpu = 0;
1226 	} while (!CPU_ISSET(last_cpu, cpumask));
1227 	return (last_cpu);
1228 }
1229 
1230 /*
1231  *  Distribute all the interrupt sources among the available
1232  *  CPUs once the AP's have been launched.
1233  */
1234 static void
1235 intr_irq_shuffle(void *arg __unused)
1236 {
1237 	struct intr_irqsrc *isrc;
1238 	u_int i;
1239 
1240 	if (mp_ncpus == 1)
1241 		return;
1242 
1243 	mtx_lock(&isrc_table_lock);
1244 	irq_assign_cpu = TRUE;
1245 	for (i = 0; i < NIRQ; i++) {
1246 		isrc = irq_sources[i];
1247 		if (isrc == NULL || isrc->isrc_handlers == 0 ||
1248 		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
1249 			continue;
1250 
1251 		if (isrc->isrc_event != NULL &&
1252 		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
1253 		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
1254 			panic("%s: CPU inconsistency", __func__);
1255 
1256 		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
1257 			CPU_ZERO(&isrc->isrc_cpu); /* start again */
1258 
1259 		/*
1260 		 * We are in wicked position here if the following call fails
1261 		 * for bound ISRC. The best thing we can do is to clear
1262 		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
1263 		 */
1264 		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
1265 			CPU_ZERO(&isrc->isrc_cpu);
1266 	}
1267 	mtx_unlock(&isrc_table_lock);
1268 }
1269 SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1270 
1271 #else
1272 u_int
1273 intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
1274 {
1275 
1276 	return (PCPU_GET(cpuid));
1277 }
1278 #endif
1279 
1280 /*
1281  *  Register a MSI/MSI-X interrupt controller
1282  */
1283 int
1284 intr_msi_register(device_t dev, intptr_t xref)
1285 {
1286 	struct intr_pic *pic;
1287 
1288 	if (dev == NULL)
1289 		return (EINVAL);
1290 	pic = pic_create(dev, xref);
1291 	if (pic == NULL)
1292 		return (ENOMEM);
1293 
1294 	pic->pic_flags |= FLAG_MSI;
1295 
1296 	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
1297 	    device_get_nameunit(dev), dev, (uintmax_t)xref);
1298 	return (0);
1299 }
1300 
1301 int
1302 intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
1303     int maxcount, int *irqs)
1304 {
1305 	struct intr_irqsrc **isrc;
1306 	struct intr_pic *pic;
1307 	device_t pdev;
1308 	int err, i;
1309 
1310 	pic = pic_lookup(NULL, xref);
1311 	if (pic == NULL)
1312 		return (ESRCH);
1313 
1314 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1315 	    ("%s: Found a non-MSI controller: %s", __func__,
1316 	     device_get_name(pic->pic_dev)));
1317 
1318 	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1319 	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
1320 	if (err == 0) {
1321 		for (i = 0; i < count; i++) {
1322 			irqs[i] = isrc[i]->isrc_irq;
1323 		}
1324 	}
1325 
1326 	free(isrc, M_INTRNG);
1327 
1328 	return (err);
1329 }
1330 
1331 int
1332 intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
1333     int *irqs)
1334 {
1335 	struct intr_irqsrc **isrc;
1336 	struct intr_pic *pic;
1337 	int i, err;
1338 
1339 	pic = pic_lookup(NULL, xref);
1340 	if (pic == NULL)
1341 		return (ESRCH);
1342 
1343 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1344 	    ("%s: Found a non-MSI controller: %s", __func__,
1345 	     device_get_name(pic->pic_dev)));
1346 
1347 	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1348 
1349 	for (i = 0; i < count; i++) {
1350 		isrc[i] = isrc_lookup(irqs[i]);
1351 		if (isrc == NULL) {
1352 			free(isrc, M_INTRNG);
1353 			return (EINVAL);
1354 		}
1355 	}
1356 
1357 	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);
1358 	free(isrc, M_INTRNG);
1359 	return (err);
1360 }
1361 
1362 int
1363 intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
1364 {
1365 	struct intr_irqsrc *isrc;
1366 	struct intr_pic *pic;
1367 	device_t pdev;
1368 	int err;
1369 
1370 	pic = pic_lookup(NULL, xref);
1371 	if (pic == NULL)
1372 		return (ESRCH);
1373 
1374 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1375 	    ("%s: Found a non-MSI controller: %s", __func__,
1376 	     device_get_name(pic->pic_dev)));
1377 
1378 	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
1379 	if (err != 0)
1380 		return (err);
1381 
1382 	*irq = isrc->isrc_irq;
1383 	return (0);
1384 }
1385 
1386 int
1387 intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
1388 {
1389 	struct intr_irqsrc *isrc;
1390 	struct intr_pic *pic;
1391 	int err;
1392 
1393 	pic = pic_lookup(NULL, xref);
1394 	if (pic == NULL)
1395 		return (ESRCH);
1396 
1397 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1398 	    ("%s: Found a non-MSI controller: %s", __func__,
1399 	     device_get_name(pic->pic_dev)));
1400 
1401 	isrc = isrc_lookup(irq);
1402 	if (isrc == NULL)
1403 		return (EINVAL);
1404 
1405 	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
1406 	return (err);
1407 }
1408 
1409 int
1410 intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
1411     uint64_t *addr, uint32_t *data)
1412 {
1413 	struct intr_irqsrc *isrc;
1414 	struct intr_pic *pic;
1415 	int err;
1416 
1417 	pic = pic_lookup(NULL, xref);
1418 	if (pic == NULL)
1419 		return (ESRCH);
1420 
1421 	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
1422 	    ("%s: Found a non-MSI controller: %s", __func__,
1423 	     device_get_name(pic->pic_dev)));
1424 
1425 	isrc = isrc_lookup(irq);
1426 	if (isrc == NULL)
1427 		return (EINVAL);
1428 
1429 	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
1430 	return (err);
1431 }
1432 
1433 
/*
 *  Deliberate no-op stub.  NOTE(review): presumably satisfies an external
 *  dosoftints() reference from MI/MD code — confirm against callers.
 */
void dosoftints(void);
void
dosoftints(void)
{
}
1439 
1440 #ifdef SMP
1441 /*
1442  *  Init interrupt controller on another CPU.
1443  */
1444 void
1445 intr_pic_init_secondary(void)
1446 {
1447 
1448 	/*
1449 	 * QQQ: Only root PIC is aware of other CPUs ???
1450 	 */
1451 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
1452 
1453 	//mtx_lock(&isrc_table_lock);
1454 	PIC_INIT_SECONDARY(intr_irq_root_dev);
1455 	//mtx_unlock(&isrc_table_lock);
1456 }
1457 #endif
1458 
1459 #ifdef DDB
1460 DB_SHOW_COMMAND(irqs, db_show_irqs)
1461 {
1462 	u_int i, irqsum;
1463 	u_long num;
1464 	struct intr_irqsrc *isrc;
1465 
1466 	for (irqsum = 0, i = 0; i < NIRQ; i++) {
1467 		isrc = irq_sources[i];
1468 		if (isrc == NULL)
1469 			continue;
1470 
1471 		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
1472 		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
1473 		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
1474 		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
1475 		irqsum += num;
1476 	}
1477 	db_printf("irq total %u\n", irqsum);
1478 }
1479 #endif
1480