xref: /freebsd/sys/kern/subr_intr.c (revision 20f8619da05e2775ef7b381c5df080d621fa8332)
1 /*-
2  * Copyright (c) 2015-2016 Svatopluk Kraus
3  * Copyright (c) 2015-2016 Michal Meloun
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  *	New-style Interrupt Framework
33  *
34  *  TODO: - to support IPI (PPI) enabling on other CPUs if already started
35  *        - to complete things for removable PICs
36  */
37 
38 #include "opt_acpi.h"
39 #include "opt_ddb.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/syslog.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/queue.h>
49 #include <sys/bus.h>
50 #include <sys/interrupt.h>
51 #include <sys/conf.h>
52 #include <sys/cpuset.h>
53 #include <sys/rman.h>
54 #include <sys/sched.h>
55 #include <sys/smp.h>
56 #include <machine/atomic.h>
57 #include <machine/intr.h>
58 #include <machine/cpu.h>
59 #include <machine/smp.h>
60 #include <machine/stdarg.h>
61 
62 #ifdef FDT
63 #include <dev/ofw/openfirm.h>
64 #include <dev/ofw/ofw_bus.h>
65 #include <dev/ofw/ofw_bus_subr.h>
66 #endif
67 
68 #ifdef DDB
69 #include <ddb/ddb.h>
70 #endif
71 
72 #include "pic_if.h"
73 
74 #define	INTRNAME_LEN	(2*MAXCOMLEN + 1)
75 
76 #ifdef DEBUG
77 #define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
78     printf(fmt,##args); } while (0)
79 #else
80 #define debugf(fmt, args...)
81 #endif
82 
83 MALLOC_DECLARE(M_INTRNG);
84 MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
85 
/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;			/* set once by intr_pic_claim_root() */
static intr_irq_filter_t *irq_root_filter;	/* dispatch filter for the root PIC */
static void *irq_root_arg;			/* argument passed to irq_root_filter */
static u_int irq_root_ipicount;			/* IPI count given to intr_pic_claim_root() */
94 
/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;	/* link in global pic_list */
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;	/* never NULL on a registered PIC */
};
101 
/* List of registered interrupt controllers, protected by pic_list_lock. */
static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;		/* protects irq_sources[] and intrnames[] */
static struct intr_irqsrc *irq_sources[NIRQ];	/* resource handle -> source table */
u_int irq_next_free;				/* search hint for isrc_alloc_irq() */
111 
/*
 *  XXX - All stuff around struct intr_dev_data is considered as temporary
 *  until a better place for storing struct intr_map_data is found.
 *
 *  For now, there are two global interrupt number spaces:
 *  <0, NIRQ)                      ... interrupts without config data
 *                                     managed in irq_sources[]
 *  IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
 *                                     managed in intr_ddata_tab[]
 *
 *  Read intr_ddata_lookup() to see how these spaces are worked with.
 *  Note that each interrupt number from the second space duplicates some
 *  number from the first space at this moment. An interrupt number from the
 *  first space can be duplicated even multiple times in the second space.
 */
struct intr_dev_data {
	device_t		idd_dev;	/* mapping device; NULL for FDT mappings */
	intptr_t		idd_xref;	/* PIC xref (e.g. FDT node); 0 if unused */
	u_int			idd_irq;	/* IRQ_DDATA_BASE + slot in intr_ddata_tab[] */
	struct intr_map_data *	idd_data;	/* config data allocated right after this struct */
	struct intr_irqsrc *	idd_isrc;	/* source resolved lazily in intr_ddata_lookup() */
};
134 
static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];	/* second number space */
static u_int intr_ddata_first_unused;	/* next free slot in intr_ddata_tab[] */

/* Base of the second number space; must lie above every first-space handle. */
#define IRQ_DDATA_BASE	10000
CTASSERT(IRQ_DDATA_BASE > nitems(irq_sources));
140 
#ifdef SMP
/* When TRUE, binding requests are pushed down via PIC_BIND_INTR(). */
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI counters for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT   (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT   (NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;	/* next free counter slot; only ever grows */
161 
162 /*
163  *  Interrupt framework initialization routine.
164  */
165 static void
166 intr_irq_init(void *dummy __unused)
167 {
168 
169 	SLIST_INIT(&pic_list);
170 	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
171 	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
172 }
173 SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
174 
175 static void
176 intrcnt_setname(const char *name, int index)
177 {
178 
179 	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
180 	    INTRNAME_LEN - 1, name);
181 }
182 
/*
 *  Update name for interrupt source with interrupt event.
 *  Mirrors the event's full name into the MI statistics name table.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}
194 
195 /*
196  *  Virtualization for interrupt source interrupt counter increment.
197  */
198 static inline void
199 isrc_increment_count(struct intr_irqsrc *isrc)
200 {
201 
202 	if (isrc->isrc_flags & INTR_ISRCF_PPI)
203 		atomic_add_long(&isrc->isrc_count[0], 1);
204 	else
205 		isrc->isrc_count[0]++;
206 }
207 
/*
 *  Virtualization for interrupt source interrupt stray counter increment.
 *  The stray counter is the second of the two intrcnt[] slots reserved in
 *  isrc_setup_counters().
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}
217 
218 /*
219  *  Virtualization for interrupt source interrupt name update.
220  */
221 static void
222 isrc_update_name(struct intr_irqsrc *isrc, const char *name)
223 {
224 	char str[INTRNAME_LEN];
225 
226 	mtx_assert(&isrc_table_lock, MA_OWNED);
227 
228 	if (name != NULL) {
229 		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
230 		intrcnt_setname(str, isrc->isrc_index);
231 		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
232 		    name);
233 		intrcnt_setname(str, isrc->isrc_index + 1);
234 	} else {
235 		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
236 		intrcnt_setname(str, isrc->isrc_index);
237 		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
238 		intrcnt_setname(str, isrc->isrc_index + 1);
239 	}
240 }
241 
/*
 *  Virtualization for interrupt source interrupt counters setup.
 *
 *  Reserves two consecutive slots in intrcnt[]/intrnames[]: the main
 *  counter and the stray counter for this source.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 *  XXX - it does not work well with removable controllers and
	 *        interrupt sources !!!
	 *
	 *  NOTE(review): intrcnt_index is never checked against
	 *  INTRCNT_COUNT, so exhausting the counter pool would point
	 *  isrc_count past the end of intrcnt[] - verify the pool is
	 *  sized generously enough for all sources.
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}
259 
/*
 *  Virtualization for interrupt source interrupt counters release.
 *  Not implemented yet - counter slots are handed out forward-only by
 *  isrc_setup_counters() and never recycled.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}
269 
270 #ifdef SMP
271 /*
272  *  Virtualization for interrupt source IPI counters setup.
273  */
274 u_long *
275 intr_ipi_setup_counters(const char *name)
276 {
277 	u_int index, i;
278 	char str[INTRNAME_LEN];
279 
280 	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
281 	for (i = 0; i < MAXCPU; i++) {
282 		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
283 		intrcnt_setname(str, index + i);
284 	}
285 	return (&intrcnt[index]);
286 }
287 #endif
288 
/*
 *  Main interrupt dispatch handler. It's called straight
 *  from the assembler, where CPU interrupt is served.
 *  Runs the root controller's filter inside a critical section.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	/* Save and publish the trapframe so nested code can find it. */
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
}
310 
/*
 *  interrupt controller dispatch function for interrupts. It should
 *  be called straight from the interrupt controller, when associated interrupt
 *  source is learned.
 *
 *  Returns 0 when the interrupt was claimed by a solo filter or the MI
 *  interrupt event; EINVAL otherwise (counted as a stray interrupt).
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	/* A solo filter, when installed, takes priority over the event. */
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}
341 
342 /*
343  *  Alloc unique interrupt number (resource handle) for interrupt source.
344  *
345  *  There could be various strategies how to allocate free interrupt number
346  *  (resource handle) for new interrupt source.
347  *
348  *  1. Handles are always allocated forward, so handles are not recycled
349  *     immediately. However, if only one free handle left which is reused
350  *     constantly...
351  */
352 static inline int
353 isrc_alloc_irq(struct intr_irqsrc *isrc)
354 {
355 	u_int maxirqs, irq;
356 
357 	mtx_assert(&isrc_table_lock, MA_OWNED);
358 
359 	maxirqs = nitems(irq_sources);
360 	if (irq_next_free >= maxirqs)
361 		return (ENOSPC);
362 
363 	for (irq = irq_next_free; irq < maxirqs; irq++) {
364 		if (irq_sources[irq] == NULL)
365 			goto found;
366 	}
367 	for (irq = 0; irq < irq_next_free; irq++) {
368 		if (irq_sources[irq] == NULL)
369 			goto found;
370 	}
371 
372 	irq_next_free = maxirqs;
373 	return (ENOSPC);
374 
375 found:
376 	isrc->isrc_irq = irq;
377 	irq_sources[irq] = isrc;
378 
379 	irq_next_free = irq + 1;
380 	if (irq_next_free >= maxirqs)
381 		irq_next_free = 0;
382 	return (0);
383 }
384 
385 /*
386  *  Free unique interrupt number (resource handle) from interrupt source.
387  */
388 static inline int
389 isrc_free_irq(struct intr_irqsrc *isrc)
390 {
391 
392 	mtx_assert(&isrc_table_lock, MA_OWNED);
393 
394 	if (isrc->isrc_irq >= nitems(irq_sources))
395 		return (EINVAL);
396 	if (irq_sources[isrc->isrc_irq] != isrc)
397 		return (EINVAL);
398 
399 	irq_sources[isrc->isrc_irq] = NULL;
400 	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
401 	return (0);
402 }
403 
404 /*
405  *  Lookup interrupt source by interrupt number (resource handle).
406  */
407 static inline struct intr_irqsrc *
408 isrc_lookup(u_int irq)
409 {
410 
411 	if (irq < nitems(irq_sources))
412 		return (irq_sources[irq]);
413 	return (NULL);
414 }
415 
/*
 *  Initialize interrupt source and register it into global interrupt table.
 *
 *  The variadic arguments format the source name (truncated to
 *  INTR_ISRC_NAMELEN - 1 characters). Returns ENOSPC when no free
 *  interrupt number (resource handle) is left.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are setup
	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
	 * our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
451 
/*
 *  Deregister interrupt source from global interrupt table.
 *
 *  NOTE(review): for non-IPI sources this calls isrc_release_counters(),
 *  which currently panics ("not implemented"), so this path is effectively
 *  unusable for them yet.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}
467 
468 #ifdef SMP
/*
 *  A support function for a PIC to decide if provided ISRC should be inited
 *  on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
 *  struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
 *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	/* Nothing to do when no handler is installed. */
	if (isrc->isrc_handlers == 0)
		return (false);
	/* Only per-CPU source types (PPI/IPI) are inited per cpu. */
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	/* Unbound: init everywhere and record this cpu in isrc_cpu. */
	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
492 #endif
493 
/*
 *  Allocate a struct intr_dev_data together with 'extsize' bytes of
 *  trailing struct intr_map_data storage and insert it into the global
 *  intr_ddata_tab[]. The returned idd_irq is a handle from the second
 *  (IRQ_DDATA_BASE) number space. Returns NULL when the table is full.
 */
static struct intr_dev_data *
intr_ddata_alloc(u_int extsize)
{
	struct intr_dev_data *ddata;
	size_t size;

	size = sizeof(*ddata);
	/* Config data is carved out of the same allocation, right after us. */
	ddata = malloc(size + extsize, M_INTRNG, M_WAITOK | M_ZERO);

	mtx_lock(&isrc_table_lock);
	if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
		mtx_unlock(&isrc_table_lock);
		free(ddata, M_INTRNG);
		return (NULL);
	}
	intr_ddata_tab[intr_ddata_first_unused] = ddata;
	ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
	mtx_unlock(&isrc_table_lock);

	ddata->idd_data = (struct intr_map_data *)((uintptr_t)ddata + size);
	ddata->idd_data->size = extsize;
	return (ddata);
}
517 
518 static struct intr_irqsrc *
519 intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
520 {
521 	int error;
522 	struct intr_irqsrc *isrc;
523 	struct intr_dev_data *ddata;
524 
525 	isrc = isrc_lookup(irq);
526 	if (isrc != NULL) {
527 		if (datap != NULL)
528 			*datap = NULL;
529 		return (isrc);
530 	}
531 
532 	if (irq < IRQ_DDATA_BASE)
533 		return (NULL);
534 
535 	irq -= IRQ_DDATA_BASE;
536 	if (irq >= nitems(intr_ddata_tab))
537 		return (NULL);
538 
539 	ddata = intr_ddata_tab[irq];
540 	if (ddata->idd_isrc == NULL) {
541 		error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
542 		    ddata->idd_data, &irq);
543 		if (error != 0)
544 			return (NULL);
545 		ddata->idd_isrc = isrc_lookup(irq);
546 	}
547 	if (datap != NULL)
548 		*datap = ddata->idd_data;
549 	return (ddata->idd_isrc);
550 }
551 
552 #ifdef DEV_ACPI
/*
 *  Map interrupt source according to ACPI info into framework and return
 *  the unique interrupt number (resource handle) associated with it.
 *  Note that a new config-data entry is created on every call; entries
 *  are not deduplicated. Returns INTR_IRQ_INVALID when the config-data
 *  table is exhausted.
 */
u_int
intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
    enum intr_trigger trig)
{
	struct intr_map_data_acpi *daa;
	struct intr_dev_data *ddata;

	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_acpi));
	if (ddata == NULL)
		return (INTR_IRQ_INVALID);	/* no space left */

	ddata->idd_dev = dev;
	ddata->idd_data->type = INTR_MAP_DATA_ACPI;

	/* The ACPI-specific payload lives in the trailing idd_data area. */
	daa = (struct intr_map_data_acpi *)ddata->idd_data;
	daa->irq = irq;
	daa->pol = pol;
	daa->trig = trig;

	return (ddata->idd_irq);
}
579 #endif
580 #ifdef FDT
/*
 *  Map interrupt source according to FDT data into framework and return
 *  the unique interrupt number (resource handle) associated with it.
 *  A new config-data entry is created on every call (no deduplication).
 *  Returns INTR_IRQ_INVALID when the config-data table is exhausted.
 */
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
	size_t cellsize;
	struct intr_dev_data *ddata;
	struct intr_map_data_fdt *daf;

	cellsize = ncells * sizeof(*cells);
	/* The interrupt specifier cells are stored inline after the header. */
	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_fdt) + cellsize);
	if (ddata == NULL)
		return (INTR_IRQ_INVALID);	/* no space left */

	ddata->idd_xref = (intptr_t)node;
	ddata->idd_data->type = INTR_MAP_DATA_FDT;

	daf = (struct intr_map_data_fdt *)ddata->idd_data;
	daf->ncells = ncells;
	memcpy(daf->cells, cells, cellsize);
	return (ddata->idd_irq);
}
606 #endif
607 
/*
 *  Store GPIO interrupt description in framework and return the unique
 *  interrupt number (resource handle) associated with it.
 *  A new config-data entry is created on every call (no deduplication).
 *  Returns INTR_IRQ_INVALID when the config-data table is exhausted.
 */
u_int
intr_gpio_map_irq(device_t dev, u_int pin_num, u_int pin_flags, u_int intr_mode)
{
	struct intr_dev_data *ddata;
	struct intr_map_data_gpio *dag;

	ddata = intr_ddata_alloc(sizeof(struct intr_map_data_gpio));
	if (ddata == NULL)
		return (INTR_IRQ_INVALID);	/* no space left */

	ddata->idd_dev = dev;
	ddata->idd_data->type = INTR_MAP_DATA_GPIO;

	dag = (struct intr_map_data_gpio *)ddata->idd_data;
	dag->gpio_pin_num = pin_num;
	dag->gpio_pin_flags = pin_flags;
	dag->gpio_intr_mode = intr_mode;
	return (ddata->idd_irq);
}
631 
632 #ifdef INTR_SOLO
/*
 *  Setup filter into interrupt source.
 *  ("iscr" in the name is a historical typo for "isrc", kept as-is because
 *  callers reference it.)
 *
 *  Fails with EBUSY when the source already has either a solo filter or an
 *  MI interrupt event attached - the two handling models must not mix.
 *  On success, '*cookiep' is the isrc itself.
 */
static int
iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
661 #endif
662 
663 /*
664  *  Interrupt source pre_ithread method for MI interrupt framework.
665  */
666 static void
667 intr_isrc_pre_ithread(void *arg)
668 {
669 	struct intr_irqsrc *isrc = arg;
670 
671 	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
672 }
673 
674 /*
675  *  Interrupt source post_ithread method for MI interrupt framework.
676  */
677 static void
678 intr_isrc_post_ithread(void *arg)
679 {
680 	struct intr_irqsrc *isrc = arg;
681 
682 	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
683 }
684 
685 /*
686  *  Interrupt source post_filter method for MI interrupt framework.
687  */
688 static void
689 intr_isrc_post_filter(void *arg)
690 {
691 	struct intr_irqsrc *isrc = arg;
692 
693 	PIC_POST_FILTER(isrc->isrc_dev, isrc);
694 }
695 
/*
 *  Interrupt source assign_cpu method for MI interrupt framework.
 *
 *  Only sources belonging to the root controller may be bound here;
 *  cpu == NOCPU clears the binding. When CPU assignment is enabled
 *  (irq_assign_cpu), the request is pushed down to the PIC.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
	 * re-balance it to another CPU or enable it on more CPUs. However,
	 * PIC is expected to change isrc_cpu appropriately to keep us well
	 * informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
738 
/*
 *  Create interrupt event for interrupt source.
 *
 *  If somebody attached an event (or, with INTR_SOLO, a filter) to the
 *  source while we were creating ours, the fresh event is discarded: an
 *  existing event wins (EBUSY); an existing solo filter makes us return 0
 *  with isrc_event still NULL.
 *  NOTE(review): that 0/NULL outcome leaves the caller without an event -
 *  verify callers (see isrc_add_handler()) tolerate it.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources. Let contested event wins.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}
773 #ifdef notyet
/*
 *  Destroy interrupt event for interrupt source.
 *  Detaches the event under the table lock, then destroys it unlocked.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
790 #endif
/*
 *  Add handler to interrupt source.
 *
 *  Creates the MI interrupt event on first use, registers the
 *  filter/handler pair with it, and refreshes the statistics name.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}
817 
/*
 *  Lookup interrupt controller locked.
 *
 *  Either argument may act as a wildcard:
 *   - dev == NULL: match on xref only;
 *   - xref == 0 (or the stored PIC has no xref): match on dev only;
 *   - otherwise both must match.
 *  Returns NULL when nothing matches or both arguments are wildcards.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
				return (pic);
	}
	return (NULL);
}
844 
845 /*
846  *  Lookup interrupt controller.
847  */
848 static struct intr_pic *
849 pic_lookup(device_t dev, intptr_t xref)
850 {
851 	struct intr_pic *pic;
852 
853 	mtx_lock(&pic_list_lock);
854 	pic = pic_lookup_locked(dev, xref);
855 	mtx_unlock(&pic_list_lock);
856 	return (pic);
857 }
858 
/*
 *  Create interrupt controller.
 *
 *  Returns the existing descriptor when the (dev, xref) pair is already
 *  registered; NULL only on allocation failure.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	/* M_NOWAIT: we may not sleep while holding pic_list_lock. */
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}
885 #ifdef notyet
/*
 *  Destroy interrupt controller.
 *  Silently does nothing when the controller is not registered.
 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
905 #endif
906 /*
907  *  Register interrupt controller.
908  */
909 int
910 intr_pic_register(device_t dev, intptr_t xref)
911 {
912 	struct intr_pic *pic;
913 
914 	if (dev == NULL)
915 		return (EINVAL);
916 	pic = pic_create(dev, xref);
917 	if (pic == NULL)
918 		return (ENOMEM);
919 
920 	debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic,
921 	    device_get_nameunit(dev), dev, xref);
922 	return (0);
923 }
924 
/*
 *  Unregister interrupt controller.
 *  Not implemented yet; see pic_destroy() (under "notyet") for the
 *  intended removal path.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}
934 
/*
 *  Mark interrupt controller (itself) as a root one.
 *
 *  Note that only an interrupt controller can really know its position
 *  in interrupt controller's tree. So root PIC must claim itself as a root.
 *
 *  In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
 *  page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{

	if (pic_lookup(dev, xref) == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}
	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
979 
/*
 *  Map an interrupt described by 'data' on the controller identified by
 *  'dev'/'xref' and return its interrupt number (resource handle) in
 *  '*irqp'. Returns ESRCH when no such controller is registered.
 */
int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    u_int *irqp)
{
	int error;
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref);
	if (pic == NULL)
		return (ESRCH);

	error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
	if (error == 0)
		*irqp = isrc->isrc_irq;
	return (error);
}
1000 
/*
 *  Pre-allocate the single interrupt of 'res' in its PIC.
 *  'dev' is accepted for bus-method symmetry but is not used here.
 */
int
intr_alloc_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
}
1016 
/*
 *  Release the single interrupt of 'res' in its PIC.
 *  Counterpart of intr_alloc_irq(); 'dev' is unused here as well.
 */
int
intr_release_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
}
1032 
1033 int
1034 intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
1035     driver_intr_t hand, void *arg, int flags, void **cookiep)
1036 {
1037 	int error;
1038 	struct intr_map_data *data;
1039 	struct intr_irqsrc *isrc;
1040 	const char *name;
1041 
1042 	KASSERT(rman_get_start(res) == rman_get_end(res),
1043 	    ("%s: more interrupts in resource", __func__));
1044 
1045 	isrc = intr_ddata_lookup(rman_get_start(res), &data);
1046 	if (isrc == NULL)
1047 		return (EINVAL);
1048 
1049 	name = device_get_nameunit(dev);
1050 
1051 #ifdef INTR_SOLO
1052 	/*
1053 	 * Standard handling is done through MI interrupt framework. However,
1054 	 * some interrupts could request solely own special handling. This
1055 	 * non standard handling can be used for interrupt controllers without
1056 	 * handler (filter only), so in case that interrupt controllers are
1057 	 * chained, MI interrupt framework is called only in leaf controller.
1058 	 *
1059 	 * Note that root interrupt controller routine is served as well,
1060 	 * however in intr_irq_handler(), i.e. main system dispatch routine.
1061 	 */
1062 	if (flags & INTR_SOLO && hand != NULL) {
1063 		debugf("irq %u cannot solo on %s\n", irq, name);
1064 		return (EINVAL);
1065 	}
1066 
1067 	if (flags & INTR_SOLO) {
1068 		error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1069 		    arg, cookiep);
1070 		debugf("irq %u setup filter error %d on %s\n", irq, error,
1071 		    name);
1072 	} else
1073 #endif
1074 		{
1075 		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1076 		    cookiep);
1077 		debugf("irq %u add handler error %d on %s\n", irq, error, name);
1078 	}
1079 	if (error != 0)
1080 		return (error);
1081 
1082 	mtx_lock(&isrc_table_lock);
1083 	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1084 	if (error == 0) {
1085 		isrc->isrc_handlers++;
1086 		if (isrc->isrc_handlers == 1)
1087 			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1088 	}
1089 	mtx_unlock(&isrc_table_lock);
1090 	if (error != 0)
1091 		intr_event_remove_handler(*cookiep);
1092 	return (error);
1093 }
1094 
/*
 *  Remove the handler identified by 'cookie' from the interrupt of 'res'.
 *
 *  The source is disabled and torn down in its PIC when the last handler
 *  goes away. With INTR_SOLO, a solo filter is removed instead (its
 *  cookie is the isrc itself).
 */
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1140 
1141 int
1142 intr_describe_irq(device_t dev, struct resource *res, void *cookie,
1143     const char *descr)
1144 {
1145 	int error;
1146 	struct intr_irqsrc *isrc;
1147 
1148 	KASSERT(rman_get_start(res) == rman_get_end(res),
1149 	    ("%s: more interrupts in resource", __func__));
1150 
1151 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1152 	if (isrc == NULL || isrc->isrc_handlers == 0)
1153 		return (EINVAL);
1154 #ifdef INTR_SOLO
1155 	if (isrc->isrc_filter != NULL) {
1156 		if (isrc != cookie)
1157 			return (EINVAL);
1158 
1159 		mtx_lock(&isrc_table_lock);
1160 		isrc_update_name(isrc, descr);
1161 		mtx_unlock(&isrc_table_lock);
1162 		return (0);
1163 	}
1164 #endif
1165 	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
1166 	if (error == 0) {
1167 		mtx_lock(&isrc_table_lock);
1168 		intrcnt_updatename(isrc);
1169 		mtx_unlock(&isrc_table_lock);
1170 	}
1171 	return (error);
1172 }
1173 
1174 #ifdef SMP
1175 int
1176 intr_bind_irq(device_t dev, struct resource *res, int cpu)
1177 {
1178 	struct intr_irqsrc *isrc;
1179 
1180 	KASSERT(rman_get_start(res) == rman_get_end(res),
1181 	    ("%s: more interrupts in resource", __func__));
1182 
1183 	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1184 	if (isrc == NULL || isrc->isrc_handlers == 0)
1185 		return (EINVAL);
1186 #ifdef INTR_SOLO
1187 	if (isrc->isrc_filter != NULL)
1188 		return (intr_isrc_assign_cpu(isrc, cpu));
1189 #endif
1190 	return (intr_event_bind(isrc->isrc_event, cpu));
1191 }
1192 
1193 /*
1194  * Return the CPU that the next interrupt source should use.
1195  * For now just returns the next CPU according to round-robin.
1196  */
1197 u_int
1198 intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
1199 {
1200 
1201 	if (!irq_assign_cpu || mp_ncpus == 1)
1202 		return (PCPU_GET(cpuid));
1203 
1204 	do {
1205 		last_cpu++;
1206 		if (last_cpu > mp_maxid)
1207 			last_cpu = 0;
1208 	} while (!CPU_ISSET(last_cpu, cpumask));
1209 	return (last_cpu);
1210 }
1211 
1212 /*
1213  *  Distribute all the interrupt sources among the available
1214  *  CPUs once the AP's have been launched.
1215  */
/*
 *  Distribute all the interrupt sources among the available
 *  CPUs once the AP's have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	/* Nothing to shuffle on a uniprocessor system. */
	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	/* From now on, intr_irq_next_cpu() will round-robin across CPUs. */
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		/* Skip empty slots, idle sources, and per-CPU/IPI sources. */
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		/*
		 * A bound source's isrc_cpu must agree with the CPU recorded
		 * in its intr_event; a mismatch means the bookkeeping broke.
		 */
		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		/* Unbound sources get a fresh round-robin assignment. */
		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in wicked position here if the following call fails
		 * for bound ISRC. The best thing we can do is to clear
		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1252 
1253 #else
/* Uniprocessor variant: all interrupts stay on the (only) current CPU. */
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
1260 #endif
1261 
/*
 * NOTE(review): empty stub; presumably kept so MD code that calls
 * dosoftints() still links under this framework — confirm callers.
 */
void dosoftints(void);
void
dosoftints(void)
{
}
1267 
1268 #ifdef SMP
1269 /*
1270  *  Init interrupt controller on another CPU.
1271  */
1272 void
1273 intr_pic_init_secondary(void)
1274 {
1275 
1276 	/*
1277 	 * QQQ: Only root PIC is aware of other CPUs ???
1278 	 */
1279 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
1280 
1281 	//mtx_lock(&isrc_table_lock);
1282 	PIC_INIT_SECONDARY(intr_irq_root_dev);
1283 	//mtx_unlock(&isrc_table_lock);
1284 }
1285 #endif
1286 
1287 #ifdef DDB
1288 DB_SHOW_COMMAND(irqs, db_show_irqs)
1289 {
1290 	u_int i, irqsum;
1291 	u_long num;
1292 	struct intr_irqsrc *isrc;
1293 
1294 	for (irqsum = 0, i = 0; i < NIRQ; i++) {
1295 		isrc = irq_sources[i];
1296 		if (isrc == NULL)
1297 			continue;
1298 
1299 		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
1300 		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
1301 		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
1302 		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
1303 		irqsum += num;
1304 	}
1305 	db_printf("irq total %u\n", irqsum);
1306 }
1307 #endif
1308