xref: /freebsd/sys/arm/arm/gic.c (revision 8178a4e3c13241804bb9625b2ef4c1b3fea3c7d9)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <damjan.marion@gmail.com>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 #include "opt_acpi.h"
38 #include "opt_ddb.h"
39 #include "opt_platform.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/rman.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/cpuset.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/smp.h>
55 #include <sys/sched.h>
56 
57 #include <vm/vm.h>
58 #include <vm/pmap.h>
59 
60 #include <machine/bus.h>
61 #include <machine/intr.h>
62 #include <machine/smp.h>
63 
64 #ifdef FDT
65 #include <dev/fdt/fdt_intr.h>
66 #include <dev/ofw/ofw_bus_subr.h>
67 #endif
68 
69 #ifdef DEV_ACPI
70 #include <contrib/dev/acpica/include/acpi.h>
71 #include <dev/acpica/acpivar.h>
72 #endif
73 
74 #ifdef DDB
75 #include <ddb/ddb.h>
76 #include <ddb/db_lex.h>
77 #endif
78 
79 #include <arm/arm/gic.h>
80 #include <arm/arm/gic_common.h>
81 
82 #include "gic_if.h"
83 #include "pic_if.h"
84 #include "msi_if.h"
85 
86 /* We are using GICv2 register naming */
87 
88 /* Distributor Registers */
89 
90 /* CPU Registers */
91 #define GICC_CTLR		0x0000			/* v1 ICCICR */
92 #define GICC_PMR		0x0004			/* v1 ICCPMR */
93 #define GICC_BPR		0x0008			/* v1 ICCBPR */
94 #define GICC_IAR		0x000C			/* v1 ICCIAR */
95 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
96 #define GICC_RPR		0x0014			/* v1 ICCRPR */
97 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
98 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
99 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/
100 
101 /* TYPER Registers */
102 #define	GICD_TYPER_SECURITYEXT	0x400
103 #define	GIC_SUPPORT_SECEXT(_sc)	\
104     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
105 
106 #ifndef	GIC_DEFAULT_ICFGR_INIT
107 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
108 #endif
109 
110 struct gic_irqsrc {
111 	struct intr_irqsrc	gi_isrc;
112 	uint32_t		gi_irq;
113 	enum intr_polarity	gi_pol;
114 	enum intr_trigger	gi_trig;
115 #define GI_FLAG_EARLY_EOI	(1 << 0)
116 #define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
117 					 /* be used for MSI/MSI-X interrupts */
118 #define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
119 					 /* for a MSI/MSI-X interrupt */
120 	u_int			gi_flags;
121 };
122 
123 static u_int gic_irq_cpu;
124 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
125 
126 #ifdef SMP
127 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
128 static u_int sgi_first_unused = GIC_FIRST_SGI;
129 #endif
130 
131 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
132 
133 static struct resource_spec arm_gic_spec[] = {
134 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
135 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
136 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
137 	{ -1, 0 }
138 };
139 
140 #if defined(__arm__) && defined(INVARIANTS)
141 static int gic_debug_spurious = 1;
142 #else
143 static int gic_debug_spurious = 0;
144 #endif
145 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
146 
147 static u_int arm_gic_map[GIC_MAXCPU];
148 
149 static struct arm_gic_softc *gic_sc = NULL;
150 
151 /* CPU Interface */
152 #define	gic_c_read_4(_sc, _reg)		\
153     bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
154 #define	gic_c_write_4(_sc, _reg, _val)		\
155     bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
156 /* Distributor Interface */
157 #define	gic_d_read_4(_sc, _reg)		\
158     bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
159 #define	gic_d_write_1(_sc, _reg, _val)		\
160     bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
161 #define	gic_d_write_4(_sc, _reg, _val)		\
162     bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
163 
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	/* Enable forwarding of 'irq' by setting its bit in GICD_ISENABLER. */
	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
170 
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	/* Disable forwarding of 'irq' by setting its bit in GICD_ICENABLER. */
	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
177 
/*
 * Discover the GIC CPU interface number of the calling CPU.
 *
 * ITARGETSR0-7 are banked per-CPU and read back the target mask of the
 * current CPU interface for SGIs/PPIs, so the first non-zero register
 * tells us which CPU interface bit belongs to us.  Returns a one-byte
 * mask suitable for ITARGETSR/SGIR target fields.
 */
static uint8_t
gic_cpu_mask(struct arm_gic_softc *sc)
{
	uint32_t mask;
	int i;

	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
	for (i = 0; i < 8; i++) {
		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
		if (mask != 0)
			break;
	}
	/* No mask found, assume we are on CPU interface 0 */
	if (mask == 0)
		return (1);

	/* Collect the mask in the lower byte */
	mask |= mask >> 16;
	mask |= mask >> 8;

	/* Implicit truncation to uint8_t keeps only the merged low byte. */
	return (mask);
}
200 
201 #ifdef SMP
/*
 * Bring up the GIC on a non-boot CPU: record its CPU-interface mask for
 * IPI delivery, reset banked priorities/groups, enable the CPU interface
 * and unmask any SGI/PPI sources that already have handlers.  Called on
 * the target CPU itself during SMP startup.
 */
static void
arm_gic_init_secondary(device_t dev, uint32_t rootnum)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	MPASS(cpu < GIC_MAXCPU);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Highest priority (0) for all interrupts; IPRIORITYR is banked
	 * for IRQs 0-31 so this also covers this CPU's SGIs/PPIs. */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register: accept all priority levels. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
240 #endif /* SMP */
241 
/*
 * Allocate and register one interrupt source per hardware interrupt.
 * On success sc->gic_irqs/sc->nirqs are populated; on failure the array
 * is freed and the intr_isrc_register() error is returned (previously
 * registered sources are leaked - see XXX below).
 */
static int
arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
{
	int error;
	uint32_t irq;
	struct gic_irqsrc *irqs;
	struct intr_irqsrc *isrc;
	const char *name;

	/* M_ZERO leaves gi_flags clear and isrc state pristine. */
	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	name = device_get_nameunit(sc->gic_dev);
	for (irq = 0; irq < num; irq++) {
		irqs[irq].gi_irq = irq;
		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;

		isrc = &irqs[irq].gi_isrc;
		/* Name by class: "i<n>" SGI/IPI, "p<n>" PPI, "s<n>" SPI. */
		if (irq <= GIC_LAST_SGI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
		} else if (irq <= GIC_LAST_PPI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
		} else {
			error = intr_isrc_register(isrc, sc->gic_dev, 0,
			    "%s,s%u", name, irq - GIC_FIRST_SPI);
		}
		if (error != 0) {
			/* XXX call intr_isrc_deregister() */
			free(irqs, M_DEVBUF);
			return (error);
		}
	}
	sc->gic_irqs = irqs;
	sc->nirqs = num;
	return (0);
}
281 
282 static void
arm_gic_reserve_msi_range(device_t dev,u_int start,u_int count)283 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
284 {
285 	struct arm_gic_softc *sc;
286 	int i;
287 
288 	sc = device_get_softc(dev);
289 
290 	KASSERT((start + count) <= sc->nirqs,
291 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
292 	    start, count, sc->nirqs));
293 	for (i = 0; i < count; i++) {
294 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
295 		    ("%s: MSI interrupt %d already has a handler", __func__,
296 		    count + i));
297 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
298 		    ("%s: MSI interrupt %d already has a polarity", __func__,
299 		    count + i));
300 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
301 		    ("%s: MSI interrupt %d already has a trigger", __func__,
302 		    count + i));
303 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
304 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
305 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
306 	}
307 }
308 
309 int
arm_gic_attach(device_t dev)310 arm_gic_attach(device_t dev)
311 {
312 	struct		arm_gic_softc *sc;
313 	int		i;
314 	uint32_t	icciidr, mask, nirqs;
315 
316 	if (gic_sc)
317 		return (ENXIO);
318 
319 	if (mp_ncpus > GIC_MAXCPU) {
320 		device_printf(dev, "Too many CPUs for IPIs to work (%d > %d)\n",
321 		    mp_ncpus, GIC_MAXCPU);
322 		return (ENXIO);
323 	}
324 
325 	sc = device_get_softc(dev);
326 
327 	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
328 		device_printf(dev, "could not allocate resources\n");
329 		return (ENXIO);
330 	}
331 
332 	sc->gic_dev = dev;
333 	gic_sc = sc;
334 
335 	/* Initialize mutex */
336 	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
337 
338 	/* Disable interrupt forwarding to the CPU interface */
339 	gic_d_write_4(sc, GICD_CTLR, 0x00);
340 
341 	/* Get the number of interrupts */
342 	sc->typer = gic_d_read_4(sc, GICD_TYPER);
343 	nirqs = GICD_TYPER_I_NUM(sc->typer);
344 
345 	if (arm_gic_register_isrcs(sc, nirqs)) {
346 		device_printf(dev, "could not register irqs\n");
347 		goto cleanup;
348 	}
349 
350 	icciidr = gic_c_read_4(sc, GICC_IIDR);
351 	device_printf(dev,
352 	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
353 	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
354 	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
355 	sc->gic_iidr = icciidr;
356 
357 	/* Set all global interrupts to be level triggered, active low. */
358 	for (i = 32; i < sc->nirqs; i += 16) {
359 		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
360 	}
361 
362 	/* Disable all interrupts. */
363 	for (i = 32; i < sc->nirqs; i += 32) {
364 		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
365 	}
366 
367 	/* Find the current cpu mask */
368 	mask = gic_cpu_mask(sc);
369 	/* Set the mask so we can find this CPU to send it IPIs */
370 	MPASS(PCPU_GET(cpuid) < GIC_MAXCPU);
371 	arm_gic_map[PCPU_GET(cpuid)] = mask;
372 	/* Set all four targets to this cpu */
373 	mask |= mask << 8;
374 	mask |= mask << 16;
375 
376 	for (i = 0; i < sc->nirqs; i += 4) {
377 		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
378 		if (i > 32) {
379 			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
380 		}
381 	}
382 
383 	/* Set all the interrupts to be in Group 0 (secure) */
384 	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
385 		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
386 	}
387 
388 	/* Enable CPU interface */
389 	gic_c_write_4(sc, GICC_CTLR, 1);
390 
391 	/* Set priority mask register. */
392 	gic_c_write_4(sc, GICC_PMR, 0xff);
393 
394 	/* Enable interrupt distribution */
395 	gic_d_write_4(sc, GICD_CTLR, 0x01);
396 	return (0);
397 
398 cleanup:
399 	arm_gic_detach(dev);
400 	return(ENXIO);
401 }
402 
/*
 * Device detach / attach-failure cleanup: release the interrupt source
 * array (if arm_gic_register_isrcs() got that far) and the bus
 * resources.  Always succeeds.
 */
int
arm_gic_detach(device_t dev)
{
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	/* May be NULL when called from the attach error path. */
	if (sc->gic_irqs != NULL)
		free(sc->gic_irqs, M_DEVBUF);

	bus_release_resources(dev, arm_gic_spec, sc->gic_res);

	return (0);
}
417 
418 static int
arm_gic_print_child(device_t bus,device_t child)419 arm_gic_print_child(device_t bus, device_t child)
420 {
421 	struct resource_list *rl;
422 	int rv;
423 
424 	rv = bus_print_child_header(bus, child);
425 
426 	rl = BUS_GET_RESOURCE_LIST(bus, child);
427 	if (rl != NULL) {
428 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
429 		    "%#jx");
430 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
431 	}
432 
433 	rv += bus_print_child_footer(bus, child);
434 
435 	return (rv);
436 }
437 
/*
 * Bus method: allocate a memory resource for a child (e.g. a GICv2m
 * frame), filling in a default range from the child's resource list and
 * translating it through the 'ranges' property before delegating to
 * bus_generic_alloc_resource().
 */
static struct resource *
arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct arm_gic_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	KASSERT(type == SYS_RES_MEMORY, ("Invalid resource type %x", type));

	sc = device_get_softc(bus);

	/*
	 * Request for the default allocation with a given rid: use resource
	 * list stored in the local device info.
	 */
	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);

		/* Dead branch given the KASSERT above; kept as-is. */
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			if (bootverbose)
				device_printf(bus, "no default resources for "
				    "rid = %d, type = %d\n", *rid, type);
			return (NULL);
		}
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property: bus address -> host address. */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* Ranges exist but none covered the request: fail the mapping. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
495 
/*
 * Bus method: report GIC instance variables to children (hardware
 * revision, bus type, vGIC/LPI capabilities).  Returns ENOENT for an
 * unknown ivar.
 */
static int
arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	switch(which) {
	case GIC_IVAR_HW_REV:
		/* Architecture revision field from GICC_IIDR read at attach. */
		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
		*result = GICD_IIDR_VAR(sc->gic_iidr);
		return (0);
	case GIC_IVAR_BUS:
		/* Set by the FDT/ACPI front end before attach completes. */
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("arm_gic_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		/* This driver provides no virtual GIC support. */
		*result = 0;
		return (0);
	case GIC_IVAR_SUPPORT_LPIS:
		/* LPIs are a GICv3+ feature; never present here. */
		*result = false;
		return (0);
	}

	return (ENOENT);
}
527 
528 static int
arm_gic_write_ivar(device_t dev,device_t child,int which,uintptr_t value)529 arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
530 {
531 	switch(which) {
532 	case GIC_IVAR_HW_REV:
533 	case GIC_IVAR_BUS:
534 		return (EINVAL);
535 	}
536 
537 	return (ENOENT);
538 }
539 
/*
 * Root interrupt filter: acknowledge interrupts from GICC_IAR and
 * dispatch them until the GIC reports none pending.  EOI timing is
 * subtle; see the numbered commentary below.
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	/* Reading IAR acknowledges the interrupt; low 10 bits = IRQ id. */
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	/* Out-of-range id (e.g. 1023) means spurious: nothing to dispatch. */
	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
		goto next_irq;
#else
		/* SGIs should never fire without SMP; just complete them. */
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources are EOId before dispatch (early EOI). */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		/* No handler claimed it: mask the stray source for good. */
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	arm_irq_memory_barrier(irq);
	/* Keep draining pending interrupts until IAR returns spurious. */
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
619 
/*
 * Program trigger mode and polarity for one SPI via its two-bit field in
 * GICD_ICFGR.  SGIs/PPIs (irq < GIC_FIRST_SPI) have fixed configuration
 * and are silently ignored.  CONFORM values leave the respective field
 * unchanged.
 */
static void
gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	uint32_t reg;
	uint32_t mask;

	if (irq < GIC_FIRST_SPI)
		return;

	/* Serialize the read-modify-write of the shared ICFGR register. */
	mtx_lock_spin(&sc->mutex);

	/* Extract this interrupt's 2-bit config field (16 irqs per reg). */
	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
	mask = (reg >> 2*(irq % 16)) & 0x3;

	if (pol == INTR_POLARITY_LOW) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_LOW;
	} else if (pol == INTR_POLARITY_HIGH) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_HIGH;
	}

	if (trig == INTR_TRIGGER_LEVEL) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_LVL;
	} else if (trig == INTR_TRIGGER_EDGE) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_EDGE;
	}

	/* Merge the updated field back into the register. */
	reg = reg & ~(0x3 << 2*(irq % 16));
	reg = reg | (mask << 2*(irq % 16));
	gic_d_write_4(sc, GICD_ICFGR(irq), reg);

	mtx_unlock_spin(&sc->mutex);
}
658 
/*
 * Route interrupt 'irq' to the set of CPUs in 'cpus' by writing the
 * combined CPU-interface mask into its ITARGETSR byte.  Returns EINVAL
 * if the set names a CPU beyond what the GIC can address.
 */
static int
gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
{
	uint32_t cpu, end, mask;

	/* Reject any CPU outside the GIC's addressable range. */
	end = min(mp_ncpus, GIC_MAXCPU);
	for (cpu = end; cpu < MAXCPU; cpu++)
		if (CPU_ISSET(cpu, cpus))
			return (EINVAL);

	/* OR together the per-CPU interface masks recorded at boot. */
	for (mask = 0, cpu = 0; cpu < end; cpu++)
		if (CPU_ISSET(cpu, cpus))
			mask |= arm_gic_map[cpu];

	/* ITARGETSR is byte-accessible: one target byte per interrupt. */
	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
	return (0);
}
676 
677 #ifdef FDT
/*
 * Decode an FDT interrupt specifier into an IRQ number and trigger mode.
 * Accepts the 1-cell legacy form (raw IRQ, everything CONFORM) and the
 * standard 3-cell GIC binding described below.  Returns EINVAL for any
 * other cell count or an invalid specifier.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}
	if (ncells == 3) {
		u_int irq, tripol;

		/*
		 * The 1st cell is the interrupt type:
		 *	0 = SPI
		 *	1 = PPI
		 * The 2nd cell contains the interrupt number:
		 *	[0 - 987] for SPI
		 *	[0 -  15] for PPI
		 * The 3rd cell is the flags, encoded as follows:
		 *   bits[3:0] trigger type and level flags
		 *	1 = low-to-high edge triggered
		 *	2 = high-to-low edge triggered
		 *	4 = active high level-sensitive
		 *	8 = active low level-sensitive
		 *   bits[15:8] PPI interrupt cpu mask
		 *	Each bit corresponds to each of the 8 possible cpus
		 *	attached to the GIC.  A bit set to '1' indicated
		 *	the interrupt is wired to that CPU.
		 */
		switch (cells[0]) {
		case 0:
			irq = GIC_FIRST_SPI + cells[1];
			/* SPI irq is checked later. */
			break;
		case 1:
			irq = GIC_FIRST_PPI + cells[1];
			if (irq > GIC_LAST_PPI) {
				device_printf(dev, "unsupported PPI interrupt "
				    "number %u\n", cells[1]);
				return (EINVAL);
			}
			break;
		default:
			device_printf(dev, "unsupported interrupt type "
			    "configuration %u\n", cells[0]);
			return (EINVAL);
		}

		/* Warn on reserved flag bits or an active-low SPI. */
		tripol = cells[2] & 0xff;
		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
		    cells[0] == 0))
			device_printf(dev, "unsupported trigger/polarity "
			    "configuration 0x%02x\n", tripol);

		/* Polarity is left CONFORM; only the trigger is decoded. */
		*irqp = irq;
		*polp = INTR_POLARITY_CONFORM;
		*trigp = tripol & FDT_INTR_EDGE_MASK ?
		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
		return (0);
	}
	return (EINVAL);
}
743 #endif
744 
745 static int
gic_map_msi(device_t dev,struct intr_map_data_msi * msi_data,u_int * irqp,enum intr_polarity * polp,enum intr_trigger * trigp)746 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
747     enum intr_polarity *polp, enum intr_trigger *trigp)
748 {
749 	struct gic_irqsrc *gi;
750 
751 	/* Map a non-GICv2m MSI */
752 	gi = (struct gic_irqsrc *)msi_data->isrc;
753 	if (gi == NULL)
754 		return (ENXIO);
755 
756 	*irqp = gi->gi_irq;
757 
758 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
759 	*polp = INTR_POLARITY_HIGH;
760 	*trigp = INTR_TRIGGER_EDGE;
761 
762 	return (0);
763 }
764 
/*
 * Common map-data decoder: dispatch on the map-data type (FDT, ACPI or
 * MSI), then validate the resulting IRQ number, polarity and trigger.
 * polp/trigp may be NULL when the caller only needs the IRQ number.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* IRQs reserved for MSI must never appear in FDT data. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data carries the config directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Validate against this GIC's range and the legal enum values. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
829 
830 static int
arm_gic_map_intr(device_t dev,struct intr_map_data * data,struct intr_irqsrc ** isrcp)831 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
832     struct intr_irqsrc **isrcp)
833 {
834 	int error;
835 	u_int irq;
836 	struct arm_gic_softc *sc;
837 
838 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
839 	if (error == 0) {
840 		sc = device_get_softc(dev);
841 		*isrcp = GIC_INTR_ISRC(sc, irq);
842 	}
843 	return (error);
844 }
845 
/*
 * PIC method: negotiate and program trigger/polarity for an interrupt
 * source when its first handler is installed, and verify compatibility
 * for subsequent handlers.  Also binds the source to a CPU.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI: config was fixed at reservation time. */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource; it must match this source. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		/* No map data: accept whatever was configured before. */
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup: it must not change. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
913 
914 static int
arm_gic_teardown_intr(device_t dev,struct intr_irqsrc * isrc,struct resource * res,struct intr_map_data * data)915 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
916     struct resource *res, struct intr_map_data *data)
917 {
918 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
919 
920 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
921 		gi->gi_pol = INTR_POLARITY_CONFORM;
922 		gi->gi_trig = INTR_TRIGGER_CONFORM;
923 	}
924 	return (0);
925 }
926 
/*
 * PIC method: unmask an interrupt source.  The barrier orders prior
 * device writes before the unmask takes effect.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
936 
/* PIC method: mask an interrupt source in the distributor. */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
945 
/*
 * PIC method: run before handing a level interrupt to its ithread.
 * Mask the source so it cannot re-fire, then signal EOI so other
 * interrupts may be delivered while the ithread runs.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
955 
/*
 * PIC method: run after the ithread finishes.  Re-enable the source
 * that arm_gic_pre_ithread() masked; the barrier orders the handler's
 * device accesses before the unmask.
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
963 
/*
 * PIC method: complete a filter-only interrupt.  Edge-triggered sources
 * were already EOId in arm_gic_intr() (GI_FLAG_EARLY_EOI); everything
 * else gets its EOI here.
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* EOI for edge-triggered done earlier. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		return;

	arm_irq_memory_barrier(0);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
977 
/*
 * PIC method: bind an SPI to the CPUs in isrc->isrc_cpu.  When the set
 * is empty, pick the next CPU round-robin.  SGIs/PPIs cannot be
 * re-routed and return EINVAL.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* Round-robin distribution across all CPUs. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
993 
994 #ifdef SMP
995 static void
arm_gic_ipi_send(device_t dev,struct intr_irqsrc * isrc,cpuset_t cpus,u_int ipi)996 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
997     u_int ipi)
998 {
999 	struct arm_gic_softc *sc = device_get_softc(dev);
1000 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1001 	uint32_t val = 0, i;
1002 
1003 	for (i = 0; i < MAXCPU; i++) {
1004 		if (CPU_ISSET(i, &cpus)) {
1005 			MPASS(i < GIC_MAXCPU);
1006 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1007 		}
1008 	}
1009 
1010 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
1011 }
1012 
/*
 * PIC_IPI_SETUP method: allocate the next unused SGI for the given
 * IPI number.  SGIs are handed out in ascending order from a shared
 * static allocator; returns ENOSPC once all SGIs are taken.
 */
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct arm_gic_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	/* Record the SGI -> IPI mapping and advance the allocator. */
	sgi_to_ipi[sgi_first_unused++] = ipi;

	/*
	 * NOTE(review): only the calling CPU is added to the source's
	 * CPU set here; presumably other CPUs are covered elsewhere
	 * (e.g. secondary init) -- confirm.
	 */
	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1030 #endif
1031 
/*
 * GIC_ALLOC_MSI method: allocate "count" contiguous interrupts,
 * naturally aligned to "maxcount", from the MSI range
 * [mbi_start, mbi_start + mbi_count).  Both count and maxcount must
 * be powers of two.  Returns ENXIO when no suitable free run exists.
 */
static int
arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count,
    int maxcount, struct intr_irqsrc **isrc)
{
	struct arm_gic_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->mutex);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock_spin(&sc->mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock_spin(&sc->mutex);

	/*
	 * Fill in the result array outside the spin lock: the range is
	 * already claimed, so it cannot be allocated concurrently.
	 */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1095 
1096 static int
arm_gic_release_msi(device_t dev,int count,struct intr_irqsrc ** isrc)1097 arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1098 {
1099 	struct arm_gic_softc *sc;
1100 	struct gic_irqsrc *gi;
1101 	int i;
1102 
1103 	sc = device_get_softc(dev);
1104 
1105 	mtx_lock_spin(&sc->mutex);
1106 	for (i = 0; i < count; i++) {
1107 		gi = (struct gic_irqsrc *)isrc[i];
1108 
1109 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1110 		    ("%s: Trying to release an unused MSI-X interrupt",
1111 		    __func__));
1112 
1113 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1114 	}
1115 	mtx_unlock_spin(&sc->mutex);
1116 
1117 	return (0);
1118 }
1119 
1120 static int
arm_gic_alloc_msix(device_t dev,u_int mbi_start,u_int mbi_count,struct intr_irqsrc ** isrc)1121 arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1122     struct intr_irqsrc **isrc)
1123 {
1124 	struct arm_gic_softc *sc;
1125 	int irq;
1126 
1127 	sc = device_get_softc(dev);
1128 
1129 	mtx_lock_spin(&sc->mutex);
1130 	/* Find an unused interrupt */
1131 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1132 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1133 		    ("%s: Non-MSI interrupt found", __func__));
1134 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1135 			break;
1136 	}
1137 	/* No free interrupt was found */
1138 	if (irq == mbi_start + mbi_count) {
1139 		mtx_unlock_spin(&sc->mutex);
1140 		return (ENXIO);
1141 	}
1142 
1143 	/* Mark the interrupt as used */
1144 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1145 	mtx_unlock_spin(&sc->mutex);
1146 
1147 	*isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1148 
1149 	return (0);
1150 }
1151 
1152 static int
arm_gic_release_msix(device_t dev,struct intr_irqsrc * isrc)1153 arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1154 {
1155 	struct arm_gic_softc *sc;
1156 	struct gic_irqsrc *gi;
1157 
1158 	sc = device_get_softc(dev);
1159 	gi = (struct gic_irqsrc *)isrc;
1160 
1161 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1162 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1163 
1164 	mtx_lock_spin(&sc->mutex);
1165 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1166 	mtx_unlock_spin(&sc->mutex);
1167 
1168 	return (0);
1169 }
1170 
1171 #ifdef DDB
/*
 * GIC_DB_SHOW method: dump CPU-interface and distributor registers,
 * then one line per interrupt source showing group, enable / pending /
 * active state, priority, CPU target byte and trigger configuration.
 */
static void
arm_gic_db_show(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	uint32_t val;
	u_int i;

	db_printf("%s CPU registers:\n", device_get_nameunit(dev));
	db_printf(" CTLR: %08x   PMR: %08x   BPR: %08x   RPR: %08x\n",
	    gic_c_read_4(sc, GICC_CTLR), gic_c_read_4(sc, GICC_PMR),
	    gic_c_read_4(sc, GICC_BPR), gic_c_read_4(sc, GICC_RPR));
	db_printf("HPPIR: %08x  IIDR: %08x\n", gic_c_read_4(sc, GICC_HPPIR),
	    gic_c_read_4(sc, GICC_IIDR));

	db_printf("%s Distributor registers:\n", device_get_nameunit(dev));
	db_printf(" CTLR: %08x TYPER: %08x  IIDR: %08x\n",
	    gic_d_read_4(sc, GICD_CTLR), gic_d_read_4(sc, GICD_TYPER),
	    gic_d_read_4(sc, GICD_IIDR));
	for (i = 0; i < sc->nirqs; i++) {
		/* Label the source by its class-relative number. */
		if (i <= GIC_LAST_SGI)
			db_printf("SGI %2u ", i);
		else if (i <= GIC_LAST_PPI)
			db_printf("PPI %2u ", i - GIC_FIRST_PPI);
		else
			db_printf("SPI %2u ", i - GIC_FIRST_SPI);
		db_printf(" grp:%u",
		    !!(gic_d_read_4(sc, GICD_IGROUPR(i)) & GICD_I_MASK(i)));
		db_printf(" enable:%u pend:%u active:%u",
		    !!(gic_d_read_4(sc, GICD_ISENABLER(i)) & GICD_I_MASK(i)),
		    !!(gic_d_read_4(sc, GICD_ISPENDR(i)) & GICD_I_MASK(i)),
		    !!(gic_d_read_4(sc, GICD_ISACTIVER(i)) & GICD_I_MASK(i)));
		/* IPRIORITYR/ITARGETSR pack four 8-bit fields per word. */
		db_printf(" pri:%u",
		    (gic_d_read_4(sc, GICD_IPRIORITYR(i)) >> 8 * (i & 0x3)) &
		    0xff);
		db_printf(" trg:%u",
		    (gic_d_read_4(sc, GICD_ITARGETSR(i)) >> 8 * (i & 0x3)) &
		    0xff);
		/* ICFGR packs sixteen 2-bit config fields per word. */
		val = gic_d_read_4(sc, GICD_ICFGR(i)) >> 2 * (i & 0xf);
		if ((val & GICD_ICFGR_POL_MASK) == GICD_ICFGR_POL_LOW)
			db_printf(" LO");
		else
			db_printf(" HI");
		if ((val & GICD_ICFGR_TRIG_MASK) == GICD_ICFGR_TRIG_LVL)
			db_printf(" LV");
		else
			db_printf(" ED");
		db_printf("\n");
	}
}
1221 #endif
1222 
/* Kobj method table wiring this driver into the bus, PIC and GIC
 * interfaces. */
static device_method_t arm_gic_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_print_child,	arm_gic_print_child),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
	DEVMETHOD(bus_write_ivar,	arm_gic_write_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
#endif

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, arm_gic_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	arm_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	arm_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	arm_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	arm_gic_release_msix),
#ifdef DDB
	DEVMETHOD(gic_db_show,		arm_gic_db_show),
#endif

	{ 0, 0 }
};
1261 
/* Base "gic" driver class; bus-specific front ends subclass this. */
DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
1264 
1265 #ifdef DDB
/*
 * DDB "show gic <name>" command: look up the named device and dump
 * its state via the GIC_DB_SHOW method.
 */
DB_SHOW_COMMAND_FLAGS(gic, db_show_gic, CS_OWN)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	/* CS_OWN: this command parses its own argument token. */
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show gic <name>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	GIC_DB_SHOW(dev);
}
1291 
DB_SHOW_ALL_COMMAND(gics,db_show_all_gics)1292 DB_SHOW_ALL_COMMAND(gics, db_show_all_gics)
1293 {
1294 	devclass_t dc;
1295 	device_t dev;
1296 	int i;
1297 
1298 	dc = devclass_find("gic");
1299 	if (dc == NULL)
1300 		return;
1301 
1302 	for (i = 0; i < devclass_get_maxunit(dc); i++) {
1303 		dev = devclass_get_device(dc, i);
1304 		if (dev != NULL)
1305 			GIC_DB_SHOW(dev);
1306 		if (db_pager_quit)
1307 			break;
1308 	}
1309 }
1310 
1311 #endif
1312 
1313 /*
1314  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1315  */
1316 
#define	GICV2M_MSI_TYPER	0x008	/* MSI type: SPI base and count */
#define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
#define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
#define	GICv2M_MSI_SETSPI_NS	0x040	/* MSI doorbell (write SPI number) */
#define	GICV2M_MSI_IIDR		0xFCC	/* Interface identification */
1322 
/*
 * Attach a GICv2m MSI/MSI-X frame: map its registers, read the SPI
 * range it can raise from MSI_TYPER, reserve that range in the parent
 * GIC, and register as an MSI controller.
 */
int
arm_gicv2m_attach(device_t dev)
{
	struct arm_gicv2m_softc *sc;
	uint32_t typer;
	int rid;

	sc = device_get_softc(dev);

	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "Unable to allocate resources\n");
		return (ENXIO);
	}

	/* MSI_TYPER describes which SPIs this frame can generate. */
	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);

	/* Reserve these interrupts for MSI/MSI-X use */
	GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start,
	    sc->sc_spi_count);

	/*
	 * NOTE(review): the return value of intr_msi_register() is
	 * ignored here -- confirm failure really cannot happen.
	 */
	intr_msi_register(dev, sc->sc_xref);

	if (bootverbose)
		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
		    sc->sc_spi_start + sc->sc_spi_count - 1);

	return (0);
}
1356 
1357 static int
arm_gicv2m_alloc_msi(device_t dev,device_t child,int count,int maxcount,device_t * pic,struct intr_irqsrc ** srcs)1358 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1359     device_t *pic, struct intr_irqsrc **srcs)
1360 {
1361 	struct arm_gicv2m_softc *sc;
1362 	int error;
1363 
1364 	sc = device_get_softc(dev);
1365 	error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start,
1366 	    sc->sc_spi_count, count, maxcount, srcs);
1367 	if (error != 0)
1368 		return (error);
1369 
1370 	*pic = dev;
1371 	return (0);
1372 }
1373 
1374 static int
arm_gicv2m_release_msi(device_t dev,device_t child,int count,struct intr_irqsrc ** isrc)1375 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1376     struct intr_irqsrc **isrc)
1377 {
1378 	return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc));
1379 }
1380 
1381 static int
arm_gicv2m_alloc_msix(device_t dev,device_t child,device_t * pic,struct intr_irqsrc ** isrcp)1382 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1383     struct intr_irqsrc **isrcp)
1384 {
1385 	struct arm_gicv2m_softc *sc;
1386 	int error;
1387 
1388 	sc = device_get_softc(dev);
1389 	error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start,
1390 	    sc->sc_spi_count, isrcp);
1391 	if (error != 0)
1392 		return (error);
1393 
1394 	*pic = dev;
1395 	return (0);
1396 }
1397 
1398 static int
arm_gicv2m_release_msix(device_t dev,device_t child,struct intr_irqsrc * isrc)1399 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1400 {
1401 	return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc));
1402 }
1403 
1404 static int
arm_gicv2m_map_msi(device_t dev,device_t child,struct intr_irqsrc * isrc,uint64_t * addr,uint32_t * data)1405 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1406     uint64_t *addr, uint32_t *data)
1407 {
1408 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1409 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1410 
1411 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1412 	*data = gi->gi_irq;
1413 
1414 	return (0);
1415 }
1416 
/* Kobj method table for the GICv2m MSI/MSI-X frame driver. */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};
1431 
/* Base "gicv2m" driver class; bus-specific front ends subclass this. */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
1434