/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * the sponsorship of the FreeBSD Foundation.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/interrupt.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#ifdef FDT
#include <dev/fdt/fdt_intr.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#include "gic_if.h"
#include "pic_if.h"
#include "msi_if.h"

#include <arm/arm/gic_common.h>
#include "gic_v3_reg.h"
#include "gic_v3_var.h"

static bus_print_child_t gic_v3_print_child;
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;
static bus_write_ivar_t gic_v3_write_ivar;
static bus_alloc_resource_t gic_v3_alloc_resource;

static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
static gic_alloc_msi_t gic_v3_gic_alloc_msi;
static gic_release_msi_t gic_v3_gic_release_msi;
static gic_alloc_msix_t gic_v3_gic_alloc_msix;
static gic_release_msix_t gic_v3_gic_release_msix;

static msi_alloc_msi_t gic_v3_alloc_msi;
static msi_release_msi_t gic_v3_release_msi;
static msi_alloc_msix_t gic_v3_alloc_msix;
static msi_release_msix_t gic_v3_release_msix;
static msi_map_msi_t gic_v3_map_msi;

static u_int gic_irq_cpu;
#ifdef SMP
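/*
 * Map each allocated SGI back to the kernel IPI it delivers; SGIs are
 * handed out in ascending order by gic_v3_ipi_setup() below.
 */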
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif

static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	gic_v3_print_child),
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),
	DEVMETHOD(bus_alloc_resource,	gic_v3_alloc_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,	gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,	gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,	gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,		gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);

/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;
	enum intr_polarity	gi_pol;
	enum intr_trigger	gi_trig;
#define	GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define	GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif

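/*
 * Low-level accessors for this CPU's Re-Distributor frame. Each CPU's
 * frame lives at a per-CPU offset inside one of the mapped Re-Distributor
 * regions, so every access adds that offset before touching the register.
 */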
uint32_t
gic_r_read_4(device_t dev, bus_size_t offset)
{
	struct gic_v3_softc *sc;
	struct resource *rdist;

	sc = device_get_softc(dev);
	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
	return (bus_read_4(rdist, offset));
}

uint64_t
gic_r_read_8(device_t dev, bus_size_t offset)
{
	struct gic_v3_softc *sc;
	struct resource *rdist;

	sc = device_get_softc(dev);
	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
	return (bus_read_8(rdist, offset));
}

void
gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
{
	struct gic_v3_softc *sc;
	struct resource *rdist;

	sc = device_get_softc(dev);
	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
	bus_write_4(rdist, offset, val);
}

void
gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
{
	struct gic_v3_softc *sc;
	struct resource *rdist;

	sc = device_get_softc(dev);
	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
	bus_write_8(rdist, offset, val);
}

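/*
 * Mark [start, start + count) as reserved for message-based (MSI/MSI-X)
 * use: such SPIs are always edge-triggered with high polarity, and are
 * handed out later by the msi_if allocators below.
 */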
static void
gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
{
	struct gic_v3_softc *sc;
	int i;

	sc = device_get_softc(dev);

	KASSERT((start + count) < sc->gic_nirqs,
	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
	    start, count, sc->gic_nirqs));
	for (i = 0; i < count; i++) {
		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
		    ("%s: MSI interrupt %d already has a handler", __func__,
		    start + i));
		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
		    ("%s: MSI interrupt %d already has a polarity", __func__,
		    start + i));
		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
		    ("%s: MSI interrupt %d already has a trigger", __func__,
		    start + i));
		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
	}
}

/*
 * Device interface.
 */
int
gic_v3_attach(device_t dev)
{
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	uint32_t typer;
	int rid;
	int err;
	size_t i;
	u_int irq;
	const char *name;

	sc = device_get_softc(dev);
	sc->gic_registered = FALSE;
	sc->dev = dev;
	err = 0;

	/* Initialize mutex */
	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);

	/*
	 * Allocate array of struct resource.
	 * One entry for Distributor and all remaining for Re-Distributor.
	 */
	sc->gic_res = malloc(
	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
	    M_GIC_V3, M_WAITOK);

	/* Now allocate corresponding resources */
	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->gic_res[rid] == NULL)
			return (ENXIO);
	}

	/*
	 * Distributor interface
	 */
	sc->gic_dist = sc->gic_res[0];

	/*
	 * Re-Distributor interface
	 */
	/* Allocate space under region descriptions */
	sc->gic_redists.regions = malloc(
	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
	    M_GIC_V3, M_WAITOK);

	/* Fill-up bus_space information for each region. */
	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
		sc->gic_redists.regions[i] = sc->gic_res[rid];

	/* Get the number of supported SPI interrupts */
	typer = gic_d_read(sc, 4, GICD_TYPER);
	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
	if (sc->gic_nirqs > GIC_I_NUM_MAX)
		sc->gic_nirqs = GIC_I_NUM_MAX;

	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
	    M_GIC_V3, M_WAITOK | M_ZERO);
	name = device_get_nameunit(dev);
	for (irq = 0; irq < sc->gic_nirqs; irq++) {
		struct intr_irqsrc *isrc;

		sc->gic_irqs[irq].gi_irq = irq;
		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;

		isrc = &sc->gic_irqs[irq].gi_isrc;
		if (irq <= GIC_LAST_SGI) {
			err = intr_isrc_register(isrc, sc->dev,
			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
		} else if (irq <= GIC_LAST_PPI) {
			err = intr_isrc_register(isrc, sc->dev,
			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
		} else {
			err = intr_isrc_register(isrc, sc->dev, 0,
			    "%s,s%u", name, irq - GIC_FIRST_SPI);
		}
		if (err != 0) {
			/* XXX call intr_isrc_deregister() */
			free(sc->gic_irqs, M_GIC_V3);
			return (err);
		}
	}

	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
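	/*
	 * If the platform described a message-based interrupt (MBI) range
	 * (e.g. via the FDT "mbi-ranges" property or ACPI), reserve those
	 * SPIs for MSI/MSI-X delivery now.
	 */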
	if (sc->gic_mbi_start > 0) {
		if (!sc->gic_mbi_end) {
			/*
			 * This covers SPI-based MSI ranges where the end of
			 * the SPI range was not specified by ACPI; default
			 * to the last supported interrupt.
			 */
			sc->gic_mbi_end = sc->gic_nirqs - 1;
		}
		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
		    sc->gic_mbi_end - sc->gic_mbi_start);

		if (bootverbose) {
			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
			    sc->gic_mbi_end);
		}
	}

	/*
	 * Read the Peripheral ID2 register. This is an implementation
	 * defined register, but seems to be implemented in all GICv3
	 * parts and Linux expects it to be there.
	 */
	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);

	/* Get the number of supported interrupt identifier bits */
	sc->gic_idbits = GICD_TYPER_IDBITS(typer);

	if (bootverbose) {
		device_printf(dev, "SPIs: %u, IDs: %u\n",
		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
	}

	/* Run the init sequence for the boot CPU */
	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
		err = (*init_func)(sc);
		if (err != 0)
			return (err);
	}

	return (0);
}

int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	free(sc->gic_redists.pcpu, M_GIC_V3);

	free(sc->ranges, M_GIC_V3);
	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}

static int
gic_v3_print_child(device_t bus, device_t child)
{
	struct resource_list *rl;
	int retval = 0;

	rl = BUS_GET_RESOURCE_LIST(bus, child);
	KASSERT(rl != NULL, ("%s: No resource list", __func__));
	retval += bus_print_child_header(bus, child);
	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
	retval += bus_print_child_footer(bus, child);

	return (retval);
}

static int
gic_v3_get_domain(device_t dev, device_t child, int *domain)
{
	struct gic_v3_devinfo *di;

	di = device_get_ivars(child);
	if (di->gic_domain < 0)
		return (ENOENT);

	*domain = di->gic_domain;
	return (0);
}

static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;
	struct gic_v3_devinfo *di;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
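		/*
		 * Report how many interrupt IDs each child controller may
		 * use: the system-wide total minus those owned by this GIC,
		 * split evenly among the children.
		 */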
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		*result = (uintptr_t)&sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GICV3_IVAR_FLAGS:
		*result = sc->gic_flags;
		return (0);
	case GIC_IVAR_SUPPORT_LPIS:
		*result =
		    (gic_d_read(sc, 4, GICD_TYPER) & GICD_TYPER_LPIS) != 0;
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		    GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		di = device_get_ivars(child);
		if (di == NULL)
			return (EINVAL);
		*result = di->is_vgic;
		return (0);
	}

	return (ENOENT);
}

static int
gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	switch (which) {
	case GICV3_IVAR_NIRQS:
	case GICV3_IVAR_REDIST:
	case GICV3_IVAR_FLAGS:
	case GIC_IVAR_HW_REV:
	case GIC_IVAR_BUS:
		return (EINVAL);
	}

	return (ENOENT);
}

static struct resource *
gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct gic_v3_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	/* We only allocate memory */
	if (type != SYS_RES_MEMORY)
		return (NULL);

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);
		if (rl == NULL)
			return (NULL);

		/* Find defaults for this rid */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}

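/*
 * Top-level interrupt filter: acknowledge interrupts from ICC_IAR1_EL1 and
 * dispatch until the read returns an out-of-range (spurious) ID. LPIs
 * (IDs >= GIC_FIRST_LPI) are handed to the child controller; SGIs are EOI'd
 * before the IPI handler runs; edge-triggered SPIs are EOI'd before dispatch
 * while level-triggered ones are EOI'd by post_filter/pre_ithread.
 */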
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}

#ifdef FDT
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *	2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
#endif

static int
gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_irqsrc *gi;

	/* SPI-mapped MSI */
	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
	if (gi == NULL)
		return (ENXIO);

	*irqp = gi->gi_irq;

	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
	*polp = INTR_POLARITY_HIGH;
	*trigp = INTR_TRIGGER_EDGE;

	return (0);
}

static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}

static int
gic_v3_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct gic_v3_softc *sc;
	int error;
	u_int irq;

	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
	if (error == 0) {
		sc = device_get_softc(dev);
		*isrcp = GIC_INTR_ISRC(sc, irq);
	}
	return (error);
}

struct gic_v3_setup_periph_args {
	device_t dev;
	struct intr_irqsrc *isrc;
};

static void
gic_v3_setup_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct intr_irqsrc *isrc = args->isrc;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	device_t dev = args->dev;
	u_int irq = gi->gi_irq;
	struct gic_v3_softc *sc = device_get_softc(dev);
	uint32_t reg;

	MPASS(irq <= GIC_LAST_SPI);

	/*
	 * We need the lock for both SGIs and PPIs for an atomic CPU_SET() at a
	 * minimum, but we also need it below for SPIs.
	 */
	mtx_lock_spin(&sc->gic_mtx);

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
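		/*
		 * GICD_ICFGRn/GICR_ICFGRn allocate two bits per interrupt;
		 * bit 1 of each field selects edge (1) or level (0)
		 * triggering, hence the "2 << ((irq % 16) * 2)" mask below.
		 */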
		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		if (gi->gi_trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}
	}

	mtx_unlock_spin(&sc->gic_mtx);
}

static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	struct gic_v3_setup_periph_args pargs;
	enum intr_trigger trig;
	enum intr_polarity pol;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	pargs.dev = dev;
	pargs.isrc = isrc;

	if (isrc->isrc_flags & INTR_ISRCF_PPI) {
		/*
		 * If APs haven't been fired up yet, smp_rendezvous() will just
		 * execute it on the single CPU and gic_v3_init_secondary() will
		 * clean up afterwards.
		 */
		smp_rendezvous(NULL, gic_v3_setup_intr_periph, NULL, &pargs);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		gic_v3_setup_intr_periph(&pargs);
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}

static int
gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = INTR_POLARITY_CONFORM;
		gi->gi_trig = INTR_TRIGGER_CONFORM;
	}

	return (0);
}

static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}

static void
gic_v3_enable_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)args->isrc;
	device_t dev = args->dev;
	struct gic_v3_softc *sc = device_get_softc(dev);
	u_int irq = gi->gi_irq;

	/* SGIs and PPIs in corresponding Re-Distributor */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
	    GICD_I_MASK(irq));
	gic_v3_wait_for_rwp(sc, REDIST);
}

static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_setup_periph_args pargs;
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;
	pargs.isrc = isrc;
	pargs.dev = dev;

	if (irq <= GIC_LAST_PPI) {
		/*
		 * SGIs only need to be configured on the current CPU; IPIs
		 * are set up and enabled as each AP comes online.
		 */
		if (irq <= GIC_LAST_SGI)
			gic_v3_enable_intr_periph(&pargs);
		else
			smp_rendezvous(NULL, gic_v3_enable_intr_periph, NULL,
			    &pargs);
		return;
	}

	sc = device_get_softc(dev);

	if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}

static void
gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

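	/*
	 * Mask the (level-triggered) interrupt and signal EOI so that other
	 * interrupts can be taken while the ithread runs;
	 * gic_v3_post_ithread() unmasks it once the handler has finished.
	 */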
	gic_v3_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, gi->gi_irq);
}

static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}

static void
gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	if (gi->gi_trig == INTR_TRIGGER_EDGE)
		return;

	gic_icc_write(EOIR1, gi->gi_irq);
}

static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}

#ifdef SMP
static void
gic_v3_init_secondary(device_t dev, uint32_t rootnum)
{
	struct gic_v3_setup_periph_args pargs;
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this secondary CPU */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	pargs.dev = dev;

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			gic_v3_setup_intr_periph(&pargs);
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child, rootnum);
	}
}

static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

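	/*
	 * ICC_SGI1R_EL1 addresses targets as Aff3.Aff2.Aff1 plus a 16-bit
	 * list of Aff0 values, so CPUs are batched by their upper affinity
	 * levels and one register write is issued per group.
	 */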
#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}

static int
gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct gic_v3_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
#endif /* SMP */

/*
 * Helper routines
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	bus_size_t offset;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		offset = 0;
		break;
	case REDIST:
		res = sc->gic_redists.pcpu[cpuid].res;
		offset = sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

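	/*
	 * The RWP (Register Write Pending) bit reads as one while the effects
	 * of earlier configuration writes are still propagating through the
	 * GIC; poll until it clears, panicking after roughly one second.
	 */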
	while ((bus_read_4(res, offset + GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}

/* CPU interface. */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}

static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done. This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}

static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/* Enable Group 1 (Non-secure) interrupts */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}

/* Distributor */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}

/* Re-Distributor */
static int
gic_v3_redist_alloc(struct gic_v3_softc *sc)
{
	sc->gic_redists.pcpu = mallocarray(mp_maxid + 1,
	    sizeof(sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK);
	return (0);
}

static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource *r_res;
	bus_size_t offset;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = sc->gic_redists.regions[i];

		pidr2 = bus_read_4(r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

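		/*
		 * Each Re-Distributor occupies an RD frame plus an SGI frame
		 * (64 KiB each in GICv3), with two further frames when
		 * GICR_TYPER.VLPIS is set; GICR_TYPER.Last marks the final
		 * Re-Distributor in a region.
		 */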
		offset = 0;
		do {
			typer = bus_read_8(r_res, offset + GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(cpuid <= mp_maxid,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid].res = r_res;
				sc->gic_redists.pcpu[cpuid].offset = offset;
				sc->gic_redists.pcpu[cpuid].lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			offset += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				offset +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}
		} while (offset < rman_get_size(r_res) &&
		    !sc->gic_redists.single && (typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}

static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing the ProcessorSleep bit we must wait for
	 * ChildrenAsleep to read as zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}

static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}

/*
 * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
 */

static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

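	/*
	 * PCI MSI lets a device flip the low log2(maxcount) bits of the
	 * message data, so the allocation is kept as a contiguous run of
	 * count interrupts aligned to a maxcount boundary.
	 */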
	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI) != 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}

static int
gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);
	for (i = 0; i < count; i++) {
		gi = (struct gic_v3_irqsrc *)isrc[i];

		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
		    ("%s: Trying to release an unused MSI interrupt",
		    __func__));

		gi->gi_flags &= ~GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	return (0);
}

static int
gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
    struct intr_irqsrc **isrcp)
{
	struct gic_v3_softc *sc;
	int irq;

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);
	/* Find an unused interrupt */
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
		    ("%s: Non-MSI interrupt found", __func__));
		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
			break;
	}
	/* No free interrupt was found */
	if (irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	/* Mark the interrupt as used */
	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
	mtx_unlock(&sc->gic_mbi_mtx);

	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];

	return (0);
}

static int
gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;

	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
	    ("%s: Trying to release an unused MSI-X interrupt", __func__));

	mtx_lock(&sc->gic_mbi_mtx);
	gi->gi_flags &= ~GI_FLAG_MSI_USED;
	mtx_unlock(&sc->gic_mbi_mtx);

	return (0);
}

static int
gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
	    sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
	if (error != 0)
		return (error);

	*pic = dev;
	return (0);
}

static int
gic_v3_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	return (gic_v3_gic_release_msi(dev, count, isrc));
}

static int
gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
	    sc->gic_mbi_end - sc->gic_mbi_start, isrc);
	if (error != 0)
		return (error);

	*pic = dev;

	return (0);
}

static int
gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	return (gic_v3_gic_release_msix(dev, isrc));
}

static int
gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

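	/*
	 * The MSI doorbell is the Distributor's GICD_SETSPI_NSR register: a
	 * Non-secure write of an SPI number to its physical address raises
	 * that SPI, which is how these SPI-backed MSIs are delivered.
	 */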
	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
	*data = gi->gi_irq;

	return (0);
}