/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * Copyright (c) 2023 Arm Ltd
 *
 * This software was developed by Andrew Turner under
 * the sponsorship of the FreeBSD Foundation.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/physmem.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef IOMMU
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#endif

#include "pcib_if.h"
#include "pic_if.h"
#include "msi_if.h"

MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
    "ARM GICv3 Interrupt Translation Service");

#define	LPI_NIRQS		(64 * 1024)

/* The size and alignment of the command circular buffer */
#define	ITS_CMDQ_SIZE		(64 * 1024)	/* Must be a multiple of 4K */
#define	ITS_CMDQ_ALIGN		(64 * 1024)

#define	LPI_CONFTAB_SIZE	LPI_NIRQS
#define	LPI_CONFTAB_ALIGN	(64 * 1024)
#define	LPI_CONFTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 48 bit PA */

/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
#define	LPI_PENDTAB_SIZE	((LPI_NIRQS + GIC_FIRST_LPI) / 8)
#define	LPI_PENDTAB_ALIGN	(64 * 1024)
#define	LPI_PENDTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 48 bit PA */

#define	LPI_INT_TRANS_TAB_ALIGN	256
#define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)

/* ITS commands encoding */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)
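
/*
 * Layout of the 32 byte ITS commands, as built by the cmd_format_*()
 * helpers near the end of this file (stored little-endian):
 *
 *   DW0 [7:0]   command      DW0 [63:32] DeviceID
 *   DW1 [31:0]  EventID      DW1 [63:32] physical LPI ID
 *   DW2 [15:0]  ICID         DW2 [47:16] target RDbase/CPU number
 *   DW2 [47:8]  ITT address  DW2 [63]    valid
 *
 * Which of these fields is meaningful depends on the command; the ITT
 * address and target fields, for example, overlap and are used by
 * different commands. See the GICv3 architecture specification for the
 * per-command encodings.
 */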

#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;
	u_int	lpi_free;	/* Number of free LPIs in chunk */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};
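
/*
 * Chunk accounting sketch: a device that allocates 4 vectors starts with
 * lpi_num = lpi_free = 4 and lpi_busy = 0.  Each allocation takes the IRQ
 * at lpi_base + (lpi_num - lpi_free), then decrements lpi_free and
 * increments lpi_busy; the chunk is released once lpi_busy drops back
 * to 0 (see its_device_get() and its_device_release() below).
 */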

/* ITS device */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	void			*itt;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 */
struct its_cmd_desc {
	uint8_t cmd_type;

	union {
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		struct {
			struct its_col *col;
		} cmd_desc_sync;

		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table */
struct its_ptable {
	void		*ptab_vaddr;
	/* Size of the L1 and L2 tables */
	size_t		ptab_l1_size;
	size_t		ptab_l2_size;
	/* Number of L1 and L2 entries */
	int		ptab_l1_nidents;
	int		ptab_l2_nidents;

	int		ptab_page_size;
	int		ptab_share;
	bool		ptab_indirect;
};

/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;
	u_int			gi_lpi;
	struct its_dev		*gi_its_dev;
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};

struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;

	cpuset_t	sc_cpus;
	struct domainset *sc_ds;
	u_int		gic_irq_cpu;
	int		sc_devbits;
	int		sc_dev_table_idx;

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;
	void		*sc_pend_base[MAXCPU];

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
	size_t sc_its_cmd_next_idx;

	vmem_t *sc_irq_alloc;
	struct gicv3_its_irqsrc	**sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
#define	ITS_FLAGS_LPI_PREALLOC		0x00000008
	u_int sc_its_flags;
	bool	trace_enable;
	vm_page_t ma; /* fake msi page */
};

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

#define	gic_its_read_4(sc, reg)		\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)		\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)	\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)	\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};

static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
    sizeof(struct gicv3_its_softc));

static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN,
	    0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}

static int
gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table)
{
	uint64_t reg, tmp;
	int page_size;

	page_size = PAGE_SIZE_64K;
	reg = gic_its_read_8(sc, GITS_BASER(table));

	while (1) {
		reg &= ~GITS_BASER_PSZ_MASK;
		switch (page_size) {
		case PAGE_SIZE_4K:	/* 4KB */
			reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
			break;
		case PAGE_SIZE_16K:	/* 16KB */
			reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
			break;
		case PAGE_SIZE_64K:	/* 64KB */
			reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
			break;
		}

		/* Write the new page size */
		gic_its_write_8(sc, GITS_BASER(table), reg);

		/* Read back to check */
		tmp = gic_its_read_8(sc, GITS_BASER(table));

		/* The page size is correct */
		if ((tmp & GITS_BASER_PSZ_MASK) == (reg & GITS_BASER_PSZ_MASK))
			return (page_size);

		switch (page_size) {
		default:
			return (-1);
		case PAGE_SIZE_16K:
			page_size = PAGE_SIZE_4K;
			break;
		case PAGE_SIZE_64K:
			page_size = PAGE_SIZE_16K;
			break;
		}
	}
}

static bool
gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table)
{
	uint64_t reg;

	reg = gic_its_read_8(sc, GITS_BASER(table));

	/* Try setting the indirect flag */
	reg |= GITS_BASER_INDIRECT;
	gic_its_write_8(sc, GITS_BASER(table), reg);

	/* Read back to check */
	reg = gic_its_read_8(sc, GITS_BASER(table));
	return ((reg & GITS_BASER_INDIRECT) != 0);
}

static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	void *table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t its_tbl_size, nitspages, npages;
	size_t l1_esize, l2_esize, l1_nidents, l2_nidents;
	int i, page_size;
	int devbits;
	bool indirect;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_RAWAWB;
	}
	sc->sc_devbits = devbits;
	share = GITS_BASER_SHARE_IS;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		if (type == GITS_BASER_TYPE_UNIMPL)
			continue;

		/* The table entry size */
		l1_esize = GITS_BASER_ESIZE(reg);

		/* Find the tables page size */
		page_size = gicv3_its_table_page_size(sc, i);
		if (page_size == -1) {
			device_printf(dev, "No valid page size for table %d\n",
			    i);
			return (EINVAL);
		}

		indirect = false;
		l2_nidents = 0;
		l2_esize = 0;
		switch (type) {
		case GITS_BASER_TYPE_DEV:
			if (sc->sc_dev_table_idx != -1)
				device_printf(dev,
				    "Warning: Multiple device tables found\n");

			sc->sc_dev_table_idx = i;
			l1_nidents = (1 << devbits);
			if ((l1_esize * l1_nidents) > (page_size * 2)) {
				indirect =
				    gicv3_its_table_supports_indirect(sc, i);
				if (indirect) {
					/*
					 * Each l1 entry is 8 bytes and points
					 * to an l2 table of size page_size.
					 * Calculate how many entries this is
					 * and use this to find how many
					 * 8 byte l1 idents we need.
					 */
					l2_esize = l1_esize;
					l2_nidents = page_size / l2_esize;
					l1_nidents = l1_nidents / l2_nidents;
					l1_esize = GITS_INDIRECT_L1_ESIZE;
				}
			}
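			/*
			 * Example: with 64KB pages, 8 byte entries and
			 * devbits = 20 the flat table would be 8MB, so an
			 * indirect layout is used instead: each L2 page
			 * holds 64KB / 8 = 8192 entries, leaving an L1
			 * table of 1M / 8192 = 128 eight-byte pointers.
			 */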
			its_tbl_size = l1_esize * l1_nidents;
			its_tbl_size = roundup2(its_tbl_size, page_size);
			break;
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		case GITS_BASER_TYPE_VP:
			/*
			 * If GITS_TYPER.SVPET != 0, the pending table is
			 * shared amongst the redistributors and the other
			 * ITSes. Requiring sharing across the ITSes when none
			 * of the redistributors have GICR_VPROPBASER.Valid==1
			 * isn't specified in the architecture, but that's how
			 * the GIC-700 behaves. We don't handle vPE tables at
			 * all yet, so just skip this base register.
			 */
		default:
			if (bootverbose)
				device_printf(dev, "Unhandled table type %lx\n",
				    type);
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = contigmalloc_domainset(npages * PAGE_SIZE,
		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size;
		sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents;
		sc->sc_its_ptab[i].ptab_l2_size = page_size;
		sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents;

		sc->sc_its_ptab[i].ptab_indirect = indirect;
		sc->sc_its_ptab[i].ptab_page_size = page_size;

		paddr = vtophys(table);

		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (indirect ? GITS_BASER_INDIRECT : 0) |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			sc->sc_its_ptab[i].ptab_share = share;
			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	/* note: we assume the ITS children are serialized by the parent */
	static void *conf_table;
	int extra_flags = 0;
	device_t gicv3;
	uint32_t ctlr;
	vm_paddr_t conf_pa;
	vm_offset_t conf_va;

	/*
	 * The PROPBASER is a singleton in our parent. We only set it up the
	 * first time through. conf_table is effectively global to all the
	 * units and we rely on subr_bus to serialize probe/attach.
	 */
	if (conf_table != NULL) {
		sc->sc_conf_base = conf_table;
		return;
	}

	gicv3 = device_get_parent(sc->dev);
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	if ((ctlr & GICR_CTLR_LPI_ENABLE) != 0) {
		conf_pa = gic_r_read_8(gicv3, GICR_PROPBASER);
		conf_pa &= GICR_PROPBASER_PA_MASK;
		/*
		 * If there was a pre-existing PROPBASER, then we need to honor
		 * it because implementation defined behavior in gicv3 makes it
		 * impossible to quiesce to change it out. We will only see a
		 * pre-existing one when we've been kexec'd from a Linux kernel,
		 * or from a LinuxBoot environment.
		 *
		 * Linux provides us with a MEMRESERVE table that we put into
		 * the excluded physmem area. If PROPBASER isn't in this table,
		 * the system cannot run due to random memory corruption,
		 * so we panic for this case.
		 */
		if (!physmem_excluded(conf_pa, LPI_CONFTAB_SIZE))
			panic("gicv3 PROPBASER needs to reuse %#lx, but not reserved",
			    conf_pa);
		conf_va = PHYS_TO_DMAP(conf_pa);
		if (!pmap_klookup(conf_va, NULL))
			panic("Cannot map prior LPI mapping into KVA");
		conf_table = (void *)conf_va;
		extra_flags = ITS_FLAGS_LPI_PREALLOC | ITS_FLAGS_LPI_CONF_FLUSH;
		if (bootverbose)
			device_printf(sc->dev,
			    "LPI enabled, conf table using pa %#lx va %lx\n",
			    conf_pa, conf_va);
	} else {
		/*
		 * Otherwise just allocate contiguous pages. We'll configure the
		 * PROPBASER register later in its_init_cpu_lpi().
		 */
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);
	}
	sc->sc_conf_base = conf_table;
	sc->sc_its_flags |= extra_flags;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
}

static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) {
		for (int i = 0; i <= mp_maxid; i++) {
			if (CPU_ISSET(i, &sc->sc_cpus) == 0)
				continue;

			sc->sc_pend_base[i] = contigmalloc(
			    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
			    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

			/* Flush so the ITS can see the memory */
			cpu_dcache_wb_range(sc->sc_pend_base[i],
			    LPI_PENDTAB_SIZE);
		}
	}
}

static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp, size;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);

	/*
	 * Set the redistributor base. If we're reusing what we found on boot
	 * since the gic was already running, then don't touch it here. We also
	 * don't need to disable / enable LPI if we're not changing PROPBASER,
	 * so only do that if we're not prealloced.
	 */
	if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) {
		/* Disable LPIs */
		ctlr = gic_r_read_4(gicv3, GICR_CTLR);
		ctlr &= ~GICR_CTLR_LPI_ENABLE;
		gic_r_write_4(gicv3, GICR_CTLR, ctlr);

		/* Make sure changes are observable by the GIC */
		dsb(sy);

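		/*
		 * GICR_PROPBASER.IDbits holds the number of supported
		 * interrupt ID bits minus one; with the constants above
		 * this computes to 15, i.e. a 16 bit ID space.
		 */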
		size = ilog2_long(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1;

		xbaser = vtophys(sc->sc_conf_base) |
		    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
		    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
		    size;

		gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

		/* Check the cache attributes we set */
		tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

		if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
		    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
			if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
			    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
				/* We need to mark as non-cacheable */
				xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
				    GICR_PROPBASER_CACHE_MASK);
				/* Non-cacheable */
				xbaser |= GICR_PROPBASER_CACHE_NIN <<
				    GICR_PROPBASER_CACHE_SHIFT;
				/* Non-shareable */
				xbaser |= GICR_PROPBASER_SHARE_NS <<
				    GICR_PROPBASER_SHARE_SHIFT;
				gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
			}
			sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
		}

		/*
		 * Set the LPI pending table base
		 */
		xbaser = vtophys(sc->sc_pend_base[cpuid]) |
		    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
		    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

		gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

		tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

		if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
		    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
			/* Clear the cache and shareability bits */
			xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
			    GICR_PENDBASER_SHARE_MASK);
			/* Mark as non-shareable */
			xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
			/* And non-cacheable */
			xbaser |= GICR_PENDBASER_CACHE_NIN <<
			    GICR_PENDBASER_CACHE_SHIFT;
		}

		/* Enable LPIs */
		ctlr = gic_r_read_4(gicv3, GICR_CTLR);
		ctlr |= GICR_CTLR_LPI_ENABLE;
		gic_r_write_4(gicv3, GICR_CTLR, ctlr);

		/* Make sure the GIC has seen everything */
		dsb(sy);
	} else {
		KASSERT(sc->sc_pend_base[cpuid] == NULL,
		    ("PREALLOC too soon cpuid %d", cpuid));
		tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
		tmp &= GICR_PENDBASER_PA_MASK;
		if (!physmem_excluded(tmp, LPI_PENDTAB_SIZE))
			panic("gicv3 PENDBASER on cpu %d needs to reuse %#lx, but not reserved",
			    cpuid, tmp);
		sc->sc_pend_base[cpuid] = (void *)PHYS_TO_DMAP(tmp);
	}

	if (bootverbose)
		device_printf(gicv3, "using %sPENDBASE of %#lx on cpu %d\n",
		    (sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) ? "pre-existing " : "",
		    vtophys(sc->sc_pend_base[cpuid]), cpuid);
}

static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) +
		    rpcpu->offset);
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}

static int
gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	int rv;

	sc = arg1;

	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);
	if (sc->trace_enable)
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return (err);
}

static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return (0);
}

static int
gicv3_its_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain, err, i, rid;
	uint64_t phys;
	uint32_t ctlr, iidr;

	sc = device_get_softc(dev);

	sc->sc_dev_table_idx = -1;
	sc->sc_irq_length = gicv3_get_nirqs(dev);
	sc->sc_irq_base = GIC_FIRST_LPI;
	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;

	rid = 0;
	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_its_res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}

	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
	    GITS_TRANSLATER, PAGE_SIZE);
	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);

	CPU_COPY(&all_cpus, &sc->sc_cpus);
	iidr = gic_its_read_4(sc, GITS_IIDR);
	for (i = 0; i < nitems(its_quirks); i++) {
		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
			if (bootverbose) {
				device_printf(dev, "Applying %s\n",
				    its_quirks[i].desc);
			}
			its_quirks[i].func(dev);
			break;
		}
	}

	if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) {
		sc->sc_ds = DOMAINSET_PREF(domain);
	} else {
		sc->sc_ds = DOMAINSET_RR();
	}

	/*
	 * GITS_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may be
	 * coming in via, for instance, a kexec/kboot style setup where a
	 * previous kernel has configured then relinquished control. Clear it
	 * so that we can reconfigure GITS_BASER*.
	 */
	ctlr = gic_its_read_4(sc, GITS_CTLR);
	if ((ctlr & GITS_CTLR_EN) != 0) {
		ctlr &= ~GITS_CTLR_EN;
		gic_its_write_4(sc, GITS_CTLR, ctlr);
	}

	/* Allocate the private tables */
	err = gicv3_its_table_init(dev, sc);
	if (err != 0)
		return (err);

	/* Protects access to the device list */
	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);

	/* Protects access to the ITS command circular buffer. */
	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);

	/* Allocate the command circular buffer */
	gicv3_its_cmdq_init(sc);

	/* Allocate the per-CPU collections */
	for (int cpu = 0; cpu <= mp_maxid; cpu++)
		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
			sc->sc_its_cols[cpu] = malloc_domainset(
			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
			    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain),
			    M_WAITOK | M_ZERO);
		else
			sc->sc_its_cols[cpu] = NULL;

	/* Enable the ITS */
	gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN);

	/* Create the LPI configuration table */
	gicv3_its_conftable_init(sc);

	/* And the pending tables */
	gicv3_its_pendtables_init(sc);

	/* Enable LPIs on this CPU */
	its_init_cpu(dev, sc);

	TAILQ_INIT(&sc->sc_its_dev_list);
	TAILQ_INIT(&sc->sc_free_irqs);

	/*
	 * Create the vmem object to allocate INTRNG IRQs from. We try to
	 * use all IRQs not already used by the GICv3.
	 * XXX: This assumes there are no other interrupt controllers in the
	 * system.
	 */
	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);

	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
	    M_GICV3_ITS, M_WAITOK | M_ZERO);

	/* For GIC-500 install tracking sysctls. */
	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
		gicv3_its_init_sysctl(sc);

	return (0);
}

static int
gicv3_its_detach(device_t dev)
{

	return (ENXIO);
}

static void
its_quirk_cavium_22375(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain;

	sc = device_get_softc(dev);
	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;

	/*
	 * We need to limit which CPUs we send these interrupts to on
	 * the original dual socket ThunderX as it is unable to
	 * forward them between the two sockets.
	 */
	if (bus_get_domain(dev, &domain) == 0) {
		if (domain < MAXMEMDOM) {
			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
		} else {
			CPU_ZERO(&sc->sc_cpus);
		}
	}
}

static void
gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

static void
gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] |= LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

static int
gicv3_its_intr(void *arg, uintptr_t irq)
{
	struct gicv3_its_softc *sc = arg;
	struct gicv3_its_irqsrc *girq;
	struct trapframe *tf;

	irq -= sc->sc_irq_base;
	girq = sc->sc_irqs[irq];
	if (girq == NULL)
		panic("gicv3_its_intr: Invalid interrupt %ld",
		    irq + sc->sc_irq_base);

	tf = curthread->td_intr_frame;
	intr_isrc_dispatch(&girq->gi_isrc, tf);
	return (FILTER_HANDLED);
}

static void
gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

static void
gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

}

static void
gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

static int
gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
static void
gicv3_its_init_secondary(device_t dev, uint32_t rootnum)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
	 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
		    device_get_nameunit(pci_dev));

	return (id);
}

static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

static bool
its_device_alloc(struct gicv3_its_softc *sc, int devid)
{
	struct its_ptable *ptable;
	void *l2_table;
	uint64_t *table;
	uint32_t index;
	bool shareable;

	/* No device table */
	if (sc->sc_dev_table_idx < 0) {
		if (devid >= (1 << sc->sc_devbits)) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for hardware "
				    "(%x >= %x)\n", __func__, devid,
				    1 << sc->sc_devbits);
			}
			return (false);
		}
		return (true);
	}

	ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx];
	/* Check the devid is within the table limit */
	if (!ptable->ptab_indirect) {
		if (devid >= ptable->ptab_l1_nidents) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for table "
				    "(%x >= %x)\n", __func__, devid,
				    ptable->ptab_l1_nidents);
			}
			return (false);
		}

		return (true);
	}

	/* Check the devid is within the allocated range */
	index = devid / ptable->ptab_l2_nidents;
	if (index >= ptable->ptab_l1_nidents) {
		if (bootverbose) {
			device_printf(sc->dev,
			    "%s: Index out of range for table (%x >= %x)\n",
			    __func__, index, ptable->ptab_l1_nidents);
		}
		return (false);
	}

	table = (uint64_t *)ptable->ptab_vaddr;
	/* We have a second level table */
	if ((table[index] & GITS_BASER_VALID) != 0)
		return (true);

	shareable = true;
	if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS)
		shareable = false;

	l2_table = contigmalloc_domainset(ptable->ptab_l2_size,
	    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
	    ptable->ptab_page_size, 0);

	if (!shareable)
		cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size);

	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
	if (!shareable)
		cpu_dcache_wb_range(&table[index], sizeof(table[index]));

	dsb(sy);
	return (true);
}

static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize, itt_size;

	sc = device_get_softc(dev);

	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	if (!its_device_alloc(sc, its_dev->devid)) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries per device.
	 */
	itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = contigmalloc_domainset(itt_size,
	    M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
	    LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == NULL) {
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	/* Make sure device sees zeroed ITT. */
	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
		cpu_dcache_wb_range(its_dev->itt, itt_size);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}

static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
	    ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != NULL, ("Invalid ITT in valid ITS device"));
	free(its_dev->itt, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}

static struct gicv3_its_irqsrc *
gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
{
	struct gicv3_its_irqsrc *girq = NULL;

	KASSERT(sc->sc_irqs[irq] == NULL,
	    ("%s: Interrupt %u already allocated", __func__, irq));
	mtx_lock_spin(&sc->sc_its_dev_lock);
	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
		girq = TAILQ_FIRST(&sc->sc_free_irqs);
		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	if (girq == NULL) {
		girq = malloc(sizeof(*girq), M_GICV3_ITS,
		    M_NOWAIT | M_ZERO);
		if (girq == NULL)
			return (NULL);
		girq->gi_id = -1;
		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
			free(girq, M_GICV3_ITS);
			return (NULL);
		}
	}
	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
	sc->sc_irqs[irq] = girq;

	return (girq);
}

static void
gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
    struct gicv3_its_irqsrc *girq)
{
	u_int irq;

	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);

	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
	sc->sc_irqs[irq] = NULL;

	girq->gi_id = -1;
	girq->gi_its_dev = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
}

static int
gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int irq;
	int i;

	its_dev = its_device_get(dev, child, count);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free >= count,
	    ("gicv3_its_alloc_msi: No free LPIs"));
	sc = device_get_softc(dev);
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	/* Allocate the irqsrc for each MSI */
	for (i = 0; i < count; i++, irq++) {
		its_dev->lpis.lpi_free--;
		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
		    sc, irq);
		if (srcs[i] == NULL)
			break;
	}

	/* The allocation failed, release them */
	if (i != count) {
		mtx_lock_spin(&sc->sc_its_dev_lock);
		for (i = 0; i < count; i++) {
			girq = (struct gicv3_its_irqsrc *)srcs[i];
			if (girq == NULL)
				break;
			gicv3_its_release_irqsrc(sc, girq);
			srcs[i] = NULL;
		}
		mtx_unlock_spin(&sc->sc_its_dev_lock);
		return (ENXIO);
	}

	/* Finish the allocation now we have all MSI irqsrcs */
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)srcs[i];
		girq->gi_id = i;
		girq->gi_its_dev = its_dev;

		/* Map the message to the given IRQ */
		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
		its_cmd_mapti(dev, girq);
	}
	its_dev->lpis.lpi_busy += count;
	*pic = dev;

	return (0);
}

static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	    "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	    "were allocated: releasing %d, allocated %d", count,
	    its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int nvecs, irq;

	nvecs = pci_msix_count(child);
	its_dev = its_device_get(dev, child, nvecs);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free > 0,
	    ("gicv3_its_alloc_msix: No free LPIs"));
	sc = device_get_softc(dev);
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
	if (girq == NULL)
		return (ENXIO);
	girq->gi_id = its_dev->lpis.lpi_busy;
	girq->gi_its_dev = its_dev;

	its_dev->lpis.lpi_free--;
	its_dev->lpis.lpi_busy++;

	/* Map the message to the given IRQ */
	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
	its_cmd_mapti(dev, girq);

	*pic = dev;
	*isrcp = (struct intr_irqsrc *)girq;

	return (0);
}

static int
gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
	    "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy > 0,
	    ("gicv3_its_release_msix: Releasing more interrupts than "
	    "were allocated: allocated %d", its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	mtx_lock_spin(&sc->sc_its_dev_lock);
	gicv3_its_release_irqsrc(sc, girq);
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy--;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;

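	/*
	 * The MSI doorbell is the GITS_TRANSLATER register; the message
	 * payload is the per-device EventID (gi_id), which the ITS
	 * translates back to an LPI using the device's ITT.
	 */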
	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
	*data = girq->gi_id;

	return (0);
}

#ifdef IOMMU
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	sc = device_get_softc(dev);
	/*
	 * Get the context. If no context is found then the device isn't
	 * behind an IOMMU so no setup is needed.
	 */
	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL) {
		*domain = NULL;
		return (0);
	}
	/* Map the page containing the GITS_TRANSLATER register. */
	error = iommu_map_msi(ctx, PAGE_SIZE, 0,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return;

	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
 */

static __inline void
cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
{
	/* Command field: DW0 [7:0] */
	cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
	cmd->cmd_dword[0] |= htole64(cmd_type);
}

static __inline void
cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
{
	/* Device ID field: DW0 [63:32] */
	cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
	cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
}

static __inline void
cmd_format_size(struct its_cmd *cmd, uint16_t size)
{
	/* Size field: DW1 [4:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
	cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
}

static __inline void
cmd_format_id(struct its_cmd *cmd, uint32_t id)
{
	/* ID field: DW1 [31:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
	cmd->cmd_dword[1] |= htole64(id);
}

static __inline void
cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
{
	/* Physical ID field: DW1 [63:32] */
	cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
	cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
}

static __inline void
cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
{
	/* Collection field: DW2 [15:0] */
1825 cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
1826 cmd->cmd_dword[2] |= htole64(col_id);
1827 }
1828
1829 static __inline void
cmd_format_target(struct its_cmd * cmd,uint64_t target)1830 cmd_format_target(struct its_cmd *cmd, uint64_t target)
1831 {
1832 /* Target Address field: DW2 [47:16] */
1833 cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
1834 cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
1835 }
1836
1837 static __inline void
cmd_format_itt(struct its_cmd * cmd,uint64_t itt)1838 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
1839 {
1840 /* ITT Address field: DW2 [47:8] */
1841 cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
1842 cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
1843 }
1844
1845 static __inline void
cmd_format_valid(struct its_cmd * cmd,uint8_t valid)1846 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
1847 {
1848 /* Valid field: DW2 [63] */
1849 cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
1850 cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
1851 }
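
/*
 * Summary of the encoding built by the accessors above.  An ITS
 * command is four little-endian 64-bit doublewords; only DW0-DW2
 * carry fields here:
 *
 *	DW0 [7:0]	command opcode
 *	DW0 [63:32]	DeviceID
 *	DW1 [4:0]	size (EventID width - 1, MAPD only)
 *	DW1 [31:0]	EventID
 *	DW1 [63:32]	physical INTID
 *	DW2 [15:0]	collection ID (ICID)
 *	DW2 [47:16]	target redistributor (RDbase)
 *	DW2 [47:8]	ITT physical address
 *	DW2 [63]	valid
 */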

static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}
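
/*
 * Worked example, assuming the architectural 32-byte (four
 * doubleword) command size: a 64KiB queue holds 2048 slots, so at
 * most 2047 commands can be outstanding.  The slot just before the
 * hardware read pointer is deliberately left unused so that a full
 * ring can be told apart from an empty one.
 */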

static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range(cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}
}

static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This timeout is deliberately generous: the
	 * time frame for the ITS to complete a command is not known,
	 * so wait up to roughly one second before giving up.
	 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}
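
/*
 * The completion test above, spelled out: the commands still in
 * flight occupy the offset window [first, last).  When the window
 * does not wrap, any CREADR value outside it means the hardware has
 * consumed our commands; when it wraps past the end of the queue the
 * in-flight region is [first, end) plus [0, last), so completion is
 * CREADR landing in [last, first).
 */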

static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: As above, this is a significant delay: the
	 * time frame for a command to complete (and so free its
	 * descriptor) is not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timed out waiting for a free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands' completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes the number of bits used to encode the
		 * interrupt IDs supported by the device, minus one.
		 * When the V (valid) bit is zero, this field must be
		 * written as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to a Redistributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:	/* Map EventID to an LPI and collection */
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:	/* Map EventID to the LPI of the same number */
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:	/* Re-read one interrupt's configuration */
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:	/* Re-read configuration for a collection */
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}
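
/*
 * Example of the MAPD size encoding above: a device that allocated
 * 32 LPIs gives fls(32) == 6, so size == 5 and the ITT is declared
 * wide enough for 2^(5+1) == 64 EventIDs, more than enough for
 * EventIDs 0-31.
 */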

static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	if (target != ITS_TARGET_NONE) {
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}
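
/*
 * To summarize its_cmd_send(): under the command-queue spin lock a
 * slot is reserved and encoded, made visible to the ITS (a cache
 * clean or dsb, as the hardware requires), optionally followed by a
 * SYNC command aimed at the target redistributor, and then published
 * by advancing GITS_CWRITER; completion is polled via GITS_CREADR
 * outside the lock.
 */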

/* Handlers to send commands */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}
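
/*
 * Note the two IDs used above: the EventID (gi_id) is what the
 * device writes to GITS_TRANSLATER, while the physical INTID
 * (gi_lpi + GIC_FIRST_LPI) is the LPI number the translated
 * interrupt is delivered as; LPI INTIDs start at 8192.
 */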

static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

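/*
 * DEFINE_CLASS_1() expands to a static "<name>_baseclasses" array.
 * Both the FDT and ACPI attachments in this file use the class name
 * "its", so temporarily rename the array to keep the two definitions
 * from colliding.
 */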
#define	its_baseclasses	its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	if (!gicv3_get_support_lpis(dev))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as an interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif

#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define	its_baseclasses	its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	if (!gicv3_get_support_lpis(dev))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, di->msi_xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif