1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2024 The FreeBSD Foundation
5 *
6 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include "opt_acpi.h"
32 #include "opt_ddb.h"
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/domainset.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/memdesc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/rman.h>
44 #include <sys/rwlock.h>
45 #include <sys/smp.h>
46 #include <sys/taskqueue.h>
47 #include <sys/tree.h>
48 #include <sys/vmem.h>
49 #include <vm/vm.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_object.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_pageout.h>
54 #include <vm/vm_pager.h>
55 #include <contrib/dev/acpica/include/acpi.h>
56 #include <contrib/dev/acpica/include/accommon.h>
57 #include <dev/acpica/acpivar.h>
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
60 #include <machine/bus.h>
61 #include <machine/pci_cfgreg.h>
62 #include "pcib_if.h"
63 #include <machine/intr_machdep.h>
64 #include <machine/md_var.h>
65 #include <machine/cputypes.h>
66 #include <x86/apicreg.h>
67 #include <x86/apicvar.h>
68 #include <dev/iommu/iommu.h>
69 #include <x86/iommu/amd_reg.h>
70 #include <x86/iommu/x86_iommu.h>
71 #include <x86/iommu/amd_iommu.h>
72
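/*
 * The driver is disabled by default.  amdiommu_probe() fetches the
 * loader tunable "hw.amdiommu.enable" into this variable, so the units
 * can be enabled with, e.g., hw.amdiommu.enable=1 in loader.conf(5).
 */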
73 static int amdiommu_enable = 0;
74
75 /*
76 * All enumerated AMD IOMMU units.
77 * Access is unlocked, the list is not modified after early
78 * single-threaded startup.
79 */
80 static TAILQ_HEAD(, amdiommu_unit) amdiommu_units =
81 TAILQ_HEAD_INITIALIZER(amdiommu_units);
82
83 static u_int
ivrs_info_to_unit_id(UINT32 info)
85 {
86 return ((info & ACPI_IVHD_UNIT_ID_MASK) >> 8);
87 }
88
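/*
 * Iterator callbacks for the three IVHD subtable layouts: the type 0x40
 * (ACPI_IVRS_TYPE_HARDWARE3 in ACPICA terms) and type 0x11 headers
 * share the ACPI_IVRS_HARDWARE2 layout, while the legacy type 0x10
 * header uses ACPI_IVRS_HARDWARE1.
 */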
89 typedef bool (*amdiommu_itercc_t)(void *, void *);
90 typedef bool (*amdiommu_iter40_t)(ACPI_IVRS_HARDWARE2 *, void *);
91 typedef bool (*amdiommu_iter11_t)(ACPI_IVRS_HARDWARE2 *, void *);
92 typedef bool (*amdiommu_iter10_t)(ACPI_IVRS_HARDWARE1 *, void *);
93
94 static bool
amdiommu_ivrs_iterate_tbl_typed(amdiommu_itercc_t iter, void *arg,
96 int type, ACPI_TABLE_IVRS *ivrs_tbl)
97 {
98 char *ptr, *ptrend;
99 bool done;
100
101 done = false;
102 ptr = (char *)ivrs_tbl + sizeof(*ivrs_tbl);
103 ptrend = (char *)ivrs_tbl + ivrs_tbl->Header.Length;
104 for (;;) {
105 ACPI_IVRS_HEADER *ivrsh;
106
107 if (ptr >= ptrend)
108 break;
109 ivrsh = (ACPI_IVRS_HEADER *)ptr;
110 if (ivrsh->Length <= 0) {
111 printf("amdiommu_iterate_tbl: corrupted IVRS table, "
112 "length %d\n", ivrsh->Length);
113 break;
114 }
115 ptr += ivrsh->Length;
116 if (ivrsh->Type == type) {
117 done = iter((void *)ivrsh, arg);
118 if (done)
119 break;
120 }
121 }
122 return (done);
123 }
124
/*
 * Walk the IVRS table, calling the callback iterators in priority
 * order: type 0x40 subtables first, then type 0x11, then type 0x10.
 * The first iterator that returns true ends the walk.
 * Returns true if any iterator returned true, false otherwise.
 */
131 static bool
amdiommu_ivrs_iterate_tbl(amdiommu_iter40_t iter40, amdiommu_iter11_t iter11,
133 amdiommu_iter10_t iter10, void *arg)
134 {
135 ACPI_TABLE_IVRS *ivrs_tbl;
136 ACPI_STATUS status;
137 bool done;
138
139 status = AcpiGetTable(ACPI_SIG_IVRS, 1,
140 (ACPI_TABLE_HEADER **)&ivrs_tbl);
141 if (ACPI_FAILURE(status))
142 return (false);
143 done = false;
144 if (iter40 != NULL)
145 done = amdiommu_ivrs_iterate_tbl_typed(
146 (amdiommu_itercc_t)iter40, arg,
147 ACPI_IVRS_TYPE_HARDWARE3, ivrs_tbl);
148 if (!done && iter11 != NULL)
149 done = amdiommu_ivrs_iterate_tbl_typed(
150 (amdiommu_itercc_t)iter11, arg, ACPI_IVRS_TYPE_HARDWARE2,
151 ivrs_tbl);
152 if (!done && iter10 != NULL)
153 done = amdiommu_ivrs_iterate_tbl_typed(
154 (amdiommu_itercc_t)iter10, arg, ACPI_IVRS_TYPE_HARDWARE1,
155 ivrs_tbl);
156 AcpiPutTable((ACPI_TABLE_HEADER *)ivrs_tbl);
157 return (done);
158 }
159
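/*
 * Context for the ivrs_lookup_ivhd_* callbacks: match the IVHD whose
 * DeviceId equals the IOMMU's own PCI rid, and record the PCI segment,
 * unit id and (for type 0x11/0x40 subtables) the EFR image in the
 * softc.
 */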
160 struct ivhd_lookup_data {
161 struct amdiommu_unit *sc;
162 uint16_t devid;
163 };
164
165 static bool
ivrs_lookup_ivhd_0x40(ACPI_IVRS_HARDWARE2 *h2, void *arg)
167 {
168 struct ivhd_lookup_data *ildp;
169
170 KASSERT(h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
171 h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
172 ("Misparsed IVHD, h2 type %#x", h2->Header.Type));
173
174 ildp = arg;
175 if (h2->Header.DeviceId != ildp->devid)
176 return (false);
177
178 ildp->sc->unit_dom = h2->PciSegmentGroup;
179 ildp->sc->iommu.unit = ivrs_info_to_unit_id(h2->Info);
180 ildp->sc->efr = h2->EfrRegisterImage;
181 return (true);
182 }
183
184 static bool
ivrs_lookup_ivhd_0x10(ACPI_IVRS_HARDWARE1 *h1, void *arg)
186 {
187 struct ivhd_lookup_data *ildp;
188
189 KASSERT(h1->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
190 ("Misparsed IVHD, h1 type %#x", h1->Header.Type));
191
192 ildp = arg;
193 if (h1->Header.DeviceId != ildp->devid)
194 return (false);
195
196 ildp->sc->unit_dom = h1->PciSegmentGroup;
197 ildp->sc->iommu.unit = ivrs_info_to_unit_id(h1->Info);
198 return (true);
199 }
200
201 static u_int
amdiommu_devtbl_sz(struct amdiommu_unit *sc __unused)
203 {
204 return (sizeof(struct amdiommu_dte) * (1 << 16));
205 }
206
207 static void
amdiommu_free_dev_tbl(struct amdiommu_unit *sc)
209 {
210 u_int devtbl_sz;
211
212 devtbl_sz = amdiommu_devtbl_sz(sc);
213 pmap_qremove((vm_offset_t)sc->dev_tbl, atop(devtbl_sz));
214 kva_free((vm_offset_t)sc->dev_tbl, devtbl_sz);
215 sc->dev_tbl = NULL;
216 vm_object_deallocate(sc->devtbl_obj);
217 sc->devtbl_obj = NULL;
218 }
219
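/*
 * Allocate the device table, optionally split into up to eight
 * segments as advertised by the device-table segmentation field of the
 * EFR, map it into KVA, and program the per-segment DEVTAB base
 * registers.  Each base register takes the segment's physical address
 * ORed with its size in 4K pages minus one.
 */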
220 static int
amdiommu_create_dev_tbl(struct amdiommu_unit *sc)
222 {
223 vm_offset_t seg_vaddr;
224 u_int devtbl_sz, dom, i, reclaimno, segnum_log, segnum, seg_sz;
225 int error;
226
227 static const int devtab_base_regs[] = {
228 AMDIOMMU_DEVTAB_BASE,
229 AMDIOMMU_DEVTAB_S1_BASE,
230 AMDIOMMU_DEVTAB_S2_BASE,
231 AMDIOMMU_DEVTAB_S3_BASE,
232 AMDIOMMU_DEVTAB_S4_BASE,
233 AMDIOMMU_DEVTAB_S5_BASE,
234 AMDIOMMU_DEVTAB_S6_BASE,
235 AMDIOMMU_DEVTAB_S7_BASE
236 };
237
238 segnum_log = (sc->efr & AMDIOMMU_EFR_DEVTBLSEG_MASK) >>
239 AMDIOMMU_EFR_DEVTBLSEG_SHIFT;
240 segnum = 1 << segnum_log;
241
242 KASSERT(segnum <= nitems(devtab_base_regs),
243 ("%s: unsupported devtab segment count %u", __func__, segnum));
244
245 devtbl_sz = amdiommu_devtbl_sz(sc);
246 seg_sz = devtbl_sz / segnum;
247 sc->devtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, atop(devtbl_sz),
248 VM_PROT_ALL, 0, NULL);
249 if (bus_get_domain(sc->iommu.dev, &dom) == 0)
250 sc->devtbl_obj->domain.dr_policy = DOMAINSET_PREF(dom);
251
252 sc->hw_ctrl &= ~AMDIOMMU_CTRL_DEVTABSEG_MASK;
253 sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
254 sc->hw_ctrl |= AMDIOMMU_CTRL_COHERENT;
255 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
256
257 seg_vaddr = kva_alloc(devtbl_sz);
258 if (seg_vaddr == 0)
259 return (ENOMEM);
260 sc->dev_tbl = (void *)seg_vaddr;
261
262 for (i = 0; i < segnum; i++) {
263 vm_page_t m;
264 uint64_t rval;
265
266 for (reclaimno = 0; reclaimno < 3; reclaimno++) {
267 VM_OBJECT_WLOCK(sc->devtbl_obj);
268 m = vm_page_alloc_contig(sc->devtbl_obj,
269 i * atop(seg_sz),
270 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY,
271 atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0,
272 VM_MEMATTR_DEFAULT);
273 VM_OBJECT_WUNLOCK(sc->devtbl_obj);
274 if (m != NULL)
275 break;
276 error = vm_page_reclaim_contig(VM_ALLOC_NORMAL,
277 atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0);
278 if (error != 0)
279 vm_wait(sc->devtbl_obj);
280 }
281 if (m == NULL) {
282 amdiommu_free_dev_tbl(sc);
283 return (ENOMEM);
284 }
285
286 rval = VM_PAGE_TO_PHYS(m) | (atop(seg_sz) - 1);
287 for (u_int j = 0; j < atop(seg_sz);
288 j++, seg_vaddr += PAGE_SIZE, m++) {
289 pmap_zero_page(m);
290 pmap_qenter(seg_vaddr, &m, 1);
291 }
292 amdiommu_write8(sc, devtab_base_regs[i], rval);
293 }
294
295 return (0);
296 }
297
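/*
 * Interrupt filter for the shared command-completion/event-log MSI.
 * Completion-wait interrupts kick the invalidation-queue taskqueue,
 * while event-log and event-overflow bits are handed to
 * amdiommu_event_intr().
 */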
298 static int
amdiommu_cmd_event_intr(void *arg)
300 {
301 struct amdiommu_unit *unit;
302 uint64_t status;
303
304 unit = arg;
305 status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
306 if ((status & AMDIOMMU_CMDEVS_COMWAITINT) != 0) {
307 amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
308 AMDIOMMU_CMDEVS_COMWAITINT);
309 taskqueue_enqueue(unit->x86c.qi_taskqueue,
310 &unit->x86c.qi_task);
311 }
312 if ((status & (AMDIOMMU_CMDEVS_EVLOGINT |
313 AMDIOMMU_CMDEVS_EVOVRFLW)) != 0)
314 amdiommu_event_intr(unit, status);
315 return (FILTER_HANDLED);
316 }
317
318 static int
amdiommu_setup_intr(struct amdiommu_unit *sc)
320 {
321 int error, msi_count, msix_count;
322
323 msi_count = pci_msi_count(sc->iommu.dev);
324 msix_count = pci_msix_count(sc->iommu.dev);
325 if (msi_count == 0 && msix_count == 0) {
326 device_printf(sc->iommu.dev, "needs MSI-class intr\n");
327 return (ENXIO);
328 }
329
330 #if 0
	/*
	 * XXXKIB how is MSI-X supposed to be organized for a BAR-less
	 * function?  Practically, available hardware implements only
	 * one IOMMU unit per function and uses MSI.
	 */
336 if (msix_count > 0) {
337 sc->msix_table = bus_alloc_resource_any(sc->iommu.dev,
338 SYS_RES_MEMORY, &sc->msix_tab_rid, RF_ACTIVE);
339 if (sc->msix_table == NULL)
340 return (ENXIO);
341
342 if (sc->msix_pba_rid != sc->msix_tab_rid) {
343 /* Separate BAR for PBA */
344 sc->msix_pba = bus_alloc_resource_any(sc->iommu.dev,
345 SYS_RES_MEMORY,
346 &sc->msix_pba_rid, RF_ACTIVE);
347 if (sc->msix_pba == NULL) {
348 bus_release_resource(sc->iommu.dev,
349 SYS_RES_MEMORY, &sc->msix_tab_rid,
350 sc->msix_table);
351 return (ENXIO);
352 }
353 }
354 }
355 #endif
356
357 error = ENXIO;
358 if (msix_count > 0) {
359 error = pci_alloc_msix(sc->iommu.dev, &msix_count);
360 if (error == 0)
361 sc->numirqs = msix_count;
362 }
363 if (error != 0 && msi_count > 0) {
364 error = pci_alloc_msi(sc->iommu.dev, &msi_count);
365 if (error == 0)
366 sc->numirqs = msi_count;
367 }
368 if (error != 0) {
369 device_printf(sc->iommu.dev,
370 "Failed to allocate MSI/MSI-x (%d)\n", error);
371 return (ENXIO);
372 }
373
	/*
	 * XXXKIB The spec states that MISC0.MsiNum must be zero for an
	 * IOMMU using MSI interrupts.  But at least one BIOS programmed
	 * '2' there, making the driver use the wrong rid, so the
	 * command/event interrupt was ignored as stray.  Work around it
	 * by assuming that MsiNum is zero when plain MSI is used.
	 */
381 sc->irq_cmdev_rid = 1;
382 if (msix_count > 0) {
383 sc->irq_cmdev_rid += pci_read_config(sc->iommu.dev,
384 sc->seccap_reg + PCIR_AMDIOMMU_MISC0, 4) &
385 PCIM_AMDIOMMU_MISC0_MSINUM_MASK;
386 }
387
388 sc->irq_cmdev = bus_alloc_resource_any(sc->iommu.dev, SYS_RES_IRQ,
389 &sc->irq_cmdev_rid, RF_SHAREABLE | RF_ACTIVE);
390 if (sc->irq_cmdev == NULL) {
391 device_printf(sc->iommu.dev,
392 "unable to map CMD/EV interrupt\n");
393 return (ENXIO);
394 }
395 error = bus_setup_intr(sc->iommu.dev, sc->irq_cmdev,
396 INTR_TYPE_MISC, amdiommu_cmd_event_intr, NULL, sc,
397 &sc->irq_cmdev_cookie);
398 if (error != 0) {
399 device_printf(sc->iommu.dev,
400 "unable to setup interrupt (%d)\n", error);
401 return (ENXIO);
402 }
403 bus_describe_intr(sc->iommu.dev, sc->irq_cmdev, sc->irq_cmdev_cookie,
404 "cmdev");
405
406 if (x2apic_mode) {
407 AMDIOMMU_LOCK(sc);
408 sc->hw_ctrl |= AMDIOMMU_CTRL_GA_EN | AMDIOMMU_CTRL_XT_EN;
409 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
		/* XXXKIB AMDIOMMU_CTRL_INTCAPXT_EN and program x2APIC_CTRL */
411 AMDIOMMU_UNLOCK(sc);
412 }
413
414 return (0);
415 }
416
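/*
 * Probe matches PCI functions with the base-peripheral/IOMMU class
 * codes that carry a Secure Device capability with the expected IOMMU
 * capability type and revision, and only when the hw.amdiommu.enable
 * tunable is set.
 */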
417 static int
amdiommu_probe(device_t dev)
419 {
420 int seccap_reg;
421 int error;
422 uint32_t cap_h, cap_type, cap_rev;
423
424 if (acpi_disabled("amdiommu"))
425 return (ENXIO);
426 TUNABLE_INT_FETCH("hw.amdiommu.enable", &amdiommu_enable);
427 if (!amdiommu_enable)
428 return (ENXIO);
429 if (pci_get_class(dev) != PCIC_BASEPERIPH ||
430 pci_get_subclass(dev) != PCIS_BASEPERIPH_IOMMU)
431 return (ENXIO);
432
433 error = pci_find_cap(dev, PCIY_SECDEV, &seccap_reg);
434 if (error != 0 || seccap_reg == 0)
435 return (ENXIO);
436
437 cap_h = pci_read_config(dev, seccap_reg + PCIR_AMDIOMMU_CAP_HEADER,
438 4);
439 cap_type = cap_h & PCIM_AMDIOMMU_CAP_TYPE_MASK;
440 cap_rev = cap_h & PCIM_AMDIOMMU_CAP_REV_MASK;
441 if (cap_type != PCIM_AMDIOMMU_CAP_TYPE_VAL &&
442 cap_rev != PCIM_AMDIOMMU_CAP_REV_VAL)
443 return (ENXIO);
444
445 device_set_desc(dev, "DMA remap");
446 return (BUS_PROBE_SPECIFIC);
447 }
448
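/*
 * Attach order: map the MMIO register window advertised by the
 * capability, find our own IVHD entry (for the PCI segment, unit id
 * and EFR), then create the device table, command and event queues,
 * interrupts, busdma support and interrupt remapping, and only then
 * set AMDIOMMU_CTRL_EN.
 */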
449 static int
amdiommu_attach(device_t dev)
451 {
452 struct amdiommu_unit *sc;
453 struct ivhd_lookup_data ild;
454 int error;
455 uint32_t base_low, base_high;
456 bool res;
457
458 sc = device_get_softc(dev);
459 sc->iommu.dev = dev;
460
461 error = pci_find_cap(dev, PCIY_SECDEV, &sc->seccap_reg);
462 if (error != 0 || sc->seccap_reg == 0)
463 return (ENXIO);
464
465 base_low = pci_read_config(dev, sc->seccap_reg +
466 PCIR_AMDIOMMU_BASE_LOW, 4);
467 base_high = pci_read_config(dev, sc->seccap_reg +
468 PCIR_AMDIOMMU_BASE_HIGH, 4);
469 sc->mmio_base = (base_low & PCIM_AMDIOMMU_BASE_LOW_ADDRM) |
470 ((uint64_t)base_high << 32);
471
472 sc->device_id = pci_get_rid(dev);
473 ild.sc = sc;
474 ild.devid = sc->device_id;
475 res = amdiommu_ivrs_iterate_tbl(ivrs_lookup_ivhd_0x40,
476 ivrs_lookup_ivhd_0x40, ivrs_lookup_ivhd_0x10, &ild);
477 if (!res) {
478 device_printf(dev, "Cannot find IVHD\n");
479 return (ENXIO);
480 }
481
482 mtx_init(&sc->iommu.lock, "amdihw", NULL, MTX_DEF);
483 sc->domids = new_unrhdr(0, 0xffff, &sc->iommu.lock);
484 LIST_INIT(&sc->domains);
485 sysctl_ctx_init(&sc->iommu.sysctl_ctx);
486
487 sc->mmio_sz = ((sc->efr & AMDIOMMU_EFR_PC_SUP) != 0 ? 512 : 16) *
488 1024;
489
490 sc->mmio_rid = AMDIOMMU_RID;
491 error = bus_set_resource(dev, SYS_RES_MEMORY, AMDIOMMU_RID,
492 sc->mmio_base, sc->mmio_sz);
493 if (error != 0) {
494 device_printf(dev,
495 "bus_set_resource %#jx-%#jx failed, error %d\n",
496 (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
497 sc->mmio_sz, error);
498 error = ENXIO;
499 goto errout1;
500 }
501 sc->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->mmio_rid,
502 sc->mmio_base, sc->mmio_base + sc->mmio_sz - 1, sc->mmio_sz,
503 RF_ALLOCATED | RF_ACTIVE | RF_SHAREABLE);
504 if (sc->mmio_res == NULL) {
505 device_printf(dev,
506 "bus_alloc_resource %#jx-%#jx failed\n",
507 (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
508 sc->mmio_sz);
509 error = ENXIO;
510 goto errout2;
511 }
512
513 sc->hw_ctrl = amdiommu_read8(sc, AMDIOMMU_CTRL);
514 if (bootverbose)
515 device_printf(dev, "ctrl reg %#jx\n", (uintmax_t)sc->hw_ctrl);
516 if ((sc->hw_ctrl & AMDIOMMU_CTRL_EN) != 0) {
517 device_printf(dev, "CTRL_EN is set, bailing out\n");
518 error = EBUSY;
519 goto errout2;
520 }
521
522 iommu_high = BUS_SPACE_MAXADDR;
523
524 error = amdiommu_create_dev_tbl(sc);
525 if (error != 0)
526 goto errout3;
527
528 error = amdiommu_init_cmd(sc);
529 if (error != 0)
530 goto errout4;
531
532 error = amdiommu_init_event(sc);
533 if (error != 0)
534 goto errout5;
535
536 error = amdiommu_setup_intr(sc);
537 if (error != 0)
538 goto errout6;
539
540 error = iommu_init_busdma(AMD2IOMMU(sc));
541 if (error != 0)
542 goto errout7;
543
544 error = amdiommu_init_irt(sc);
545 if (error != 0)
546 goto errout8;
547
	/*
	 * Unlike DMAR, the AMD IOMMU does not process its command queue
	 * unless the IOMMU is enabled.  But since a non-present devtab
	 * entry makes the IOMMU ignore transactions from the
	 * corresponding initiator, DMA and interrupt remapping are de
	 * facto disabled even though we enable the unit here.
	 */
555 AMDIOMMU_LOCK(sc);
556 sc->hw_ctrl |= AMDIOMMU_CTRL_EN;
557 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
558 if (bootverbose) {
559 printf("amdiommu%d: enabled translation\n",
560 AMD2IOMMU(sc)->unit);
561 }
562 AMDIOMMU_UNLOCK(sc);
563
564 TAILQ_INSERT_TAIL(&amdiommu_units, sc, unit_next);
565 return (0);
566
567 errout8:
568 iommu_fini_busdma(&sc->iommu);
569 errout7:
570 pci_release_msi(dev);
571 errout6:
572 amdiommu_fini_event(sc);
573 errout5:
574 amdiommu_fini_cmd(sc);
575 errout4:
576 amdiommu_free_dev_tbl(sc);
577 errout3:
578 bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res);
579 errout2:
580 bus_delete_resource(dev, SYS_RES_MEMORY, sc->mmio_rid);
581 errout1:
582 sysctl_ctx_free(&sc->iommu.sysctl_ctx);
583 delete_unrhdr(sc->domids);
584 mtx_destroy(&sc->iommu.lock);
585
586 return (error);
587 }
588
589 static int
amdiommu_detach(device_t dev)
591 {
592 return (EBUSY);
593 }
594
595 static int
amdiommu_suspend(device_t dev)
597 {
598 /* XXXKIB */
599 return (0);
600 }
601
602 static int
amdiommu_resume(device_t dev)
604 {
605 /* XXXKIB */
606 return (0);
607 }
608
609 static device_method_t amdiommu_methods[] = {
610 DEVMETHOD(device_probe, amdiommu_probe),
611 DEVMETHOD(device_attach, amdiommu_attach),
612 DEVMETHOD(device_detach, amdiommu_detach),
613 DEVMETHOD(device_suspend, amdiommu_suspend),
614 DEVMETHOD(device_resume, amdiommu_resume),
615 DEVMETHOD_END
616 };
617
618 static driver_t amdiommu_driver = {
619 "amdiommu",
620 amdiommu_methods,
621 sizeof(struct amdiommu_unit),
622 };
623
624 EARLY_DRIVER_MODULE(amdiommu, pci, amdiommu_driver, 0, 0, BUS_PASS_SUPPORTDEV);
625 MODULE_DEPEND(amdiommu, pci, 1, 1, 1);
626
627 static struct amdiommu_unit *
amdiommu_unit_by_device_id(u_int pci_seg, u_int device_id)
629 {
630 struct amdiommu_unit *unit;
631
632 TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
633 if (unit->unit_dom == pci_seg && unit->device_id == device_id)
634 return (unit);
635 }
636 return (NULL);
637 }
638
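/*
 * Parameters and results of an IVHD device-entry lookup.  The caller
 * fills in the PCI segment, the requestor type and either the rid or
 * the IOAPIC/HPET handle in devno; a successful scan fills in the
 * owning IOMMU DeviceId, the effective (possibly aliased) rid, and the
 * DTE/extended DTE settings from the matching entry.
 */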
639 struct ivhd_find_unit {
640 u_int domain;
641 uintptr_t rid;
642 int devno;
643 enum {
644 IFU_DEV_PCI,
645 IFU_DEV_IOAPIC,
646 IFU_DEV_HPET,
647 } type;
648 u_int device_id;
649 uint16_t rid_real;
650 uint8_t dte;
651 uint32_t edte;
652 };
653
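/*
 * Walk the variable-sized device entries that follow an IVHD header:
 * 4-byte entries (all/select/start), 8-byte entries (alias, extended,
 * special) and variable-length HID entries are recognized, and range
 * (start) entries must be immediately followed by their END entry.
 * Returns true once an entry matching the request in *ifu is found.
 */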
654 static bool
amdiommu_find_unit_scan_ivrs(ACPI_IVRS_DE_HEADER *d, size_t tlen,
656 struct ivhd_find_unit *ifu)
657 {
658 char *db, *de;
659 size_t len;
660
661 for (de = (char *)d + tlen; (char *)d < de;
662 d = (ACPI_IVRS_DE_HEADER *)(db + len)) {
663 db = (char *)d;
664 if (d->Type == ACPI_IVRS_TYPE_PAD4) {
665 len = sizeof(ACPI_IVRS_DEVICE4);
666 } else if (d->Type == ACPI_IVRS_TYPE_ALL) {
667 ACPI_IVRS_DEVICE4 *d4;
668
669 d4 = (ACPI_IVRS_DEVICE4 *)db;
670 len = sizeof(*d4);
671 ifu->dte = d4->Header.DataSetting;
672 } else if (d->Type == ACPI_IVRS_TYPE_SELECT) {
673 ACPI_IVRS_DEVICE4 *d4;
674
675 d4 = (ACPI_IVRS_DEVICE4 *)db;
676 if (d4->Header.Id == ifu->rid) {
677 ifu->dte = d4->Header.DataSetting;
678 ifu->rid_real = ifu->rid;
679 return (true);
680 }
681 len = sizeof(*d4);
682 } else if (d->Type == ACPI_IVRS_TYPE_START) {
683 ACPI_IVRS_DEVICE4 *d4, *d4n;
684
685 d4 = (ACPI_IVRS_DEVICE4 *)db;
686 d4n = d4 + 1;
687 if (d4n->Header.Type != ACPI_IVRS_TYPE_END) {
688 printf("IVRS dev4 start not followed by END "
689 "(%#x)\n", d4n->Header.Type);
690 return (false);
691 }
692 if (d4->Header.Id <= ifu->rid &&
693 ifu->rid <= d4n->Header.Id) {
694 ifu->dte = d4->Header.DataSetting;
695 ifu->rid_real = ifu->rid;
696 return (true);
697 }
698 len = 2 * sizeof(*d4);
699 } else if (d->Type == ACPI_IVRS_TYPE_PAD8) {
700 len = sizeof(ACPI_IVRS_DEVICE8A);
701 } else if (d->Type == ACPI_IVRS_TYPE_ALIAS_SELECT) {
702 ACPI_IVRS_DEVICE8A *d8a;
703
704 d8a = (ACPI_IVRS_DEVICE8A *)db;
705 if (d8a->Header.Id == ifu->rid) {
706 ifu->dte = d8a->Header.DataSetting;
707 ifu->rid_real = d8a->UsedId;
708 return (true);
709 }
710 len = sizeof(*d8a);
711 } else if (d->Type == ACPI_IVRS_TYPE_ALIAS_START) {
712 ACPI_IVRS_DEVICE8A *d8a;
713 ACPI_IVRS_DEVICE4 *d4;
714
715 d8a = (ACPI_IVRS_DEVICE8A *)db;
716 d4 = (ACPI_IVRS_DEVICE4 *)(d8a + 1);
717 if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
718 printf("IVRS alias start not followed by END "
719 "(%#x)\n", d4->Header.Type);
720 return (false);
721 }
722 if (d8a->Header.Id <= ifu->rid &&
723 ifu->rid <= d4->Header.Id) {
724 ifu->dte = d8a->Header.DataSetting;
725 ifu->rid_real = d8a->UsedId;
726 return (true);
727 }
728 len = sizeof(*d8a) + sizeof(*d4);
729 } else if (d->Type == ACPI_IVRS_TYPE_EXT_SELECT) {
730 ACPI_IVRS_DEVICE8B *d8b;
731
732 d8b = (ACPI_IVRS_DEVICE8B *)db;
733 if (d8b->Header.Id == ifu->rid) {
734 ifu->dte = d8b->Header.DataSetting;
735 ifu->rid_real = ifu->rid;
736 ifu->edte = d8b->ExtendedData;
737 return (true);
738 }
739 len = sizeof(*d8b);
740 } else if (d->Type == ACPI_IVRS_TYPE_EXT_START) {
741 ACPI_IVRS_DEVICE8B *d8b;
742 ACPI_IVRS_DEVICE4 *d4;
743
744 d8b = (ACPI_IVRS_DEVICE8B *)db;
745 d4 = (ACPI_IVRS_DEVICE4 *)(db + sizeof(*d8b));
746 if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
747 printf("IVRS ext start not followed by END "
748 "(%#x)\n", d4->Header.Type);
749 return (false);
750 }
			if (d8b->Header.Id <= ifu->rid &&
752 ifu->rid <= d4->Header.Id) {
753 ifu->dte = d8b->Header.DataSetting;
754 ifu->rid_real = ifu->rid;
755 ifu->edte = d8b->ExtendedData;
756 return (true);
757 }
758 len = sizeof(*d8b) + sizeof(*d4);
759 } else if (d->Type == ACPI_IVRS_TYPE_SPECIAL) {
760 ACPI_IVRS_DEVICE8C *d8c;
761
762 d8c = (ACPI_IVRS_DEVICE8C *)db;
763 if (((ifu->type == IFU_DEV_IOAPIC &&
764 d8c->Variety == ACPI_IVHD_IOAPIC) ||
765 (ifu->type == IFU_DEV_HPET &&
766 d8c->Variety == ACPI_IVHD_HPET)) &&
767 ifu->devno == d8c->Handle) {
768 ifu->dte = d8c->Header.DataSetting;
769 ifu->rid_real = d8c->UsedId;
770 return (true);
771 }
772 len = sizeof(*d8c);
773 } else if (d->Type == ACPI_IVRS_TYPE_HID) {
774 ACPI_IVRS_DEVICE_HID *dh;
775
776 dh = (ACPI_IVRS_DEVICE_HID *)db;
777 len = sizeof(*dh) + dh->UidLength;
778 /* XXXKIB */
779 } else {
780 #if 0
781 printf("amdiommu: unknown IVRS device entry type %#x\n",
782 d->Type);
783 #endif
784 if (d->Type <= 63)
785 len = sizeof(ACPI_IVRS_DEVICE4);
786 else if (d->Type <= 127)
787 len = sizeof(ACPI_IVRS_DEVICE8A);
788 else {
789 printf("amdiommu: abort, cannot "
790 "advance iterator, item type %#x\n",
791 d->Type);
792 return (false);
793 }
794 }
795 }
796 return (false);
797 }
798
799 static bool
amdiommu_find_unit_scan_0x11(ACPI_IVRS_HARDWARE2 *ivrs, void *arg)
801 {
802 struct ivhd_find_unit *ifu = arg;
803 ACPI_IVRS_DE_HEADER *d;
804 bool res;
805
806 KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
807 ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
808 ("Misparsed IVHD h2, ivrs type %#x", ivrs->Header.Type));
809
810 if (ifu->domain != ivrs->PciSegmentGroup)
811 return (false);
812 d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
813 res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
814 if (res)
815 ifu->device_id = ivrs->Header.DeviceId;
816 return (res);
817 }
818
819 static bool
amdiommu_find_unit_scan_0x10(ACPI_IVRS_HARDWARE1 *ivrs, void *arg)
821 {
822 struct ivhd_find_unit *ifu = arg;
823 ACPI_IVRS_DE_HEADER *d;
824 bool res;
825
826 KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
827 ("Misparsed IVHD h1, ivrs type %#x", ivrs->Header.Type));
828
829 if (ifu->domain != ivrs->PciSegmentGroup)
830 return (false);
831 d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
832 res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
833 if (res)
834 ifu->device_id = ivrs->Header.DeviceId;
835 return (res);
836 }
837
838 static void
amdiommu_dev_prop_dtr(device_t dev, const char *name, void *val, void *dtr_ctx)
840 {
841 free(val, M_DEVBUF);
842 }
843
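/*
 * Per-device state is kept in a device property named after the IOMMU
 * unit; it is used below for the AMDIOMMU_DEV_REPORTED flag so that the
 * "initiator rid" line is printed only once per device.
 */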
844 static int *
amdiommu_dev_fetch_flagsp(struct amdiommu_unit *unit, device_t dev)
846 {
847 int *flagsp, error;
848
849 bus_topo_assert();
850 error = device_get_prop(dev, device_get_nameunit(unit->iommu.dev),
851 (void **)&flagsp);
852 if (error == ENOENT) {
853 flagsp = malloc(sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO);
854 device_set_prop(dev, device_get_nameunit(unit->iommu.dev),
855 flagsp, amdiommu_dev_prop_dtr, unit);
856 }
857 return (flagsp);
858 }
859
860 static int
amdiommu_get_dev_prop_flags(struct amdiommu_unit *unit, device_t dev)
862 {
863 int *flagsp, flags;
864
865 bus_topo_lock();
866 flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
867 flags = *flagsp;
868 bus_topo_unlock();
869 return (flags);
870 }
871
872 static void
amdiommu_set_dev_prop_flags(struct amdiommu_unit *unit, device_t dev,
874 int flag)
875 {
876 int *flagsp;
877
878 bus_topo_lock();
879 flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
880 *flagsp |= flag;
881 bus_topo_unlock();
882 }
883
884 int
amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp, uint16_t *ridp,
886 uint8_t *dtep, uint32_t *edtep, bool verbose)
887 {
888 struct ivhd_find_unit ifu;
889 struct amdiommu_unit *unit;
890 int error, flags;
891 bool res;
892
893 if (!amdiommu_enable)
894 return (ENXIO);
895
896 if (device_get_devclass(device_get_parent(dev)) !=
897 devclass_find("pci"))
898 return (ENXIO);
899
900 bzero(&ifu, sizeof(ifu));
901 ifu.type = IFU_DEV_PCI;
902
903 error = pci_get_id(dev, PCI_ID_RID, &ifu.rid);
904 if (error != 0) {
905 if (verbose)
906 device_printf(dev,
907 "amdiommu cannot get rid, error %d\n", error);
908 return (ENXIO);
909 }
910
911 ifu.domain = pci_get_domain(dev);
912 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
913 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
914 if (!res) {
915 if (verbose)
916 device_printf(dev,
917 "(%#06x:%#06x) amdiommu cannot match rid in IVHD\n",
918 ifu.domain, (unsigned)ifu.rid);
919 return (ENXIO);
920 }
921
922 unit = amdiommu_unit_by_device_id(ifu.domain, ifu.device_id);
923 if (unit == NULL) {
924 if (verbose)
925 device_printf(dev,
926 "(%#06x:%#06x) amdiommu cannot find unit\n",
927 ifu.domain, (unsigned)ifu.rid);
928 return (ENXIO);
929 }
930 *unitp = unit;
931 iommu_device_set_iommu_prop(dev, unit->iommu.dev);
932 if (ridp != NULL)
933 *ridp = ifu.rid_real;
934 if (dtep != NULL)
935 *dtep = ifu.dte;
936 if (edtep != NULL)
937 *edtep = ifu.edte;
938 if (verbose) {
939 flags = amdiommu_get_dev_prop_flags(unit, dev);
940 if ((flags & AMDIOMMU_DEV_REPORTED) == 0) {
941 amdiommu_set_dev_prop_flags(unit, dev,
942 AMDIOMMU_DEV_REPORTED);
943 device_printf(dev, "amdiommu%d "
944 "initiator rid %#06x dte %#x edte %#x\n",
945 unit->iommu.unit, ifu.rid_real, ifu.dte, ifu.edte);
946 }
947 }
948 return (0);
949 }
950
951 int
amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
953 uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
954 {
955 struct ivhd_find_unit ifu;
956 struct amdiommu_unit *unit;
957 device_t apic_dev;
958 bool res;
959
960 if (!amdiommu_enable)
961 return (ENXIO);
962
963 bzero(&ifu, sizeof(ifu));
964 ifu.type = IFU_DEV_IOAPIC;
965 ifu.devno = apic_id;
966 ifu.rid = -1;
967
968 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
969 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
970 if (!res) {
971 if (verbose)
972 printf("amdiommu cannot match ioapic no %d in IVHD\n",
973 apic_id);
974 return (ENXIO);
975 }
976
	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit by dev id %#x\n",
			    ifu.device_id);
		return (ENXIO);
	}
	apic_dev = ioapic_get_dev(apic_id);
	if (apic_dev != NULL)
		iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
	*unitp = unit;
988 if (ridp != NULL)
989 *ridp = ifu.rid_real;
990 if (dtep != NULL)
991 *dtep = ifu.dte;
992 if (edtep != NULL)
993 *edtep = ifu.edte;
994 if (verbose) {
995 printf("amdiommu%d IOAPIC %d "
996 "initiator rid %#06x dte %#x edte %#x\n",
997 unit->iommu.unit, apic_id, ifu.rid_real, ifu.dte,
998 ifu.edte);
999 }
1000 return (0);
1001 }
1002
1003 int
amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
1005 uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
1006 {
1007 struct ivhd_find_unit ifu;
1008 struct amdiommu_unit *unit;
1009 int hpet_no;
1010 bool res;
1011
1012 if (!amdiommu_enable)
1013 return (ENXIO);
1014
1015 hpet_no = hpet_get_uid(hpet);
1016 bzero(&ifu, sizeof(ifu));
1017 ifu.type = IFU_DEV_HPET;
1018 ifu.devno = hpet_no;
1019 ifu.rid = -1;
1020
1021 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
1022 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
1023 if (!res) {
1024 if (verbose)
1025 printf("amdiommu cannot match hpet no %d in IVHD\n",
1026 hpet_no);
1027 return (ENXIO);
1028 }
1029
1030 unit = amdiommu_unit_by_device_id(0, ifu.device_id);
1031 if (unit == NULL) {
1032 if (verbose)
1033 printf("amdiommu cannot find unit id %d\n",
1034 hpet_no);
1035 return (ENXIO);
1036 }
1037 *unitp = unit;
1038 iommu_device_set_iommu_prop(hpet, unit->iommu.dev);
1039 if (ridp != NULL)
1040 *ridp = ifu.rid_real;
1041 if (dtep != NULL)
1042 *dtep = ifu.dte;
1043 if (edtep != NULL)
1044 *edtep = ifu.edte;
1045 if (verbose) {
1046 printf("amdiommu%d HPET no %d "
1047 "initiator rid %#06x dte %#x edte %#x\n",
1048 unit->iommu.unit, hpet_no, ifu.rid_real, ifu.dte,
1049 ifu.edte);
1050 }
1051 return (0);
1052 }
1053
1054 static struct iommu_unit *
amdiommu_find_method(device_t dev, bool verbose)
1056 {
1057 struct amdiommu_unit *unit;
1058 int error;
1059 uint32_t edte;
1060 uint16_t rid;
1061 uint8_t dte;
1062
1063 error = amdiommu_find_unit(dev, &unit, &rid, &dte, &edte, verbose);
1064 if (error != 0) {
1065 if (verbose && amdiommu_enable)
1066 device_printf(dev,
1067 "cannot find amdiommu unit, error %d\n",
1068 error);
1069 return (NULL);
1070 }
1071 return (&unit->iommu);
1072 }
1073
1074 static struct x86_unit_common *
amdiommu_get_x86_common(struct iommu_unit *unit)
1076 {
1077 struct amdiommu_unit *iommu;
1078
1079 iommu = IOMMU2AMD(unit);
1080 return (&iommu->x86c);
1081 }
1082
1083 static void
amdiommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
1085 {
1086 }
1087
1088 static struct x86_iommu amd_x86_iommu = {
1089 .get_x86_common = amdiommu_get_x86_common,
1090 .unit_pre_instantiate_ctx = amdiommu_unit_pre_instantiate_ctx,
1091 .find = amdiommu_find_method,
1092 .domain_unload_entry = amdiommu_domain_unload_entry,
1093 .domain_unload = amdiommu_domain_unload,
1094 .get_ctx = amdiommu_get_ctx,
1095 .free_ctx_locked = amdiommu_free_ctx_locked_method,
1096 .alloc_msi_intr = amdiommu_alloc_msi_intr,
1097 .map_msi_intr = amdiommu_map_msi_intr,
1098 .unmap_msi_intr = amdiommu_unmap_msi_intr,
1099 .map_ioapic_intr = amdiommu_map_ioapic_intr,
1100 .unmap_ioapic_intr = amdiommu_unmap_ioapic_intr,
1101 };
1102
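/*
 * Register the AMD backend with the generic x86 IOMMU code early
 * (at SI_SUB_TUNABLES); this is a no-op on non-AMD CPUs.
 */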
1103 static void
x86_iommu_set_amd(void *arg __unused)
1105 {
1106 if (cpu_vendor_id == CPU_VENDOR_AMD)
1107 set_x86_iommu(&amd_x86_iommu);
1108 }
1109
1110 SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_amd, NULL);
1111
1112 #ifdef DDB
1113 #include <ddb/ddb.h>
1114 #include <ddb/db_lex.h>
1115
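/*
 * DDB support: "show amdiommu [/d] [/m] [/q] index" dumps a single unit
 * (by index or by softc address) and "show all amdiommus" dumps every
 * unit; the modifiers add domains, mappings and the command queue.
 */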
1116 static void
amdiommu_print_domain(struct amdiommu_domain *domain, bool show_mappings)
1118 {
1119 struct iommu_domain *iodom;
1120
1121 iodom = DOM2IODOM(domain);
1122
1123 db_printf(
1124 " @%p dom %d pglvl %d end %jx refs %d\n"
1125 " ctx_cnt %d flags %x pgobj %p map_ents %u\n",
1126 domain, domain->domain, domain->pglvl,
1127 (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
1128 domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);
1129
1130 iommu_db_domain_print_contexts(iodom);
1131
1132 if (show_mappings)
1133 iommu_db_domain_print_mappings(iodom);
1134 }
1135
1136 static void
amdiommu_print_one(struct amdiommu_unit *unit, bool show_domains,
1138 bool show_mappings, bool show_cmdq)
1139 {
1140 struct amdiommu_domain *domain;
1141 struct amdiommu_cmd_generic *cp;
1142 u_int cmd_head, cmd_tail, ci;
1143
1144 cmd_head = amdiommu_read4(unit, AMDIOMMU_CMDBUF_HEAD);
1145 cmd_tail = amdiommu_read4(unit, AMDIOMMU_CMDBUF_TAIL);
1146 db_printf("amdiommu%d at %p, mmio at %#jx/sz %#jx\n",
1147 unit->iommu.unit, unit, (uintmax_t)unit->mmio_base,
1148 (uintmax_t)unit->mmio_sz);
1149 db_printf(" hw ctrl %#018jx cmdevst %#018jx\n",
1150 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CTRL),
1151 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS));
1152 db_printf(" devtbl at %p\n", unit->dev_tbl);
1153 db_printf(" hwseq at %p phys %#jx val %#jx\n",
1154 &unit->x86c.inv_waitd_seq_hw,
1155 pmap_kextract((vm_offset_t)&unit->x86c.inv_waitd_seq_hw),
1156 unit->x86c.inv_waitd_seq_hw);
1157 db_printf(" invq at %p base %#jx hw head/tail %#x/%#x\n",
1158 unit->x86c.inv_queue,
1159 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDBUF_BASE),
1160 cmd_head, cmd_tail);
1161
1162 if (show_cmdq) {
1163 db_printf(" cmd q:\n");
1164 for (ci = cmd_head; ci != cmd_tail;) {
1165 cp = (struct amdiommu_cmd_generic *)(unit->
1166 x86c.inv_queue + ci);
1167 db_printf(
1168 " idx %#x op %#x %#010x %#010x %#010x %#010x\n",
1169 ci >> AMDIOMMU_CMD_SZ_SHIFT, cp->op,
1170 cp->w0, cp->ww1, cp->w2, cp->w3);
1171
1172 ci += AMDIOMMU_CMD_SZ;
1173 if (ci == unit->x86c.inv_queue_size)
1174 ci = 0;
1175 }
1176 }
1177
1178 if (show_domains) {
1179 db_printf(" domains:\n");
1180 LIST_FOREACH(domain, &unit->domains, link) {
1181 amdiommu_print_domain(domain, show_mappings);
1182 if (db_pager_quit)
1183 break;
1184 }
1185 }
1186 }
1187
DB_SHOW_COMMAND(amdiommu, db_amdiommu_print)
1189 {
1190 struct amdiommu_unit *unit;
1191 bool show_domains, show_mappings, show_cmdq;
1192
1193 show_domains = strchr(modif, 'd') != NULL;
1194 show_mappings = strchr(modif, 'm') != NULL;
1195 show_cmdq = strchr(modif, 'q') != NULL;
1196 if (!have_addr) {
1197 db_printf("usage: show amdiommu [/d] [/m] [/q] index\n");
1198 return;
1199 }
1200 if ((vm_offset_t)addr < 0x10000)
1201 unit = amdiommu_unit_by_device_id(0, (u_int)addr);
1202 else
1203 unit = (struct amdiommu_unit *)addr;
1204 amdiommu_print_one(unit, show_domains, show_mappings, show_cmdq);
1205 }
1206
DB_SHOW_ALL_COMMAND(amdiommus, db_show_all_amdiommus)
1208 {
1209 struct amdiommu_unit *unit;
1210 bool show_domains, show_mappings, show_cmdq;
1211
1212 show_domains = strchr(modif, 'd') != NULL;
1213 show_mappings = strchr(modif, 'm') != NULL;
1214 show_cmdq = strchr(modif, 'q') != NULL;
1215
1216 TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
1217 amdiommu_print_one(unit, show_domains, show_mappings,
1218 show_cmdq);
1219 if (db_pager_quit)
1220 break;
1221 }
1222 }
1223 #endif
1224