1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2024 The FreeBSD Foundation
5 *
6 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include "opt_acpi.h"
32 #include "opt_ddb.h"
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/domainset.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/memdesc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/rman.h>
44 #include <sys/rwlock.h>
45 #include <sys/smp.h>
46 #include <sys/taskqueue.h>
47 #include <sys/tree.h>
48 #include <sys/vmem.h>
49 #include <vm/vm.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_object.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_pageout.h>
54 #include <vm/vm_pager.h>
55 #include <contrib/dev/acpica/include/acpi.h>
56 #include <contrib/dev/acpica/include/accommon.h>
57 #include <dev/acpica/acpivar.h>
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
60 #include <machine/bus.h>
61 #include <machine/pci_cfgreg.h>
62 #include "pcib_if.h"
63 #include <machine/intr_machdep.h>
64 #include <machine/md_var.h>
65 #include <machine/cputypes.h>
66 #include <x86/apicreg.h>
67 #include <x86/apicvar.h>
68 #include <dev/iommu/iommu.h>
69 #include <x86/iommu/amd_reg.h>
70 #include <x86/iommu/x86_iommu.h>
71 #include <x86/iommu/amd_iommu.h>
72
73 static int amdiommu_enable = 0;
74 static bool amdiommu_running = false;
75
76 /*
77 * All enumerated AMD IOMMU units.
78 * Access is unlocked, the list is not modified after early
79 * single-threaded startup.
80 */
81 static TAILQ_HEAD(, amdiommu_unit) amdiommu_units =
82 TAILQ_HEAD_INITIALIZER(amdiommu_units);
83
84 typedef bool (*amdiommu_itercc_t)(void *, void *);
85 typedef bool (*amdiommu_iter40_t)(ACPI_IVRS_HARDWARE2 *, void *);
86 typedef bool (*amdiommu_iter11_t)(ACPI_IVRS_HARDWARE2 *, void *);
87 typedef bool (*amdiommu_iter10_t)(ACPI_IVRS_HARDWARE1 *, void *);
88
89 static bool
amdiommu_ivrs_iterate_tbl_typed(amdiommu_itercc_t iter, void *arg,
91 int type, ACPI_TABLE_IVRS *ivrs_tbl)
92 {
93 char *ptr, *ptrend;
94 bool done;
95
96 done = false;
97 ptr = (char *)ivrs_tbl + sizeof(*ivrs_tbl);
98 ptrend = (char *)ivrs_tbl + ivrs_tbl->Header.Length;
99 for (;;) {
100 ACPI_IVRS_HEADER *ivrsh;
101
102 if (ptr >= ptrend)
103 break;
104 ivrsh = (ACPI_IVRS_HEADER *)ptr;
105 if (ivrsh->Length <= 0) {
106 printf("amdiommu_iterate_tbl: corrupted IVRS table, "
107 "length %d\n", ivrsh->Length);
108 break;
109 }
110 ptr += ivrsh->Length;
111 if (ivrsh->Type == type) {
112 done = iter((void *)ivrsh, arg);
113 if (done)
114 break;
115 }
116 }
117 return (done);
118 }
119
/*
 * Walk over the IVRS table, calling the callback iterators in
 * priority order: type 0x40 subtables first, then 0x11, then 0x10.
 * The first iterator that returns true ends the walk.
 * Returns true if any iterator returned true, otherwise false.
 */
126 static bool
amdiommu_ivrs_iterate_tbl(amdiommu_iter40_t iter40, amdiommu_iter11_t iter11,
128 amdiommu_iter10_t iter10, void *arg)
129 {
130 ACPI_TABLE_IVRS *ivrs_tbl;
131 ACPI_STATUS status;
132 bool done;
133
134 status = AcpiGetTable(ACPI_SIG_IVRS, 1,
135 (ACPI_TABLE_HEADER **)&ivrs_tbl);
136 if (ACPI_FAILURE(status))
137 return (false);
138 done = false;
139 if (iter40 != NULL)
140 done = amdiommu_ivrs_iterate_tbl_typed(
141 (amdiommu_itercc_t)iter40, arg,
142 ACPI_IVRS_TYPE_HARDWARE3, ivrs_tbl);
143 if (!done && iter11 != NULL)
144 done = amdiommu_ivrs_iterate_tbl_typed(
145 (amdiommu_itercc_t)iter11, arg, ACPI_IVRS_TYPE_HARDWARE2,
146 ivrs_tbl);
147 if (!done && iter10 != NULL)
148 done = amdiommu_ivrs_iterate_tbl_typed(
149 (amdiommu_itercc_t)iter10, arg, ACPI_IVRS_TYPE_HARDWARE1,
150 ivrs_tbl);
151 AcpiPutTable((ACPI_TABLE_HEADER *)ivrs_tbl);
152 return (done);
153 }
154
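/*
 * Context for the IVHD lookup iterators: the unit being attached and
 * the PCI rid of its IOMMU function.  A matching IVHD supplies the
 * unit's PCI segment group and, for type 0x11/0x40 IVHDs, the EFR
 * register image.
 */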
155 struct ivhd_lookup_data {
156 struct amdiommu_unit *sc;
157 uint16_t devid;
158 };
159
160 static bool
ivrs_lookup_ivhd_0x40(ACPI_IVRS_HARDWARE2 *h2, void *arg)
162 {
163 struct ivhd_lookup_data *ildp;
164
165 KASSERT(h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
166 h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
167 ("Misparsed IVHD, h2 type %#x", h2->Header.Type));
168
169 ildp = arg;
170 if (h2->Header.DeviceId != ildp->devid)
171 return (false);
172
173 ildp->sc->unit_dom = h2->PciSegmentGroup;
174 ildp->sc->efr = h2->EfrRegisterImage;
175 return (true);
176 }
177
178 static bool
ivrs_lookup_ivhd_0x10(ACPI_IVRS_HARDWARE1 *h1, void *arg)
180 {
181 struct ivhd_lookup_data *ildp;
182
183 KASSERT(h1->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
184 ("Misparsed IVHD, h1 type %#x", h1->Header.Type));
185
186 ildp = arg;
187 if (h1->Header.DeviceId != ildp->devid)
188 return (false);
189
190 ildp->sc->unit_dom = h1->PciSegmentGroup;
191 return (true);
192 }
193
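/*
 * The device table covers the whole 16-bit device id space, one DTE
 * per possible requester id.
 */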
194 static u_int
amdiommu_devtbl_sz(struct amdiommu_unit *sc __unused)
196 {
197 return (sizeof(struct amdiommu_dte) * (1 << 16));
198 }
199
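/*
 * Unmap the device table from KVA and release both the KVA range and
 * the backing VM object.
 */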
200 static void
amdiommu_free_dev_tbl(struct amdiommu_unit *sc)
202 {
203 u_int devtbl_sz;
204
205 devtbl_sz = amdiommu_devtbl_sz(sc);
206 pmap_qremove((vm_offset_t)sc->dev_tbl, atop(devtbl_sz));
207 kva_free((vm_offset_t)sc->dev_tbl, devtbl_sz);
208 sc->dev_tbl = NULL;
209 vm_object_deallocate(sc->devtbl_obj);
210 sc->devtbl_obj = NULL;
211 }
212
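/*
 * Allocate the device table.  Depending on the device-table
 * segmentation field of the EFR, the table is split into up to eight
 * physically contiguous segments; each segment is zeroed, mapped into
 * a contiguous KVA range, and its base and size are programmed into
 * the corresponding DEVTAB base register.
 */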
213 static int
amdiommu_create_dev_tbl(struct amdiommu_unit *sc)
215 {
216 vm_offset_t seg_vaddr;
217 u_int devtbl_sz, dom, i, reclaimno, segnum_log, segnum, seg_sz;
218 int error;
219
220 static const int devtab_base_regs[] = {
221 AMDIOMMU_DEVTAB_BASE,
222 AMDIOMMU_DEVTAB_S1_BASE,
223 AMDIOMMU_DEVTAB_S2_BASE,
224 AMDIOMMU_DEVTAB_S3_BASE,
225 AMDIOMMU_DEVTAB_S4_BASE,
226 AMDIOMMU_DEVTAB_S5_BASE,
227 AMDIOMMU_DEVTAB_S6_BASE,
228 AMDIOMMU_DEVTAB_S7_BASE
229 };
230
231 segnum_log = (sc->efr & AMDIOMMU_EFR_DEVTBLSEG_MASK) >>
232 AMDIOMMU_EFR_DEVTBLSEG_SHIFT;
233 segnum = 1 << segnum_log;
234
235 KASSERT(segnum <= nitems(devtab_base_regs),
236 ("%s: unsupported devtab segment count %u", __func__, segnum));
237
238 devtbl_sz = amdiommu_devtbl_sz(sc);
239 seg_sz = devtbl_sz / segnum;
240 sc->devtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, atop(devtbl_sz),
241 VM_PROT_ALL, 0, NULL);
242 if (bus_get_domain(sc->iommu.dev, &dom) == 0)
243 sc->devtbl_obj->domain.dr_policy = DOMAINSET_PREF(dom);
244
245 sc->hw_ctrl &= ~AMDIOMMU_CTRL_DEVTABSEG_MASK;
246 sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
247 sc->hw_ctrl |= AMDIOMMU_CTRL_COHERENT;
248 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
249
250 seg_vaddr = kva_alloc(devtbl_sz);
251 if (seg_vaddr == 0)
252 return (ENOMEM);
253 sc->dev_tbl = (void *)seg_vaddr;
254
255 for (i = 0; i < segnum; i++) {
256 vm_page_t m;
257 uint64_t rval;
258
259 for (reclaimno = 0; reclaimno < 3; reclaimno++) {
260 VM_OBJECT_WLOCK(sc->devtbl_obj);
261 m = vm_page_alloc_contig(sc->devtbl_obj,
262 i * atop(seg_sz),
263 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY,
264 atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0,
265 VM_MEMATTR_DEFAULT);
266 VM_OBJECT_WUNLOCK(sc->devtbl_obj);
267 if (m != NULL)
268 break;
269 error = vm_page_reclaim_contig(VM_ALLOC_NORMAL,
270 atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0);
271 if (error != 0)
272 vm_wait(sc->devtbl_obj);
273 }
274 if (m == NULL) {
275 amdiommu_free_dev_tbl(sc);
276 return (ENOMEM);
277 }
278
279 rval = VM_PAGE_TO_PHYS(m) | (atop(seg_sz) - 1);
280 for (u_int j = 0; j < atop(seg_sz);
281 j++, seg_vaddr += PAGE_SIZE, m++) {
282 pmap_zero_page(m);
283 pmap_qenter(seg_vaddr, &m, 1);
284 }
285 amdiommu_write8(sc, devtab_base_regs[i], rval);
286 }
287
288 return (0);
289 }
290
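/*
 * Interrupt filter for the combined command-completion/event-log
 * interrupt.  A completion-wait interrupt kicks the queued
 * invalidation taskqueue; event log and overflow interrupts are
 * passed to the event handling code.
 */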
291 static int
amdiommu_cmd_event_intr(void *arg)
293 {
294 struct amdiommu_unit *unit;
295 uint64_t status;
296
297 unit = arg;
298 status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
299 if ((status & AMDIOMMU_CMDEVS_COMWAITINT) != 0) {
300 amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
301 AMDIOMMU_CMDEVS_COMWAITINT);
302 taskqueue_enqueue(unit->x86c.qi_taskqueue,
303 &unit->x86c.qi_task);
304 }
305 if ((status & (AMDIOMMU_CMDEVS_EVLOGINT |
306 AMDIOMMU_CMDEVS_EVOVRFLW)) != 0)
307 amdiommu_event_intr(unit, status);
308 return (FILTER_HANDLED);
309 }
310
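/*
 * Allocate MSI-X or MSI vectors and set up the command/event
 * interrupt handler.  When the host runs in x2APIC mode, also set the
 * GA and XT enable bits in the control register.
 */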
311 static int
amdiommu_setup_intr(struct amdiommu_unit *sc)
313 {
314 int error, msi_count, msix_count;
315
316 msi_count = pci_msi_count(sc->iommu.dev);
317 msix_count = pci_msix_count(sc->iommu.dev);
318 if (msi_count == 0 && msix_count == 0) {
319 device_printf(sc->iommu.dev, "needs MSI-class intr\n");
320 return (ENXIO);
321 }
322
323 #if 0
	/*
	 * XXXKIB How is MSI-X supposed to be organized for a BAR-less
	 * function?  Practically, available hardware implements only
	 * one IOMMU unit per function, and uses MSI.
	 */
329 if (msix_count > 0) {
330 sc->msix_table = bus_alloc_resource_any(sc->iommu.dev,
331 SYS_RES_MEMORY, &sc->msix_tab_rid, RF_ACTIVE);
332 if (sc->msix_table == NULL)
333 return (ENXIO);
334
335 if (sc->msix_pba_rid != sc->msix_tab_rid) {
336 /* Separate BAR for PBA */
337 sc->msix_pba = bus_alloc_resource_any(sc->iommu.dev,
338 SYS_RES_MEMORY,
339 &sc->msix_pba_rid, RF_ACTIVE);
340 if (sc->msix_pba == NULL) {
341 bus_release_resource(sc->iommu.dev,
342 SYS_RES_MEMORY, &sc->msix_tab_rid,
343 sc->msix_table);
344 return (ENXIO);
345 }
346 }
347 }
348 #endif
349
350 error = ENXIO;
351 if (msix_count > 0) {
352 error = pci_alloc_msix(sc->iommu.dev, &msix_count);
353 if (error == 0)
354 sc->numirqs = msix_count;
355 }
356 if (error != 0 && msi_count > 0) {
357 error = pci_alloc_msi(sc->iommu.dev, &msi_count);
358 if (error == 0)
359 sc->numirqs = msi_count;
360 }
361 if (error != 0) {
362 device_printf(sc->iommu.dev,
363 "Failed to allocate MSI/MSI-x (%d)\n", error);
364 return (ENXIO);
365 }
366
	/*
	 * XXXKIB The spec states that MISC0.MsiNum must be zero for an
	 * IOMMU using MSI interrupts.  But at least one BIOS programmed
	 * '2' there, making the driver use the wrong rid and causing the
	 * command/event interrupt to be ignored as stray.  Work around it
	 * by assuming that MsiNum is zero when plain MSI is used.
	 */
374 sc->irq_cmdev_rid = 1;
375 if (msix_count > 0) {
376 sc->irq_cmdev_rid += pci_read_config(sc->iommu.dev,
377 sc->seccap_reg + PCIR_AMDIOMMU_MISC0, 4) &
378 PCIM_AMDIOMMU_MISC0_MSINUM_MASK;
379 }
380
381 sc->irq_cmdev = bus_alloc_resource_any(sc->iommu.dev, SYS_RES_IRQ,
382 &sc->irq_cmdev_rid, RF_SHAREABLE | RF_ACTIVE);
383 if (sc->irq_cmdev == NULL) {
384 device_printf(sc->iommu.dev,
385 "unable to map CMD/EV interrupt\n");
386 return (ENXIO);
387 }
388 error = bus_setup_intr(sc->iommu.dev, sc->irq_cmdev,
389 INTR_TYPE_MISC, amdiommu_cmd_event_intr, NULL, sc,
390 &sc->irq_cmdev_cookie);
391 if (error != 0) {
392 device_printf(sc->iommu.dev,
393 "unable to setup interrupt (%d)\n", error);
394 return (ENXIO);
395 }
396 bus_describe_intr(sc->iommu.dev, sc->irq_cmdev, sc->irq_cmdev_cookie,
397 "cmdev");
398
399 if (x2apic_mode) {
400 AMDIOMMU_LOCK(sc);
401 sc->hw_ctrl |= AMDIOMMU_CTRL_GA_EN | AMDIOMMU_CTRL_XT_EN;
402 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
403 // XXXKIB AMDIOMMU_CTRL_INTCAPXT_EN and program x2APIC_CTRL
404 AMDIOMMU_UNLOCK(sc);
405 }
406
407 return (0);
408 }
409
410 static int
amdiommu_probe(device_t dev)
412 {
413 int seccap_reg;
414 int error;
415 uint32_t cap_h, cap_type, cap_rev;
416
417 if (acpi_disabled("amdiommu"))
418 return (ENXIO);
419 TUNABLE_INT_FETCH("hw.amdiommu.enable", &amdiommu_enable);
420 if (!amdiommu_enable)
421 return (ENXIO);
422 if (pci_get_class(dev) != PCIC_BASEPERIPH ||
423 pci_get_subclass(dev) != PCIS_BASEPERIPH_IOMMU)
424 return (ENXIO);
425
426 error = pci_find_cap(dev, PCIY_SECDEV, &seccap_reg);
427 if (error != 0 || seccap_reg == 0)
428 return (ENXIO);
429
430 cap_h = pci_read_config(dev, seccap_reg + PCIR_AMDIOMMU_CAP_HEADER,
431 4);
432 cap_type = cap_h & PCIM_AMDIOMMU_CAP_TYPE_MASK;
433 cap_rev = cap_h & PCIM_AMDIOMMU_CAP_REV_MASK;
434 if (cap_type != PCIM_AMDIOMMU_CAP_TYPE_VAL &&
435 cap_rev != PCIM_AMDIOMMU_CAP_REV_VAL)
436 return (ENXIO);
437
438 device_set_desc(dev, "DMA remap");
439 return (BUS_PROBE_SPECIFIC);
440 }
441
442 static int
amdiommu_attach(device_t dev)
444 {
445 struct amdiommu_unit *sc;
446 struct ivhd_lookup_data ild;
447 int error;
448 uint32_t base_low, base_high;
449 bool res;
450
451 sc = device_get_softc(dev);
452 sc->iommu.unit = device_get_unit(dev);
453 sc->iommu.dev = dev;
454
455 error = pci_find_cap(dev, PCIY_SECDEV, &sc->seccap_reg);
456 if (error != 0 || sc->seccap_reg == 0)
457 return (ENXIO);
458
459 base_low = pci_read_config(dev, sc->seccap_reg +
460 PCIR_AMDIOMMU_BASE_LOW, 4);
461 base_high = pci_read_config(dev, sc->seccap_reg +
462 PCIR_AMDIOMMU_BASE_HIGH, 4);
463 sc->mmio_base = (base_low & PCIM_AMDIOMMU_BASE_LOW_ADDRM) |
464 ((uint64_t)base_high << 32);
465
466 sc->device_id = pci_get_rid(dev);
467 ild.sc = sc;
468 ild.devid = sc->device_id;
469 res = amdiommu_ivrs_iterate_tbl(ivrs_lookup_ivhd_0x40,
470 ivrs_lookup_ivhd_0x40, ivrs_lookup_ivhd_0x10, &ild);
471 if (!res) {
472 device_printf(dev, "Cannot find IVHD\n");
473 return (ENXIO);
474 }
475
476 mtx_init(&sc->iommu.lock, "amdihw", NULL, MTX_DEF);
477 sc->domids = new_unrhdr(0, 0xffff, &sc->iommu.lock);
478 LIST_INIT(&sc->domains);
479 sysctl_ctx_init(&sc->iommu.sysctl_ctx);
480
481 sc->mmio_sz = ((sc->efr & AMDIOMMU_EFR_PC_SUP) != 0 ? 512 : 16) *
482 1024;
483
484 sc->mmio_rid = AMDIOMMU_RID;
485 error = bus_set_resource(dev, SYS_RES_MEMORY, AMDIOMMU_RID,
486 sc->mmio_base, sc->mmio_sz);
487 if (error != 0) {
488 device_printf(dev,
489 "bus_set_resource %#jx-%#jx failed, error %d\n",
490 (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
491 sc->mmio_sz, error);
492 error = ENXIO;
493 goto errout1;
494 }
495 sc->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->mmio_rid,
496 sc->mmio_base, sc->mmio_base + sc->mmio_sz - 1, sc->mmio_sz,
497 RF_ALLOCATED | RF_ACTIVE | RF_SHAREABLE);
498 if (sc->mmio_res == NULL) {
499 device_printf(dev,
500 "bus_alloc_resource %#jx-%#jx failed\n",
501 (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
502 sc->mmio_sz);
503 error = ENXIO;
504 goto errout2;
505 }
506
507 sc->hw_ctrl = amdiommu_read8(sc, AMDIOMMU_CTRL);
508 if (bootverbose)
509 device_printf(dev, "ctrl reg %#jx\n", (uintmax_t)sc->hw_ctrl);
510 if ((sc->hw_ctrl & AMDIOMMU_CTRL_EN) != 0) {
511 device_printf(dev, "CTRL_EN is set, bailing out\n");
512 error = EBUSY;
513 goto errout2;
514 }
515
516 iommu_high = BUS_SPACE_MAXADDR;
517
518 error = amdiommu_create_dev_tbl(sc);
519 if (error != 0)
520 goto errout3;
521
522 error = amdiommu_init_cmd(sc);
523 if (error != 0)
524 goto errout4;
525
526 error = amdiommu_init_event(sc);
527 if (error != 0)
528 goto errout5;
529
530 error = amdiommu_setup_intr(sc);
531 if (error != 0)
532 goto errout6;
533
534 error = iommu_init_busdma(AMD2IOMMU(sc));
535 if (error != 0)
536 goto errout7;
537
538 error = amdiommu_init_irt(sc);
539 if (error != 0)
540 goto errout8;
541
	/*
	 * Unlike DMAR, the AMD IOMMU does not process the command queue
	 * unless the unit is enabled.  But since a non-present devtab
	 * entry makes the IOMMU ignore transactions from the corresponding
	 * initiator, enabling the unit here still leaves DMA and interrupt
	 * remapping effectively disabled until device table entries are
	 * installed.
	 */
549 AMDIOMMU_LOCK(sc);
550 sc->hw_ctrl |= AMDIOMMU_CTRL_EN;
551 amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
552 if (bootverbose) {
553 printf("amdiommu%d: enabled translation\n",
554 AMD2IOMMU(sc)->unit);
555 }
556 AMDIOMMU_UNLOCK(sc);
557
558 TAILQ_INSERT_TAIL(&amdiommu_units, sc, unit_next);
559 amdiommu_running = true;
560 return (0);
561
562 errout8:
563 iommu_fini_busdma(&sc->iommu);
564 errout7:
565 pci_release_msi(dev);
566 errout6:
567 amdiommu_fini_event(sc);
568 errout5:
569 amdiommu_fini_cmd(sc);
570 errout4:
571 amdiommu_free_dev_tbl(sc);
572 errout3:
573 bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res);
574 errout2:
575 bus_delete_resource(dev, SYS_RES_MEMORY, sc->mmio_rid);
576 errout1:
577 sysctl_ctx_free(&sc->iommu.sysctl_ctx);
578 delete_unrhdr(sc->domids);
579 mtx_destroy(&sc->iommu.lock);
580
581 return (error);
582 }
583
584 static int
amdiommu_detach(device_t dev)
586 {
587 return (EBUSY);
588 }
589
590 static int
amdiommu_suspend(device_t dev)
592 {
593 /* XXXKIB */
594 return (0);
595 }
596
597 static int
amdiommu_resume(device_t dev)
599 {
600 /* XXXKIB */
601 return (0);
602 }
603
604 static device_method_t amdiommu_methods[] = {
605 DEVMETHOD(device_probe, amdiommu_probe),
606 DEVMETHOD(device_attach, amdiommu_attach),
607 DEVMETHOD(device_detach, amdiommu_detach),
608 DEVMETHOD(device_suspend, amdiommu_suspend),
609 DEVMETHOD(device_resume, amdiommu_resume),
610 DEVMETHOD_END
611 };
612
613 static driver_t amdiommu_driver = {
614 "amdiommu",
615 amdiommu_methods,
616 sizeof(struct amdiommu_unit),
617 };
618
619 EARLY_DRIVER_MODULE(amdiommu, pci, amdiommu_driver, 0, 0, BUS_PASS_SUPPORTDEV);
620 MODULE_DEPEND(amdiommu, pci, 1, 1, 1);
621
622 int
amdiommu_is_running(void)
624 {
625 return (amdiommu_running ? 0 : ENXIO);
626 }
627
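/*
 * Find an attached IOMMU unit by PCI segment group and by the device
 * id of the IOMMU function itself, as reported in the IVHD header.
 */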
628 static struct amdiommu_unit *
amdiommu_unit_by_device_id(u_int pci_seg, u_int device_id)
630 {
631 struct amdiommu_unit *unit;
632
633 TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
634 if (unit->unit_dom == pci_seg && unit->device_id == device_id)
635 return (unit);
636 }
637 return (NULL);
638 }
639
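/*
 * Search context for mapping a requester (PCI device, IOAPIC or HPET)
 * to its IVHD device entry.  The inputs identify the requester; on a
 * match the owning IOMMU device id, the effective rid and the DTE and
 * extended DTE settings from the IVRS are filled in.
 */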
640 struct ivhd_find_unit {
641 u_int domain;
642 uintptr_t rid;
643 int devno;
644 enum {
645 IFU_DEV_PCI,
646 IFU_DEV_IOAPIC,
647 IFU_DEV_HPET,
648 } type;
649 u_int device_id;
650 uint16_t rid_real;
651 uint8_t dte;
652 uint32_t edte;
653 };
654
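/*
 * Walk the variable-length device entries of one IVHD looking for the
 * entry that covers the requester described by ifu.  On a match,
 * record the effective (possibly aliased) rid, the DTE data setting
 * and, for extended entries, the extended DTE data.
 */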
655 static bool
amdiommu_find_unit_scan_ivrs(ACPI_IVRS_DE_HEADER *d, size_t tlen,
657 struct ivhd_find_unit *ifu)
658 {
659 char *db, *de;
660 size_t len;
661
662 for (de = (char *)d + tlen; (char *)d < de;
663 d = (ACPI_IVRS_DE_HEADER *)(db + len)) {
664 db = (char *)d;
665 if (d->Type == ACPI_IVRS_TYPE_PAD4) {
666 len = sizeof(ACPI_IVRS_DEVICE4);
667 } else if (d->Type == ACPI_IVRS_TYPE_ALL) {
668 ACPI_IVRS_DEVICE4 *d4;
669
670 d4 = (ACPI_IVRS_DEVICE4 *)db;
671 len = sizeof(*d4);
672 ifu->dte = d4->Header.DataSetting;
673 } else if (d->Type == ACPI_IVRS_TYPE_SELECT) {
674 ACPI_IVRS_DEVICE4 *d4;
675
676 d4 = (ACPI_IVRS_DEVICE4 *)db;
677 if (d4->Header.Id == ifu->rid) {
678 ifu->dte = d4->Header.DataSetting;
679 ifu->rid_real = ifu->rid;
680 return (true);
681 }
682 len = sizeof(*d4);
683 } else if (d->Type == ACPI_IVRS_TYPE_START) {
684 ACPI_IVRS_DEVICE4 *d4, *d4n;
685
686 d4 = (ACPI_IVRS_DEVICE4 *)db;
687 d4n = d4 + 1;
688 if (d4n->Header.Type != ACPI_IVRS_TYPE_END) {
689 printf("IVRS dev4 start not followed by END "
690 "(%#x)\n", d4n->Header.Type);
691 return (false);
692 }
693 if (d4->Header.Id <= ifu->rid &&
694 ifu->rid <= d4n->Header.Id) {
695 ifu->dte = d4->Header.DataSetting;
696 ifu->rid_real = ifu->rid;
697 return (true);
698 }
699 len = 2 * sizeof(*d4);
700 } else if (d->Type == ACPI_IVRS_TYPE_PAD8) {
701 len = sizeof(ACPI_IVRS_DEVICE8A);
702 } else if (d->Type == ACPI_IVRS_TYPE_ALIAS_SELECT) {
703 ACPI_IVRS_DEVICE8A *d8a;
704
705 d8a = (ACPI_IVRS_DEVICE8A *)db;
706 if (d8a->Header.Id == ifu->rid) {
707 ifu->dte = d8a->Header.DataSetting;
708 ifu->rid_real = d8a->UsedId;
709 return (true);
710 }
711 len = sizeof(*d8a);
712 } else if (d->Type == ACPI_IVRS_TYPE_ALIAS_START) {
713 ACPI_IVRS_DEVICE8A *d8a;
714 ACPI_IVRS_DEVICE4 *d4;
715
716 d8a = (ACPI_IVRS_DEVICE8A *)db;
717 d4 = (ACPI_IVRS_DEVICE4 *)(d8a + 1);
718 if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
719 printf("IVRS alias start not followed by END "
720 "(%#x)\n", d4->Header.Type);
721 return (false);
722 }
723 if (d8a->Header.Id <= ifu->rid &&
724 ifu->rid <= d4->Header.Id) {
725 ifu->dte = d8a->Header.DataSetting;
726 ifu->rid_real = d8a->UsedId;
727 return (true);
728 }
729 len = sizeof(*d8a) + sizeof(*d4);
730 } else if (d->Type == ACPI_IVRS_TYPE_EXT_SELECT) {
731 ACPI_IVRS_DEVICE8B *d8b;
732
733 d8b = (ACPI_IVRS_DEVICE8B *)db;
734 if (d8b->Header.Id == ifu->rid) {
735 ifu->dte = d8b->Header.DataSetting;
736 ifu->rid_real = ifu->rid;
737 ifu->edte = d8b->ExtendedData;
738 return (true);
739 }
740 len = sizeof(*d8b);
741 } else if (d->Type == ACPI_IVRS_TYPE_EXT_START) {
742 ACPI_IVRS_DEVICE8B *d8b;
743 ACPI_IVRS_DEVICE4 *d4;
744
745 d8b = (ACPI_IVRS_DEVICE8B *)db;
746 d4 = (ACPI_IVRS_DEVICE4 *)(db + sizeof(*d8b));
747 if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
748 printf("IVRS ext start not followed by END "
749 "(%#x)\n", d4->Header.Type);
750 return (false);
751 }
			if (d8b->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
754 ifu->dte = d8b->Header.DataSetting;
755 ifu->rid_real = ifu->rid;
756 ifu->edte = d8b->ExtendedData;
757 return (true);
758 }
759 len = sizeof(*d8b) + sizeof(*d4);
760 } else if (d->Type == ACPI_IVRS_TYPE_SPECIAL) {
761 ACPI_IVRS_DEVICE8C *d8c;
762
763 d8c = (ACPI_IVRS_DEVICE8C *)db;
764 if (((ifu->type == IFU_DEV_IOAPIC &&
765 d8c->Variety == ACPI_IVHD_IOAPIC) ||
766 (ifu->type == IFU_DEV_HPET &&
767 d8c->Variety == ACPI_IVHD_HPET)) &&
768 ifu->devno == d8c->Handle) {
769 ifu->dte = d8c->Header.DataSetting;
770 ifu->rid_real = d8c->UsedId;
771 return (true);
772 }
773 len = sizeof(*d8c);
774 } else if (d->Type == ACPI_IVRS_TYPE_HID) {
775 ACPI_IVRS_DEVICE_HID *dh;
776
777 dh = (ACPI_IVRS_DEVICE_HID *)db;
778 len = sizeof(*dh) + dh->UidLength;
779 /* XXXKIB */
780 } else {
781 #if 0
782 printf("amdiommu: unknown IVRS device entry type %#x\n",
783 d->Type);
784 #endif
785 if (d->Type <= 63)
786 len = sizeof(ACPI_IVRS_DEVICE4);
787 else if (d->Type <= 127)
788 len = sizeof(ACPI_IVRS_DEVICE8A);
789 else {
790 printf("amdiommu: abort, cannot "
791 "advance iterator, item type %#x\n",
792 d->Type);
793 return (false);
794 }
795 }
796 }
797 return (false);
798 }
799
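/*
 * Per-IVHD iterators: skip IVHDs that belong to a different PCI
 * segment group, scan the device entries, and on success remember the
 * device id of the IOMMU that owns the match.
 */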
800 static bool
amdiommu_find_unit_scan_0x11(ACPI_IVRS_HARDWARE2 *ivrs, void *arg)
802 {
803 struct ivhd_find_unit *ifu = arg;
804 ACPI_IVRS_DE_HEADER *d;
805 bool res;
806
807 KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
808 ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
809 ("Misparsed IVHD h2, ivrs type %#x", ivrs->Header.Type));
810
811 if (ifu->domain != ivrs->PciSegmentGroup)
812 return (false);
813 d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
814 res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
815 if (res)
816 ifu->device_id = ivrs->Header.DeviceId;
817 return (res);
818 }
819
820 static bool
amdiommu_find_unit_scan_0x10(ACPI_IVRS_HARDWARE1 *ivrs, void *arg)
822 {
823 struct ivhd_find_unit *ifu = arg;
824 ACPI_IVRS_DE_HEADER *d;
825 bool res;
826
827 KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
828 ("Misparsed IVHD h1, ivrs type %#x", ivrs->Header.Type));
829
830 if (ifu->domain != ivrs->PciSegmentGroup)
831 return (false);
832 d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
833 res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
834 if (res)
835 ifu->device_id = ivrs->Header.DeviceId;
836 return (res);
837 }
838
839 static void
amdiommu_dev_prop_dtr(device_t dev, const char *name, void *val, void *dtr_ctx)
841 {
842 free(val, M_DEVBUF);
843 }
844
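/*
 * Per-device flags (such as AMDIOMMU_DEV_REPORTED) are stored as a
 * device property named after the owning IOMMU unit; the flag word is
 * allocated on first use and freed by the property destructor.
 */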
845 static int *
amdiommu_dev_fetch_flagsp(struct amdiommu_unit *unit, device_t dev)
847 {
848 int *flagsp, error;
849
850 bus_topo_assert();
851 error = device_get_prop(dev, device_get_nameunit(unit->iommu.dev),
852 (void **)&flagsp);
853 if (error == ENOENT) {
854 flagsp = malloc(sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO);
855 device_set_prop(dev, device_get_nameunit(unit->iommu.dev),
856 flagsp, amdiommu_dev_prop_dtr, unit);
857 }
858 return (flagsp);
859 }
860
861 static int
amdiommu_get_dev_prop_flags(struct amdiommu_unit *unit, device_t dev)
863 {
864 int *flagsp, flags;
865
866 bus_topo_lock();
867 flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
868 flags = *flagsp;
869 bus_topo_unlock();
870 return (flags);
871 }
872
873 static void
amdiommu_set_dev_prop_flags(struct amdiommu_unit *unit, device_t dev,
875 int flag)
876 {
877 int *flagsp;
878
879 bus_topo_lock();
880 flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
881 *flagsp |= flag;
882 bus_topo_unlock();
883 }
884
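/*
 * Resolve the AMD IOMMU unit responsible for a PCI device using the
 * IVRS table, returning the unit together with the effective
 * initiator rid and the DTE/extended DTE settings for it.
 */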
885 int
amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp, uint16_t *ridp,
887 uint8_t *dtep, uint32_t *edtep, bool verbose)
888 {
889 struct ivhd_find_unit ifu;
890 struct amdiommu_unit *unit;
891 int error, flags;
892 bool res;
893
894 if (!amdiommu_enable)
895 return (ENXIO);
896
897 if (device_get_devclass(device_get_parent(dev)) !=
898 devclass_find("pci"))
899 return (ENXIO);
900
901 bzero(&ifu, sizeof(ifu));
902 ifu.type = IFU_DEV_PCI;
903
904 error = pci_get_id(dev, PCI_ID_RID, &ifu.rid);
905 if (error != 0) {
906 if (verbose)
907 device_printf(dev,
908 "amdiommu cannot get rid, error %d\n", error);
909 return (ENXIO);
910 }
911
912 ifu.domain = pci_get_domain(dev);
913 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
914 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
915 if (!res) {
916 if (verbose)
917 device_printf(dev,
918 "(%#06x:%#06x) amdiommu cannot match rid in IVHD\n",
919 ifu.domain, (unsigned)ifu.rid);
920 return (ENXIO);
921 }
922
923 unit = amdiommu_unit_by_device_id(ifu.domain, ifu.device_id);
924 if (unit == NULL) {
925 if (verbose)
926 device_printf(dev,
927 "(%#06x:%#06x) amdiommu cannot find unit\n",
928 ifu.domain, (unsigned)ifu.rid);
929 return (ENXIO);
930 }
931 *unitp = unit;
932 iommu_device_set_iommu_prop(dev, unit->iommu.dev);
933 if (ridp != NULL)
934 *ridp = ifu.rid_real;
935 if (dtep != NULL)
936 *dtep = ifu.dte;
937 if (edtep != NULL)
938 *edtep = ifu.edte;
939 if (verbose) {
940 flags = amdiommu_get_dev_prop_flags(unit, dev);
941 if ((flags & AMDIOMMU_DEV_REPORTED) == 0) {
942 amdiommu_set_dev_prop_flags(unit, dev,
943 AMDIOMMU_DEV_REPORTED);
944 device_printf(dev, "amdiommu%d "
945 "initiator rid %#06x dte %#x edte %#x\n",
946 unit->iommu.unit, ifu.rid_real, ifu.dte, ifu.edte);
947 }
948 }
949 return (0);
950 }
951
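/*
 * Like amdiommu_find_unit(), but for an IOAPIC identified by its APIC
 * id; the rid comes from the matching IVHD special device entry.
 */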
952 int
amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
954 uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
955 {
956 struct ivhd_find_unit ifu;
957 struct amdiommu_unit *unit;
958 device_t apic_dev;
959 bool res;
960
961 if (!amdiommu_enable)
962 return (ENXIO);
963
964 bzero(&ifu, sizeof(ifu));
965 ifu.type = IFU_DEV_IOAPIC;
966 ifu.devno = apic_id;
967 ifu.rid = -1;
968
969 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
970 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
971 if (!res) {
972 if (verbose)
973 printf("amdiommu cannot match ioapic no %d in IVHD\n",
974 apic_id);
975 return (ENXIO);
976 }
977
	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit by dev id %#x\n",
			    ifu.device_id);
		return (ENXIO);
	}
	apic_dev = ioapic_get_dev(apic_id);
	if (apic_dev != NULL)
		iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
988 *unitp = unit;
989 if (ridp != NULL)
990 *ridp = ifu.rid_real;
991 if (dtep != NULL)
992 *dtep = ifu.dte;
993 if (edtep != NULL)
994 *edtep = ifu.edte;
995 if (verbose) {
996 printf("amdiommu%d IOAPIC %d "
997 "initiator rid %#06x dte %#x edte %#x\n",
998 unit->iommu.unit, apic_id, ifu.rid_real, ifu.dte,
999 ifu.edte);
1000 }
1001 return (0);
1002 }
1003
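/*
 * Like amdiommu_find_unit(), but for an HPET identified by its ACPI
 * UID.
 */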
1004 int
amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
1006 uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
1007 {
1008 struct ivhd_find_unit ifu;
1009 struct amdiommu_unit *unit;
1010 int hpet_no;
1011 bool res;
1012
1013 if (!amdiommu_enable)
1014 return (ENXIO);
1015
1016 hpet_no = hpet_get_uid(hpet);
1017 bzero(&ifu, sizeof(ifu));
1018 ifu.type = IFU_DEV_HPET;
1019 ifu.devno = hpet_no;
1020 ifu.rid = -1;
1021
1022 res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
1023 amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
1024 if (!res) {
1025 if (verbose)
1026 printf("amdiommu cannot match hpet no %d in IVHD\n",
1027 hpet_no);
1028 return (ENXIO);
1029 }
1030
1031 unit = amdiommu_unit_by_device_id(0, ifu.device_id);
1032 if (unit == NULL) {
1033 if (verbose)
			printf("amdiommu cannot find unit for hpet %d\n",
			    hpet_no);
1036 return (ENXIO);
1037 }
1038 *unitp = unit;
1039 iommu_device_set_iommu_prop(hpet, unit->iommu.dev);
1040 if (ridp != NULL)
1041 *ridp = ifu.rid_real;
1042 if (dtep != NULL)
1043 *dtep = ifu.dte;
1044 if (edtep != NULL)
1045 *edtep = ifu.edte;
1046 if (verbose) {
1047 printf("amdiommu%d HPET no %d "
1048 "initiator rid %#06x dte %#x edte %#x\n",
1049 unit->iommu.unit, hpet_no, ifu.rid_real, ifu.dte,
1050 ifu.edte);
1051 }
1052 return (0);
1053 }
1054
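/*
 * The x86_iommu find method: return the iommu_unit responsible for
 * the device, or NULL if none is.
 */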
1055 static struct iommu_unit *
amdiommu_find_method(device_t dev, bool verbose)
1057 {
1058 struct amdiommu_unit *unit;
1059 int error;
1060 uint32_t edte;
1061 uint16_t rid;
1062 uint8_t dte;
1063
1064 error = amdiommu_find_unit(dev, &unit, &rid, &dte, &edte, verbose);
1065 if (error != 0) {
1066 if (verbose && amdiommu_enable)
1067 device_printf(dev,
1068 "cannot find amdiommu unit, error %d\n",
1069 error);
1070 return (NULL);
1071 }
1072 return (&unit->iommu);
1073 }
1074
1075 static struct x86_unit_common *
amdiommu_get_x86_common(struct iommu_unit *unit)
1077 {
1078 struct amdiommu_unit *iommu;
1079
1080 iommu = IOMMU2AMD(unit);
1081 return (&iommu->x86c);
1082 }
1083
1084 static void
amdiommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
1086 {
1087 }
1088
1089 static struct x86_iommu amd_x86_iommu = {
1090 .get_x86_common = amdiommu_get_x86_common,
1091 .unit_pre_instantiate_ctx = amdiommu_unit_pre_instantiate_ctx,
1092 .find = amdiommu_find_method,
1093 .domain_unload_entry = amdiommu_domain_unload_entry,
1094 .domain_unload = amdiommu_domain_unload,
1095 .get_ctx = amdiommu_get_ctx,
1096 .free_ctx_locked = amdiommu_free_ctx_locked_method,
1097 .alloc_msi_intr = amdiommu_alloc_msi_intr,
1098 .map_msi_intr = amdiommu_map_msi_intr,
1099 .unmap_msi_intr = amdiommu_unmap_msi_intr,
1100 .map_ioapic_intr = amdiommu_map_ioapic_intr,
1101 .unmap_ioapic_intr = amdiommu_unmap_ioapic_intr,
1102 };
1103
1104 static void
x86_iommu_set_amd(void *arg __unused)
1106 {
1107 if (cpu_vendor_id == CPU_VENDOR_AMD)
1108 set_x86_iommu(&amd_x86_iommu);
1109 }
1110
1111 SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_amd, NULL);
1112
1113 #ifdef DDB
1114 #include <ddb/ddb.h>
1115 #include <ddb/db_lex.h>
1116
1117 static void
amdiommu_print_domain(struct amdiommu_domain *domain, bool show_mappings)
1119 {
1120 struct iommu_domain *iodom;
1121
1122 iodom = DOM2IODOM(domain);
1123
1124 db_printf(
1125 " @%p dom %d pglvl %d end %jx refs %d\n"
1126 " ctx_cnt %d flags %x pgobj %p map_ents %u\n",
1127 domain, domain->domain, domain->pglvl,
1128 (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
1129 domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);
1130
1131 iommu_db_domain_print_contexts(iodom);
1132
1133 if (show_mappings)
1134 iommu_db_domain_print_mappings(iodom);
1135 }
1136
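/*
 * Dump one IOMMU unit: MMIO location, control and command/event
 * status registers, device table address, invalidation queue state
 * and, on request, the pending command queue entries and the domains
 * with their contexts and mappings.
 */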
1137 static void
amdiommu_print_one(struct amdiommu_unit *unit, bool show_domains,
1139 bool show_mappings, bool show_cmdq)
1140 {
1141 struct amdiommu_domain *domain;
1142 struct amdiommu_cmd_generic *cp;
1143 u_int cmd_head, cmd_tail, ci;
1144
1145 cmd_head = amdiommu_read4(unit, AMDIOMMU_CMDBUF_HEAD);
1146 cmd_tail = amdiommu_read4(unit, AMDIOMMU_CMDBUF_TAIL);
1147 db_printf("amdiommu%d at %p, mmio at %#jx/sz %#jx\n",
1148 unit->iommu.unit, unit, (uintmax_t)unit->mmio_base,
1149 (uintmax_t)unit->mmio_sz);
1150 db_printf(" hw ctrl %#018jx cmdevst %#018jx\n",
1151 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CTRL),
1152 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS));
1153 db_printf(" devtbl at %p\n", unit->dev_tbl);
1154 db_printf(" hwseq at %p phys %#jx val %#jx\n",
1155 &unit->x86c.inv_waitd_seq_hw,
1156 pmap_kextract((vm_offset_t)&unit->x86c.inv_waitd_seq_hw),
1157 unit->x86c.inv_waitd_seq_hw);
1158 db_printf(" invq at %p base %#jx hw head/tail %#x/%#x\n",
1159 unit->x86c.inv_queue,
1160 (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDBUF_BASE),
1161 cmd_head, cmd_tail);
1162
1163 if (show_cmdq) {
1164 db_printf(" cmd q:\n");
1165 for (ci = cmd_head; ci != cmd_tail;) {
1166 cp = (struct amdiommu_cmd_generic *)(unit->
1167 x86c.inv_queue + ci);
1168 db_printf(
1169 " idx %#x op %#x %#010x %#010x %#010x %#010x\n",
1170 ci >> AMDIOMMU_CMD_SZ_SHIFT, cp->op,
1171 cp->w0, cp->ww1, cp->w2, cp->w3);
1172
1173 ci += AMDIOMMU_CMD_SZ;
1174 if (ci == unit->x86c.inv_queue_size)
1175 ci = 0;
1176 }
1177 }
1178
1179 if (show_domains) {
1180 db_printf(" domains:\n");
1181 LIST_FOREACH(domain, &unit->domains, link) {
1182 amdiommu_print_domain(domain, show_mappings);
1183 if (db_pager_quit)
1184 break;
1185 }
1186 }
1187 }
1188
DB_SHOW_COMMAND(amdiommu, db_amdiommu_print)
1190 {
1191 struct amdiommu_unit *unit;
1192 bool show_domains, show_mappings, show_cmdq;
1193
1194 show_domains = strchr(modif, 'd') != NULL;
1195 show_mappings = strchr(modif, 'm') != NULL;
1196 show_cmdq = strchr(modif, 'q') != NULL;
1197 if (!have_addr) {
1198 db_printf("usage: show amdiommu [/d] [/m] [/q] index\n");
1199 return;
1200 }
1201 if ((vm_offset_t)addr < 0x10000)
1202 unit = amdiommu_unit_by_device_id(0, (u_int)addr);
1203 else
1204 unit = (struct amdiommu_unit *)addr;
1205 amdiommu_print_one(unit, show_domains, show_mappings, show_cmdq);
1206 }
1207
DB_SHOW_ALL_COMMAND(amdiommus, db_show_all_amdiommus)
1209 {
1210 struct amdiommu_unit *unit;
1211 bool show_domains, show_mappings, show_cmdq;
1212
1213 show_domains = strchr(modif, 'd') != NULL;
1214 show_mappings = strchr(modif, 'm') != NULL;
1215 show_cmdq = strchr(modif, 'q') != NULL;
1216
1217 TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
1218 amdiommu_print_one(unit, show_domains, show_mappings,
1219 show_cmdq);
1220 if (db_pager_quit)
1221 break;
1222 }
1223 }
1224 #endif
1225