/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/pci_cfgreg.h>
#include "pcib_if.h"
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <x86/apicreg.h>
#include <x86/apicvar.h>
#include <dev/iommu/iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

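/*
 * Non-zero when the driver is activated via the hw.amdiommu.enable
 * tunable, fetched in amdiommu_probe() below.
 */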
static int amdiommu_enable = 0;

/*
 * All enumerated AMD IOMMU units.
 * Access is unlocked; the list is not modified after early,
 * single-threaded startup.
 */
static TAILQ_HEAD(, amdiommu_unit) amdiommu_units =
    TAILQ_HEAD_INITIALIZER(amdiommu_units);

static u_int
ivrs_info_to_unit_id(UINT32 info)
{
	return ((info & ACPI_IVHD_UNIT_ID_MASK) >> 8);
}

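/*
 * Iterator callbacks for the IVHD subtable types 0x40 (HARDWARE3),
 * 0x11 (HARDWARE2), and 0x10 (HARDWARE1).  The generic form erases the
 * subtable type so a single walker can dispatch all three.
 */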
typedef bool (*amdiommu_itercc_t)(void *, void *);
typedef bool (*amdiommu_iter40_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter11_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter10_t)(ACPI_IVRS_HARDWARE1 *, void *);

static bool
amdiommu_ivrs_iterate_tbl_typed(amdiommu_itercc_t iter, void *arg,
    int type, ACPI_TABLE_IVRS *ivrs_tbl)
{
	char *ptr, *ptrend;
	bool done;

	done = false;
	ptr = (char *)ivrs_tbl + sizeof(*ivrs_tbl);
	ptrend = (char *)ivrs_tbl + ivrs_tbl->Header.Length;
	for (;;) {
		ACPI_IVRS_HEADER *ivrsh;

		if (ptr >= ptrend)
			break;
		ivrsh = (ACPI_IVRS_HEADER *)ptr;
		if (ivrsh->Length <= 0) {
			printf("amdiommu_iterate_tbl: corrupted IVRS table, "
			    "length %d\n", ivrsh->Length);
			break;
		}
		ptr += ivrsh->Length;
		if (ivrsh->Type == type) {
			done = iter((void *)ivrsh, arg);
			if (done)
				break;
		}
	}
	return (done);
}

/*
 * Walk over the IVRS table, calling the callback iterators in priority
 * order: 0x40, then 0x11, then 0x10 subtables.  The first iterator
 * returning true ends the walk.
 * Returns true if any iterator returned true, otherwise false.
 */
static bool
amdiommu_ivrs_iterate_tbl(amdiommu_iter40_t iter40, amdiommu_iter11_t iter11,
    amdiommu_iter10_t iter10, void *arg)
{
	ACPI_TABLE_IVRS *ivrs_tbl;
	ACPI_STATUS status;
	bool done;

	status = AcpiGetTable(ACPI_SIG_IVRS, 1,
	    (ACPI_TABLE_HEADER **)&ivrs_tbl);
	if (ACPI_FAILURE(status))
		return (false);
	done = false;
	if (iter40 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter40, arg,
		    ACPI_IVRS_TYPE_HARDWARE3, ivrs_tbl);
	if (!done && iter11 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter11, arg, ACPI_IVRS_TYPE_HARDWARE2,
		    ivrs_tbl);
	if (!done && iter10 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter10, arg, ACPI_IVRS_TYPE_HARDWARE1,
		    ivrs_tbl);
	AcpiPutTable((ACPI_TABLE_HEADER *)ivrs_tbl);
	return (done);
}

struct ivhd_lookup_data {
	struct amdiommu_unit *sc;
	uint16_t devid;
};

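/*
 * Match an IVHD by the IOMMU's own PCI device id and record the PCI
 * segment group, unit id, and (for type 0x11/0x40 subtables) the EFR
 * image in the softc.
 */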
static bool
ivrs_lookup_ivhd_0x40(ACPI_IVRS_HARDWARE2 *h2, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD, h2 type %#x", h2->Header.Type));

	ildp = arg;
	if (h2->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h2->PciSegmentGroup;
	ildp->sc->iommu.unit = ivrs_info_to_unit_id(h2->Info);
	ildp->sc->efr = h2->EfrRegisterImage;
	return (true);
}

static bool
ivrs_lookup_ivhd_0x10(ACPI_IVRS_HARDWARE1 *h1, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h1->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD, h1 type %#x", h1->Header.Type));

	ildp = arg;
	if (h1->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h1->PciSegmentGroup;
	ildp->sc->iommu.unit = ivrs_info_to_unit_id(h1->Info);
	return (true);
}

static u_int
amdiommu_devtbl_sz(struct amdiommu_unit *sc __unused)
{
	return (sizeof(struct amdiommu_dte) * (1 << 16));
}

static void
amdiommu_free_dev_tbl(struct amdiommu_unit *sc)
{
	u_int devtbl_sz;

	devtbl_sz = amdiommu_devtbl_sz(sc);
	pmap_qremove((vm_offset_t)sc->dev_tbl, atop(devtbl_sz));
	kva_free((vm_offset_t)sc->dev_tbl, devtbl_sz);
	sc->dev_tbl = NULL;
	vm_object_deallocate(sc->devtbl_obj);
	sc->devtbl_obj = NULL;
}

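/*
 * Create the device table: one amdiommu_dte for each of the 2^16
 * possible device ids.  EFR.DevTblSeg gives the log2 of the supported
 * segment count; each segment is allocated physically contiguous, and
 * its base register encodes the segment size in the low bits (size in
 * pages minus one, as constructed in rval below).
 */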
static int
amdiommu_create_dev_tbl(struct amdiommu_unit *sc)
{
	vm_offset_t seg_vaddr;
	u_int devtbl_sz, dom, i, reclaimno, segnum_log, segnum, seg_sz;
	int error;

	segnum_log = (sc->efr & AMDIOMMU_EFR_DEVTBLSEG_MASK) >>
	    AMDIOMMU_EFR_DEVTBLSEG_SHIFT;
	segnum = 1 << segnum_log;

	devtbl_sz = amdiommu_devtbl_sz(sc);
	seg_sz = devtbl_sz / segnum;
	sc->devtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, atop(devtbl_sz),
	    VM_PROT_ALL, 0, NULL);
	if (bus_get_domain(sc->iommu.dev, &dom) == 0)
		sc->devtbl_obj->domain.dr_policy = DOMAINSET_PREF(dom);

	sc->hw_ctrl &= ~AMDIOMMU_CTRL_DEVTABSEG_MASK;
	sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
	sc->hw_ctrl |= AMDIOMMU_CTRL_COHERENT;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);

	seg_vaddr = kva_alloc(devtbl_sz);
	if (seg_vaddr == 0)
		return (ENOMEM);
	sc->dev_tbl = (void *)seg_vaddr;

	for (i = 0; i < segnum; i++) {
		vm_page_t m;
		uint64_t rval;
		u_int reg;

		for (reclaimno = 0; reclaimno < 3; reclaimno++) {
			VM_OBJECT_WLOCK(sc->devtbl_obj);
			m = vm_page_alloc_contig(sc->devtbl_obj,
			    i * atop(seg_sz),
			    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0,
			    VM_MEMATTR_DEFAULT);
			VM_OBJECT_WUNLOCK(sc->devtbl_obj);
			if (m != NULL)
				break;
			error = vm_page_reclaim_contig(VM_ALLOC_NORMAL,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0);
			if (error != 0)
				vm_wait(sc->devtbl_obj);
		}
		if (m == NULL) {
			amdiommu_free_dev_tbl(sc);
			return (ENOMEM);
		}

		rval = VM_PAGE_TO_PHYS(m) | (atop(seg_sz) - 1);
		for (u_int j = 0; j < atop(seg_sz);
		    j++, seg_vaddr += PAGE_SIZE, m++) {
			pmap_zero_page(m);
			pmap_qenter(seg_vaddr, &m, 1);
		}
		reg = i == 0 ? AMDIOMMU_DEVTAB_BASE : AMDIOMMU_DEVTAB_S1_BASE +
		    i - 1;
		amdiommu_write8(sc, reg, rval);
	}

	return (0);
}

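/*
 * Interrupt filter for the shared command-completion/event-log vector.
 * A completion-wait interrupt kicks the queue-invalidation taskqueue;
 * event-log and overflow bits are handed to amdiommu_event_intr().
 */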
static int
amdiommu_cmd_event_intr(void *arg)
{
	struct amdiommu_unit *unit;
	uint64_t status;

	unit = arg;
	status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
	if ((status & AMDIOMMU_CMDEVS_COMWAITINT) != 0) {
		amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
		    AMDIOMMU_CMDEVS_COMWAITINT);
		taskqueue_enqueue(unit->x86c.qi_taskqueue,
		    &unit->x86c.qi_task);
	}
	if ((status & (AMDIOMMU_CMDEVS_EVLOGINT |
	    AMDIOMMU_CMDEVS_EVOVRFLW)) != 0)
		amdiommu_event_intr(unit, status);
	return (FILTER_HANDLED);
}

static int
amdiommu_setup_intr(struct amdiommu_unit *sc)
{
	int error, msi_count, msix_count;

	msi_count = pci_msi_count(sc->iommu.dev);
	msix_count = pci_msix_count(sc->iommu.dev);
	if (msi_count == 0 && msix_count == 0) {
		device_printf(sc->iommu.dev, "needs MSI-class intr\n");
		return (ENXIO);
	}

#if 0
	/*
	 * XXXKIB How is MSI-X supposed to be organized for a BAR-less
	 * function?  Practically, available hardware implements only
	 * one IOMMU unit per function, and uses MSI.
	 */
	if (msix_count > 0) {
		sc->msix_table = bus_alloc_resource_any(sc->iommu.dev,
		    SYS_RES_MEMORY, &sc->msix_tab_rid, RF_ACTIVE);
		if (sc->msix_table == NULL)
			return (ENXIO);

		if (sc->msix_pba_rid != sc->msix_tab_rid) {
			/* Separate BAR for PBA */
			sc->msix_pba = bus_alloc_resource_any(sc->iommu.dev,
			    SYS_RES_MEMORY,
			    &sc->msix_pba_rid, RF_ACTIVE);
			if (sc->msix_pba == NULL) {
				bus_release_resource(sc->iommu.dev,
				    SYS_RES_MEMORY, &sc->msix_tab_rid,
				    sc->msix_table);
				return (ENXIO);
			}
		}
	}
#endif

	error = ENXIO;
	if (msix_count > 0) {
		error = pci_alloc_msix(sc->iommu.dev, &msix_count);
		if (error == 0)
			sc->numirqs = msix_count;
	}
	if (error != 0 && msi_count > 0) {
		error = pci_alloc_msi(sc->iommu.dev, &msi_count);
		if (error == 0)
			sc->numirqs = msi_count;
	}
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "Failed to allocate MSI/MSI-X (%d)\n", error);
		return (ENXIO);
	}

	/*
	 * XXXKIB The spec states that MISC0.MsiNum must be zero for an
	 * IOMMU using MSI interrupts.  But at least one BIOS programmed
	 * '2' there, making the driver use the wrong rid and causing the
	 * command/event interrupt to be ignored as stray.  Work around
	 * it by forcibly assuming that MsiNum is zero for MSI.
	 */
	sc->irq_cmdev_rid = 1;
	if (msix_count > 0) {
		sc->irq_cmdev_rid += pci_read_config(sc->iommu.dev,
		    sc->seccap_reg + PCIR_AMDIOMMU_MISC0, 4) &
		    PCIM_AMDIOMMU_MISC0_MSINUM_MASK;
	}

	sc->irq_cmdev = bus_alloc_resource_any(sc->iommu.dev, SYS_RES_IRQ,
	    &sc->irq_cmdev_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_cmdev == NULL) {
		device_printf(sc->iommu.dev,
		    "unable to map CMD/EV interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(sc->iommu.dev, sc->irq_cmdev,
	    INTR_TYPE_MISC, amdiommu_cmd_event_intr, NULL, sc,
	    &sc->irq_cmdev_cookie);
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "unable to setup interrupt (%d)\n", error);
		return (ENXIO);
	}
	bus_describe_intr(sc->iommu.dev, sc->irq_cmdev, sc->irq_cmdev_cookie,
	    "cmdev");

	if (x2apic_mode) {
		AMDIOMMU_LOCK(sc);
		sc->hw_ctrl |= AMDIOMMU_CTRL_GA_EN | AMDIOMMU_CTRL_XT_EN;
		amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
		/* XXXKIB AMDIOMMU_CTRL_INTCAPXT_EN and program x2APIC_CTRL */
		AMDIOMMU_UNLOCK(sc);
	}

	return (0);
}

static int
amdiommu_probe(device_t dev)
{
	int seccap_reg;
	int error;
	uint32_t cap_h, cap_type, cap_rev;

	if (acpi_disabled("amdiommu"))
		return (ENXIO);
	TUNABLE_INT_FETCH("hw.amdiommu.enable", &amdiommu_enable);
	if (!amdiommu_enable)
		return (ENXIO);
	if (pci_get_class(dev) != PCIC_BASEPERIPH ||
	    pci_get_subclass(dev) != PCIS_BASEPERIPH_IOMMU)
		return (ENXIO);

	error = pci_find_cap(dev, PCIY_SECDEV, &seccap_reg);
	if (error != 0 || seccap_reg == 0)
		return (ENXIO);

	cap_h = pci_read_config(dev, seccap_reg + PCIR_AMDIOMMU_CAP_HEADER,
	    4);
	cap_type = cap_h & PCIM_AMDIOMMU_CAP_TYPE_MASK;
	cap_rev = cap_h & PCIM_AMDIOMMU_CAP_REV_MASK;
	if (cap_type != PCIM_AMDIOMMU_CAP_TYPE_VAL &&
	    cap_rev != PCIM_AMDIOMMU_CAP_REV_VAL)
		return (ENXIO);

	device_set_desc(dev, "DMA remap");
	return (BUS_PROBE_SPECIFIC);
}

static int
amdiommu_attach(device_t dev)
{
	struct amdiommu_unit *sc;
	struct ivhd_lookup_data ild;
	int error;
	uint32_t base_low, base_high;
	bool res;

	sc = device_get_softc(dev);
	sc->iommu.dev = dev;

	error = pci_find_cap(dev, PCIY_SECDEV, &sc->seccap_reg);
	if (error != 0 || sc->seccap_reg == 0)
		return (ENXIO);

	base_low = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_LOW, 4);
	base_high = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_HIGH, 4);
	sc->mmio_base = (base_low & PCIM_AMDIOMMU_BASE_LOW_ADDRM) |
	    ((uint64_t)base_high << 32);

	sc->device_id = pci_get_rid(dev);
	ild.sc = sc;
	ild.devid = sc->device_id;
	res = amdiommu_ivrs_iterate_tbl(ivrs_lookup_ivhd_0x40,
	    ivrs_lookup_ivhd_0x40, ivrs_lookup_ivhd_0x10, &ild);
	if (!res) {
		device_printf(dev, "Cannot find IVHD\n");
		return (ENXIO);
	}

	mtx_init(&sc->iommu.lock, "amdihw", NULL, MTX_DEF);
	sc->domids = new_unrhdr(0, 0xffff, &sc->iommu.lock);
	LIST_INIT(&sc->domains);
	sysctl_ctx_init(&sc->iommu.sysctl_ctx);

	sc->mmio_sz = ((sc->efr & AMDIOMMU_EFR_PC_SUP) != 0 ? 512 : 16) *
	    1024;

	sc->mmio_rid = AMDIOMMU_RID;
	error = bus_set_resource(dev, SYS_RES_MEMORY, AMDIOMMU_RID,
	    sc->mmio_base, sc->mmio_sz);
	if (error != 0) {
		device_printf(dev,
		    "bus_set_resource %#jx-%#jx failed, error %d\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz, error);
		error = ENXIO;
		goto errout1;
	}
	sc->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->mmio_rid,
	    sc->mmio_base, sc->mmio_base + sc->mmio_sz - 1, sc->mmio_sz,
	    RF_ALLOCATED | RF_ACTIVE | RF_SHAREABLE);
	if (sc->mmio_res == NULL) {
		device_printf(dev,
		    "bus_alloc_resource %#jx-%#jx failed\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz);
		error = ENXIO;
		goto errout2;
	}

	sc->hw_ctrl = amdiommu_read8(sc, AMDIOMMU_CTRL);
	if (bootverbose)
		device_printf(dev, "ctrl reg %#jx\n", (uintmax_t)sc->hw_ctrl);
	if ((sc->hw_ctrl & AMDIOMMU_CTRL_EN) != 0) {
		device_printf(dev, "CTRL_EN is set, bailing out\n");
		error = EBUSY;
		goto errout2;
	}

	iommu_high = BUS_SPACE_MAXADDR;

	error = amdiommu_create_dev_tbl(sc);
	if (error != 0)
		goto errout3;

	error = amdiommu_init_cmd(sc);
	if (error != 0)
		goto errout4;

	error = amdiommu_init_event(sc);
	if (error != 0)
		goto errout5;

	error = amdiommu_setup_intr(sc);
	if (error != 0)
		goto errout6;

	error = iommu_init_busdma(AMD2IOMMU(sc));
	if (error != 0)
		goto errout7;

	error = amdiommu_init_irt(sc);
	if (error != 0)
		goto errout8;

	/*
	 * Unlike DMAR, the AMD IOMMU does not process the command queue
	 * unless the IOMMU is enabled.  But since a non-present devtab
	 * entry makes the IOMMU ignore transactions from the
	 * corresponding initiator, IOMMU operation is de facto disabled
	 * for both DMA and interrupt remapping.
	 */
	AMDIOMMU_LOCK(sc);
	sc->hw_ctrl |= AMDIOMMU_CTRL_EN;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
	if (bootverbose) {
		printf("amdiommu%d: enabled translation\n",
		    AMD2IOMMU(sc)->unit);
	}
	AMDIOMMU_UNLOCK(sc);

	TAILQ_INSERT_TAIL(&amdiommu_units, sc, unit_next);
	return (0);

errout8:
	iommu_fini_busdma(&sc->iommu);
errout7:
	pci_release_msi(dev);
errout6:
	amdiommu_fini_event(sc);
errout5:
	amdiommu_fini_cmd(sc);
errout4:
	amdiommu_free_dev_tbl(sc);
errout3:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res);
errout2:
	bus_delete_resource(dev, SYS_RES_MEMORY, sc->mmio_rid);
errout1:
	sysctl_ctx_free(&sc->iommu.sysctl_ctx);
	delete_unrhdr(sc->domids);
	mtx_destroy(&sc->iommu.lock);

	return (error);
}

static int
amdiommu_detach(device_t dev)
{
	return (EBUSY);
}

static int
amdiommu_suspend(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static int
amdiommu_resume(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static device_method_t amdiommu_methods[] = {
	DEVMETHOD(device_probe, amdiommu_probe),
	DEVMETHOD(device_attach, amdiommu_attach),
	DEVMETHOD(device_detach, amdiommu_detach),
	DEVMETHOD(device_suspend, amdiommu_suspend),
	DEVMETHOD(device_resume, amdiommu_resume),
	DEVMETHOD_END
};

static driver_t amdiommu_driver = {
	"amdiommu",
	amdiommu_methods,
	sizeof(struct amdiommu_unit),
};

EARLY_DRIVER_MODULE(amdiommu, pci, amdiommu_driver, 0, 0, BUS_PASS_SUPPORTDEV);
MODULE_DEPEND(amdiommu, pci, 1, 1, 1);

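/*
 * Find the attached unit serving the given PCI segment/IOMMU device id
 * pair.  Unlocked access is safe: amdiommu_units is only appended to
 * during single-threaded startup, per the comment at its definition.
 */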
static struct amdiommu_unit *
amdiommu_unit_by_device_id(u_int pci_seg, u_int device_id)
{
	struct amdiommu_unit *unit;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		if (unit->unit_dom == pci_seg && unit->device_id == device_id)
			return (unit);
	}
	return (NULL);
}

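/*
 * Request/result context for IVHD device-entry scans.  The caller
 * fills in domain, rid (or devno for IOAPIC/HPET lookups), and type;
 * a successful scan fills in device_id, rid_real, dte, and edte.
 */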
struct ivhd_find_unit {
	u_int domain;
	uintptr_t rid;
	int devno;
	enum {
		IFU_DEV_PCI,
		IFU_DEV_IOAPIC,
		IFU_DEV_HPET,
	} type;
	u_int device_id;
	uint16_t rid_real;
	uint8_t dte;
	uint32_t edte;
};

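/*
 * Walk the variable-length device entries following an IVHD header,
 * looking for the entry that covers ifu->rid (or the IOAPIC/HPET
 * handle).  4-byte entries have type < 64 and 8-byte entries type
 * 64..127, which is used below to skip unknown entry types by size.
 */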
static bool
amdiommu_find_unit_scan_ivrs(ACPI_IVRS_DE_HEADER *d, size_t tlen,
    struct ivhd_find_unit *ifu)
{
	char *db, *de;
	size_t len;

	for (de = (char *)d + tlen; (char *)d < de;
	    d = (ACPI_IVRS_DE_HEADER *)(db + len)) {
		db = (char *)d;
		if (d->Type == ACPI_IVRS_TYPE_PAD4) {
			len = sizeof(ACPI_IVRS_DEVICE4);
		} else if (d->Type == ACPI_IVRS_TYPE_ALL) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			len = sizeof(*d4);
			ifu->dte = d4->Header.DataSetting;
		} else if (d->Type == ACPI_IVRS_TYPE_SELECT) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			if (d4->Header.Id == ifu->rid) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_START) {
			ACPI_IVRS_DEVICE4 *d4, *d4n;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			d4n = d4 + 1;
			if (d4n->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS dev4 start not followed by END "
				    "(%#x)\n", d4n->Header.Type);
				return (false);
			}
			if (d4->Header.Id <= ifu->rid &&
			    ifu->rid <= d4n->Header.Id) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = 2 * sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_PAD8) {
			len = sizeof(ACPI_IVRS_DEVICE8A);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_SELECT) {
			ACPI_IVRS_DEVICE8A *d8a;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			if (d8a->Header.Id == ifu->rid) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_START) {
			ACPI_IVRS_DEVICE8A *d8a;
			ACPI_IVRS_DEVICE4 *d4;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(d8a + 1);
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS alias start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8a->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_SELECT) {
			ACPI_IVRS_DEVICE8B *d8b;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			if (d8b->Header.Id == ifu->rid) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_START) {
			ACPI_IVRS_DEVICE8B *d8b;
			ACPI_IVRS_DEVICE4 *d4;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(db + sizeof(*d8b));
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS ext start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8b->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_SPECIAL) {
			ACPI_IVRS_DEVICE8C *d8c;

			d8c = (ACPI_IVRS_DEVICE8C *)db;
			if (((ifu->type == IFU_DEV_IOAPIC &&
			    d8c->Variety == ACPI_IVHD_IOAPIC) ||
			    (ifu->type == IFU_DEV_HPET &&
			    d8c->Variety == ACPI_IVHD_HPET)) &&
			    ifu->devno == d8c->Handle) {
				ifu->dte = d8c->Header.DataSetting;
				ifu->rid_real = d8c->UsedId;
				return (true);
			}
			len = sizeof(*d8c);
		} else if (d->Type == ACPI_IVRS_TYPE_HID) {
			ACPI_IVRS_DEVICE_HID *dh;

			dh = (ACPI_IVRS_DEVICE_HID *)db;
			len = sizeof(*dh) + dh->UidLength;
			/* XXXKIB */
		} else {
#if 0
			printf("amdiommu: unknown IVRS device entry type %#x\n",
			    d->Type);
#endif
			if (d->Type <= 63)
				len = sizeof(ACPI_IVRS_DEVICE4);
			else if (d->Type <= 127)
				len = sizeof(ACPI_IVRS_DEVICE8A);
			else {
				printf("amdiommu: abort, cannot "
				    "advance iterator, item type %#x\n",
				    d->Type);
				return (false);
			}
		}
	}
	return (false);
}

static bool
amdiommu_find_unit_scan_0x11(ACPI_IVRS_HARDWARE2 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD h2, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}

static bool
amdiommu_find_unit_scan_0x10(ACPI_IVRS_HARDWARE1 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD h1, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}

static void
amdiommu_dev_prop_dtr(device_t dev, const char *name, void *val, void *dtr_ctx)
{
	free(val, M_DEVBUF);
}

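/*
 * Per-device flag words (e.g. AMDIOMMU_DEV_REPORTED) are stored as a
 * device property keyed by the owning IOMMU's nameunit, allocated
 * lazily and freed by amdiommu_dev_prop_dtr() above.
 */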
static int *
amdiommu_dev_fetch_flagsp(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, error;

	bus_topo_assert();
	error = device_get_prop(dev, device_get_nameunit(unit->iommu.dev),
	    (void **)&flagsp);
	if (error == ENOENT) {
		flagsp = malloc(sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO);
		device_set_prop(dev, device_get_nameunit(unit->iommu.dev),
		    flagsp, amdiommu_dev_prop_dtr, unit);
	}
	return (flagsp);
}

static int
amdiommu_get_dev_prop_flags(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, flags;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	flags = *flagsp;
	bus_topo_unlock();
	return (flags);
}

static void
amdiommu_set_dev_prop_flags(struct amdiommu_unit *unit, device_t dev,
    int flag)
{
	int *flagsp;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	*flagsp |= flag;
	bus_topo_unlock();
}

int
amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp, uint16_t *ridp,
    uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int error, flags;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	if (device_get_devclass(device_get_parent(dev)) !=
	    devclass_find("pci"))
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_PCI;

	error = pci_get_id(dev, PCI_ID_RID, &ifu.rid);
	if (error != 0) {
		if (verbose)
			device_printf(dev,
			    "amdiommu cannot get rid, error %d\n", error);
		return (ENXIO);
	}

	ifu.domain = pci_get_domain(dev);
	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot match rid in IVHD\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(ifu.domain, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot find unit\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(dev, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		flags = amdiommu_get_dev_prop_flags(unit, dev);
		if ((flags & AMDIOMMU_DEV_REPORTED) == 0) {
			amdiommu_set_dev_prop_flags(unit, dev,
			    AMDIOMMU_DEV_REPORTED);
			device_printf(dev, "amdiommu%d "
			    "initiator rid %#06x dte %#x edte %#x\n",
			    unit->iommu.unit, ifu.rid_real, ifu.dte, ifu.edte);
		}
	}
	return (0);
}

int
amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	device_t apic_dev;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_IOAPIC;
	ifu.devno = apic_id;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match ioapic no %d in IVHD\n",
			    apic_id);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit by dev id %#x\n",
			    ifu.device_id);
		return (ENXIO);
	}
	apic_dev = ioapic_get_dev(apic_id);
	if (apic_dev != NULL)
		iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
	*unitp = unit;
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d IOAPIC %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, apic_id, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

int
amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int hpet_no;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	hpet_no = hpet_get_uid(hpet);
	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_HPET;
	ifu.devno = hpet_no;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match hpet no %d in IVHD\n",
			    hpet_no);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit for hpet no %d\n",
			    hpet_no);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(hpet, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d HPET no %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, hpet_no, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

static struct iommu_unit *
amdiommu_find_method(device_t dev, bool verbose)
{
	struct amdiommu_unit *unit;
	int error;
	uint32_t edte;
	uint16_t rid;
	uint8_t dte;

	error = amdiommu_find_unit(dev, &unit, &rid, &dte, &edte, verbose);
	if (error != 0) {
		if (verbose && amdiommu_enable)
			device_printf(dev,
			    "cannot find amdiommu unit, error %d\n",
			    error);
		return (NULL);
	}
	return (&unit->iommu);
}

static struct x86_unit_common *
amdiommu_get_x86_common(struct iommu_unit *unit)
{
	struct amdiommu_unit *iommu;

	iommu = IOMMU2AMD(unit);
	return (&iommu->x86c);
}

static void
amdiommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
}

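/*
 * Method table plugging the AMD implementation into the common x86
 * IOMMU framework, installed by x86_iommu_set_amd() below on AMD CPUs.
 */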
static struct x86_iommu amd_x86_iommu = {
	.get_x86_common = amdiommu_get_x86_common,
	.unit_pre_instantiate_ctx = amdiommu_unit_pre_instantiate_ctx,
	.find = amdiommu_find_method,
	.domain_unload_entry = amdiommu_domain_unload_entry,
	.domain_unload = amdiommu_domain_unload,
	.get_ctx = amdiommu_get_ctx,
	.free_ctx_locked = amdiommu_free_ctx_locked_method,
	.alloc_msi_intr = amdiommu_alloc_msi_intr,
	.map_msi_intr = amdiommu_map_msi_intr,
	.unmap_msi_intr = amdiommu_unmap_msi_intr,
	.map_ioapic_intr = amdiommu_map_ioapic_intr,
	.unmap_ioapic_intr = amdiommu_unmap_ioapic_intr,
};

static void
x86_iommu_set_amd(void *arg __unused)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		set_x86_iommu(&amd_x86_iommu);
}

SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_amd, NULL);

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>

static void
amdiommu_print_domain(struct amdiommu_domain *domain, bool show_mappings)
{
	struct iommu_domain *iodom;

	iodom = DOM2IODOM(domain);

	db_printf(
	    "  @%p dom %d pglvl %d end %jx refs %d\n"
	    "  ctx_cnt %d flags %x pgobj %p map_ents %u\n",
	    domain, domain->domain, domain->pglvl,
	    (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
	    domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);

	iommu_db_domain_print_contexts(iodom);

	if (show_mappings)
		iommu_db_domain_print_mappings(iodom);
}

static void
amdiommu_print_one(struct amdiommu_unit *unit, bool show_domains,
    bool show_mappings, bool show_cmdq)
{
	struct amdiommu_domain *domain;
	struct amdiommu_cmd_generic *cp;
	u_int cmd_head, cmd_tail, ci;

	cmd_head = amdiommu_read4(unit, AMDIOMMU_CMDBUF_HEAD);
	cmd_tail = amdiommu_read4(unit, AMDIOMMU_CMDBUF_TAIL);
	db_printf("amdiommu%d at %p, mmio at %#jx/sz %#jx\n",
	    unit->iommu.unit, unit, (uintmax_t)unit->mmio_base,
	    (uintmax_t)unit->mmio_sz);
	db_printf("  hw ctrl %#018jx cmdevst %#018jx\n",
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CTRL),
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS));
	db_printf("  devtbl at %p\n", unit->dev_tbl);
	db_printf("  hwseq at %p phys %#jx val %#jx\n",
	    &unit->x86c.inv_waitd_seq_hw,
	    pmap_kextract((vm_offset_t)&unit->x86c.inv_waitd_seq_hw),
	    unit->x86c.inv_waitd_seq_hw);
	db_printf("  invq at %p base %#jx hw head/tail %#x/%#x\n",
	    unit->x86c.inv_queue,
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDBUF_BASE),
	    cmd_head, cmd_tail);

	if (show_cmdq) {
		db_printf("  cmd q:\n");
		for (ci = cmd_head; ci != cmd_tail;) {
			cp = (struct amdiommu_cmd_generic *)(unit->
			    x86c.inv_queue + ci);
			db_printf(
			    "    idx %#x op %#x %#010x %#010x %#010x %#010x\n",
			    ci >> AMDIOMMU_CMD_SZ_SHIFT, cp->op,
			    cp->w0, cp->ww1, cp->w2, cp->w3);

			ci += AMDIOMMU_CMD_SZ;
			if (ci == unit->x86c.inv_queue_size)
				ci = 0;
		}
	}

	if (show_domains) {
		db_printf("  domains:\n");
		LIST_FOREACH(domain, &unit->domains, link) {
			amdiommu_print_domain(domain, show_mappings);
			if (db_pager_quit)
				break;
		}
	}
}

DB_SHOW_COMMAND(amdiommu, db_amdiommu_print)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;
	if (!have_addr) {
		db_printf("usage: show amdiommu [/d] [/m] [/q] index\n");
		return;
	}
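	/*
	 * Interpret a small addr argument as an IOMMU device id,
	 * anything larger as a struct amdiommu_unit pointer.
	 */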
	if ((vm_offset_t)addr < 0x10000)
		unit = amdiommu_unit_by_device_id(0, (u_int)addr);
	else
		unit = (struct amdiommu_unit *)addr;
	amdiommu_print_one(unit, show_domains, show_mappings, show_cmdq);
}

DB_SHOW_ALL_COMMAND(amdiommus, db_show_all_amdiommus)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		amdiommu_print_one(unit, show_domains, show_mappings,
		    show_cmdq);
		if (db_pager_quit)
			break;
	}
}
#endif