/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/pci_cfgreg.h>
#include "pcib_if.h"
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <x86/apicreg.h>
#include <x86/apicvar.h>
#include <dev/iommu/iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

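/*
 * The driver is disabled by default.  amdiommu_probe() fetches the
 * hw.amdiommu.enable loader tunable into this variable and refuses to
 * probe unless it is set.
 */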
static int amdiommu_enable = 0;

/*
 * All enumerated AMD IOMMU units.
 * Access is unlocked; the list is not modified after early,
 * single-threaded startup.
 */
static TAILQ_HEAD(, amdiommu_unit) amdiommu_units =
    TAILQ_HEAD_INITIALIZER(amdiommu_units);

typedef bool (*amdiommu_itercc_t)(void *, void *);
typedef bool (*amdiommu_iter40_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter11_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter10_t)(ACPI_IVRS_HARDWARE1 *, void *);

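/*
 * Call the iterator for each IVRS subtable of the given type, in table
 * order.  A callback returning true terminates the walk early.
 */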
static bool
amdiommu_ivrs_iterate_tbl_typed(amdiommu_itercc_t iter, void *arg,
    int type, ACPI_TABLE_IVRS *ivrs_tbl)
{
	char *ptr, *ptrend;
	bool done;

	done = false;
	ptr = (char *)ivrs_tbl + sizeof(*ivrs_tbl);
	ptrend = (char *)ivrs_tbl + ivrs_tbl->Header.Length;
	for (;;) {
		ACPI_IVRS_HEADER *ivrsh;

		if (ptr >= ptrend)
			break;
		ivrsh = (ACPI_IVRS_HEADER *)ptr;
		if (ivrsh->Length == 0) {
			printf("amdiommu_iterate_tbl: corrupted IVRS table, "
			    "length %d\n", ivrsh->Length);
			break;
		}
		ptr += ivrsh->Length;
		if (ivrsh->Type == type) {
			done = iter((void *)ivrsh, arg);
			if (done)
				break;
		}
	}
	return (done);
}

/*
 * Walk over the IVRS table, calling the callback iterators in priority
 * order: 0x40 subtables first, then 0x11, then 0x10.  The first
 * iterator returning true ends the walk.
 * Returns true if any iterator returned true, otherwise false.
 */
static bool
amdiommu_ivrs_iterate_tbl(amdiommu_iter40_t iter40, amdiommu_iter11_t iter11,
    amdiommu_iter10_t iter10, void *arg)
{
	ACPI_TABLE_IVRS *ivrs_tbl;
	ACPI_STATUS status;
	bool done;

	status = AcpiGetTable(ACPI_SIG_IVRS, 1,
	    (ACPI_TABLE_HEADER **)&ivrs_tbl);
	if (ACPI_FAILURE(status))
		return (false);
	done = false;
	if (iter40 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter40, arg,
		    ACPI_IVRS_TYPE_HARDWARE3, ivrs_tbl);
	if (!done && iter11 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter11, arg, ACPI_IVRS_TYPE_HARDWARE2,
		    ivrs_tbl);
	if (!done && iter10 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter10, arg, ACPI_IVRS_TYPE_HARDWARE1,
		    ivrs_tbl);
	AcpiPutTable((ACPI_TABLE_HEADER *)ivrs_tbl);
	return (done);
}

struct ivhd_lookup_data {
	struct amdiommu_unit *sc;
	uint16_t devid;
};

static bool
ivrs_lookup_ivhd_0x40(ACPI_IVRS_HARDWARE2 *h2, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD, h2 type %#x", h2->Header.Type));

	ildp = arg;
	if (h2->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h2->PciSegmentGroup;
	ildp->sc->efr = h2->EfrRegisterImage;
	return (true);
}

static bool
ivrs_lookup_ivhd_0x10(ACPI_IVRS_HARDWARE1 *h1, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h1->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD, h1 type %#x", h1->Header.Type));

	ildp = arg;
	if (h1->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h1->PciSegmentGroup;
	return (true);
}

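/*
 * The device table covers the whole 16-bit requester ID space: one
 * struct amdiommu_dte per possible rid, 1 << 16 entries total.
 */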
static u_int
amdiommu_devtbl_sz(struct amdiommu_unit *sc __unused)
{
	return (sizeof(struct amdiommu_dte) * (1 << 16));
}

static void
amdiommu_free_dev_tbl(struct amdiommu_unit *sc)
{
	u_int devtbl_sz;

	devtbl_sz = amdiommu_devtbl_sz(sc);
	pmap_qremove((vm_offset_t)sc->dev_tbl, atop(devtbl_sz));
	kva_free((vm_offset_t)sc->dev_tbl, devtbl_sz);
	sc->dev_tbl = NULL;
	vm_object_deallocate(sc->devtbl_obj);
	sc->devtbl_obj = NULL;
}

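/*
 * Allocate, map, and program the device table.  The table may be split
 * into up to eight segments: the EFR device-table segmentation field
 * reports log2 of the supported segment count, and each segment is
 * allocated as its own physically contiguous run and programmed into
 * the corresponding DEVTAB base register, easing the contiguity
 * requirement for the allocation.
 */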
static int
amdiommu_create_dev_tbl(struct amdiommu_unit *sc)
{
	vm_offset_t seg_vaddr;
	u_int devtbl_sz, dom, i, reclaimno, segnum_log, segnum, seg_sz;
	int error;

	static const int devtab_base_regs[] = {
		AMDIOMMU_DEVTAB_BASE,
		AMDIOMMU_DEVTAB_S1_BASE,
		AMDIOMMU_DEVTAB_S2_BASE,
		AMDIOMMU_DEVTAB_S3_BASE,
		AMDIOMMU_DEVTAB_S4_BASE,
		AMDIOMMU_DEVTAB_S5_BASE,
		AMDIOMMU_DEVTAB_S6_BASE,
		AMDIOMMU_DEVTAB_S7_BASE
	};

	segnum_log = (sc->efr & AMDIOMMU_EFR_DEVTBLSEG_MASK) >>
	    AMDIOMMU_EFR_DEVTBLSEG_SHIFT;
	segnum = 1 << segnum_log;

	KASSERT(segnum <= nitems(devtab_base_regs),
	    ("%s: unsupported devtab segment count %u", __func__, segnum));

	devtbl_sz = amdiommu_devtbl_sz(sc);
	seg_sz = devtbl_sz / segnum;
	sc->devtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, atop(devtbl_sz),
	    VM_PROT_ALL, 0, NULL);
	if (bus_get_domain(sc->iommu.dev, &dom) == 0)
		sc->devtbl_obj->domain.dr_policy = DOMAINSET_PREF(dom);

	sc->hw_ctrl &= ~AMDIOMMU_CTRL_DEVTABSEG_MASK;
	sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
	sc->hw_ctrl |= AMDIOMMU_CTRL_COHERENT;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);

	seg_vaddr = kva_alloc(devtbl_sz);
	if (seg_vaddr == 0)
		return (ENOMEM);
	sc->dev_tbl = (void *)seg_vaddr;

	for (i = 0; i < segnum; i++) {
		vm_page_t m;
		uint64_t rval;

		for (reclaimno = 0; reclaimno < 3; reclaimno++) {
			VM_OBJECT_WLOCK(sc->devtbl_obj);
			m = vm_page_alloc_contig(sc->devtbl_obj,
			    i * atop(seg_sz),
			    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0,
			    VM_MEMATTR_DEFAULT);
			VM_OBJECT_WUNLOCK(sc->devtbl_obj);
			if (m != NULL)
				break;
			error = vm_page_reclaim_contig(VM_ALLOC_NORMAL,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0);
			if (error != 0)
				vm_wait(sc->devtbl_obj);
		}
		if (m == NULL) {
			amdiommu_free_dev_tbl(sc);
			return (ENOMEM);
		}

		rval = VM_PAGE_TO_PHYS(m) | (atop(seg_sz) - 1);
		for (u_int j = 0; j < atop(seg_sz);
		     j++, seg_vaddr += PAGE_SIZE, m++) {
			pmap_zero_page(m);
			pmap_qenter(seg_vaddr, &m, 1);
		}
		amdiommu_write8(sc, devtab_base_regs[i], rval);
	}

	return (0);
}

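/*
 * Interrupt filter for the shared command-completion/event-log
 * interrupt.  A completion-wait interrupt kicks the queued-invalidation
 * taskqueue; event-log and event-overflow interrupts are forwarded to
 * amdiommu_event_intr().
 */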
static int
amdiommu_cmd_event_intr(void *arg)
{
	struct amdiommu_unit *unit;
	uint64_t status;

	unit = arg;
	status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
	if ((status & AMDIOMMU_CMDEVS_COMWAITINT) != 0) {
		amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
		    AMDIOMMU_CMDEVS_COMWAITINT);
		taskqueue_enqueue(unit->x86c.qi_taskqueue,
		    &unit->x86c.qi_task);
	}
	if ((status & (AMDIOMMU_CMDEVS_EVLOGINT |
	    AMDIOMMU_CMDEVS_EVOVRFLW)) != 0)
		amdiommu_event_intr(unit, status);
	return (FILTER_HANDLED);
}

static int
amdiommu_setup_intr(struct amdiommu_unit *sc)
{
	int error, msi_count, msix_count;

	msi_count = pci_msi_count(sc->iommu.dev);
	msix_count = pci_msix_count(sc->iommu.dev);
	if (msi_count == 0 && msix_count == 0) {
		device_printf(sc->iommu.dev, "needs MSI-class intr\n");
		return (ENXIO);
	}

#if 0
	/*
	 * XXXKIB How is MSI-X supposed to be organized for a BAR-less
	 * function?  Practically, available hardware implements only
	 * one IOMMU unit per function, and uses MSI.
	 */
	if (msix_count > 0) {
		sc->msix_table = bus_alloc_resource_any(sc->iommu.dev,
		    SYS_RES_MEMORY, &sc->msix_tab_rid, RF_ACTIVE);
		if (sc->msix_table == NULL)
			return (ENXIO);

		if (sc->msix_pba_rid != sc->msix_tab_rid) {
			/* Separate BAR for PBA */
			sc->msix_pba = bus_alloc_resource_any(sc->iommu.dev,
			    SYS_RES_MEMORY,
			    &sc->msix_pba_rid, RF_ACTIVE);
			if (sc->msix_pba == NULL) {
				bus_release_resource(sc->iommu.dev,
				    SYS_RES_MEMORY, &sc->msix_tab_rid,
				    sc->msix_table);
				return (ENXIO);
			}
		}
	}
#endif

	error = ENXIO;
	if (msix_count > 0) {
		error = pci_alloc_msix(sc->iommu.dev, &msix_count);
		if (error == 0)
			sc->numirqs = msix_count;
	}
	if (error != 0 && msi_count > 0) {
		error = pci_alloc_msi(sc->iommu.dev, &msi_count);
		if (error == 0)
			sc->numirqs = msi_count;
	}
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "Failed to allocate MSI/MSI-X (%d)\n", error);
		return (ENXIO);
	}

	/*
	 * XXXKIB The spec states that MISC0.MsiNum must be zero for an
	 * IOMMU using MSI interrupts.  But at least one BIOS programmed
	 * '2' there, making the driver use the wrong rid and causing
	 * the command/event interrupt to be ignored as stray.  Work
	 * around it by assuming that MsiNum is zero for MSI.
	 */
	sc->irq_cmdev_rid = 1;
	if (msix_count > 0) {
		sc->irq_cmdev_rid += pci_read_config(sc->iommu.dev,
		    sc->seccap_reg + PCIR_AMDIOMMU_MISC0, 4) &
		    PCIM_AMDIOMMU_MISC0_MSINUM_MASK;
	}

	sc->irq_cmdev = bus_alloc_resource_any(sc->iommu.dev, SYS_RES_IRQ,
	    &sc->irq_cmdev_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_cmdev == NULL) {
		device_printf(sc->iommu.dev,
		    "unable to map CMD/EV interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(sc->iommu.dev, sc->irq_cmdev,
	    INTR_TYPE_MISC, amdiommu_cmd_event_intr, NULL, sc,
	    &sc->irq_cmdev_cookie);
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "unable to setup interrupt (%d)\n", error);
		return (ENXIO);
	}
	bus_describe_intr(sc->iommu.dev, sc->irq_cmdev, sc->irq_cmdev_cookie,
	    "cmdev");

	if (x2apic_mode) {
		AMDIOMMU_LOCK(sc);
		sc->hw_ctrl |= AMDIOMMU_CTRL_GA_EN | AMDIOMMU_CTRL_XT_EN;
		amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
		/* XXXKIB AMDIOMMU_CTRL_INTCAPXT_EN and program x2APIC_CTRL */
		AMDIOMMU_UNLOCK(sc);
	}

	return (0);
}

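/*
 * An AMD IOMMU unit appears as a PCI function with the base-peripheral
 * IOMMU class code and the secure device (SECDEV) capability; probe
 * matches on the class and validates the capability header.
 */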
static int
amdiommu_probe(device_t dev)
{
	int seccap_reg;
	int error;
	uint32_t cap_h, cap_type, cap_rev;

	if (acpi_disabled("amdiommu"))
		return (ENXIO);
	TUNABLE_INT_FETCH("hw.amdiommu.enable", &amdiommu_enable);
	if (!amdiommu_enable)
		return (ENXIO);
	if (pci_get_class(dev) != PCIC_BASEPERIPH ||
	    pci_get_subclass(dev) != PCIS_BASEPERIPH_IOMMU)
		return (ENXIO);

	error = pci_find_cap(dev, PCIY_SECDEV, &seccap_reg);
	if (error != 0 || seccap_reg == 0)
		return (ENXIO);

	cap_h = pci_read_config(dev, seccap_reg + PCIR_AMDIOMMU_CAP_HEADER,
	    4);
	cap_type = cap_h & PCIM_AMDIOMMU_CAP_TYPE_MASK;
	cap_rev = cap_h & PCIM_AMDIOMMU_CAP_REV_MASK;
	if (cap_type != PCIM_AMDIOMMU_CAP_TYPE_VAL ||
	    cap_rev != PCIM_AMDIOMMU_CAP_REV_VAL)
		return (ENXIO);

	device_set_desc(dev, "DMA remap");
	return (BUS_PROBE_SPECIFIC);
}

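/*
 * Attach: locate the matching IVHD in the IVRS table, map the MMIO
 * register window, then bring the unit up in order: device table,
 * command queue, event log, interrupts, busdma, and the interrupt
 * remapping table, finally setting CTRL_EN.
 */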
static int
amdiommu_attach(device_t dev)
{
	struct amdiommu_unit *sc;
	struct ivhd_lookup_data ild;
	int error;
	uint32_t base_low, base_high;
	bool res;

	sc = device_get_softc(dev);
	sc->iommu.unit = device_get_unit(dev);
	sc->iommu.dev = dev;

	error = pci_find_cap(dev, PCIY_SECDEV, &sc->seccap_reg);
	if (error != 0 || sc->seccap_reg == 0)
		return (ENXIO);

	base_low = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_LOW, 4);
	base_high = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_HIGH, 4);
	sc->mmio_base = (base_low & PCIM_AMDIOMMU_BASE_LOW_ADDRM) |
	    ((uint64_t)base_high << 32);

	sc->device_id = pci_get_rid(dev);
	ild.sc = sc;
	ild.devid = sc->device_id;
	res = amdiommu_ivrs_iterate_tbl(ivrs_lookup_ivhd_0x40,
	    ivrs_lookup_ivhd_0x40, ivrs_lookup_ivhd_0x10, &ild);
	if (!res) {
		device_printf(dev, "Cannot find IVHD\n");
		return (ENXIO);
	}

	mtx_init(&sc->iommu.lock, "amdihw", NULL, MTX_DEF);
	sc->domids = new_unrhdr(0, 0xffff, &sc->iommu.lock);
	LIST_INIT(&sc->domains);
	sysctl_ctx_init(&sc->iommu.sysctl_ctx);

	sc->mmio_sz = ((sc->efr & AMDIOMMU_EFR_PC_SUP) != 0 ? 512 : 16) *
	    1024;

	sc->mmio_rid = AMDIOMMU_RID;
	error = bus_set_resource(dev, SYS_RES_MEMORY, AMDIOMMU_RID,
	    sc->mmio_base, sc->mmio_sz);
	if (error != 0) {
		device_printf(dev,
		    "bus_set_resource %#jx-%#jx failed, error %d\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz, error);
		error = ENXIO;
		goto errout1;
	}
	sc->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->mmio_rid,
	    sc->mmio_base, sc->mmio_base + sc->mmio_sz - 1, sc->mmio_sz,
	    RF_ALLOCATED | RF_ACTIVE | RF_SHAREABLE);
	if (sc->mmio_res == NULL) {
		device_printf(dev,
		    "bus_alloc_resource %#jx-%#jx failed\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz);
		error = ENXIO;
		goto errout2;
	}

	sc->hw_ctrl = amdiommu_read8(sc, AMDIOMMU_CTRL);
	if (bootverbose)
		device_printf(dev, "ctrl reg %#jx\n", (uintmax_t)sc->hw_ctrl);
	if ((sc->hw_ctrl & AMDIOMMU_CTRL_EN) != 0) {
		device_printf(dev, "CTRL_EN is set, bailing out\n");
		error = EBUSY;
		goto errout2;
	}

	iommu_high = BUS_SPACE_MAXADDR;

	error = amdiommu_create_dev_tbl(sc);
	if (error != 0)
		goto errout3;

	error = amdiommu_init_cmd(sc);
	if (error != 0)
		goto errout4;

	error = amdiommu_init_event(sc);
	if (error != 0)
		goto errout5;

	error = amdiommu_setup_intr(sc);
	if (error != 0)
		goto errout6;

	error = iommu_init_busdma(AMD2IOMMU(sc));
	if (error != 0)
		goto errout7;

	error = amdiommu_init_irt(sc);
	if (error != 0)
		goto errout8;

	/*
	 * Unlike DMAR, the AMD IOMMU does not process the command
	 * queue unless the unit is enabled.  But since a non-present
	 * devtab entry makes the IOMMU ignore transactions from the
	 * corresponding initiator, enabling the unit here still leaves
	 * DMA and interrupt remapping de facto disabled.
	 */
	AMDIOMMU_LOCK(sc);
	sc->hw_ctrl |= AMDIOMMU_CTRL_EN;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
	if (bootverbose) {
		printf("amdiommu%d: enabled translation\n",
		    AMD2IOMMU(sc)->unit);
	}
	AMDIOMMU_UNLOCK(sc);

	TAILQ_INSERT_TAIL(&amdiommu_units, sc, unit_next);
	return (0);

errout8:
	iommu_fini_busdma(&sc->iommu);
errout7:
	pci_release_msi(dev);
errout6:
	amdiommu_fini_event(sc);
errout5:
	amdiommu_fini_cmd(sc);
errout4:
	amdiommu_free_dev_tbl(sc);
errout3:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res);
errout2:
	bus_delete_resource(dev, SYS_RES_MEMORY, sc->mmio_rid);
errout1:
	sysctl_ctx_free(&sc->iommu.sysctl_ctx);
	delete_unrhdr(sc->domids);
	mtx_destroy(&sc->iommu.lock);

	return (error);
}

static int
amdiommu_detach(device_t dev)
{
	return (EBUSY);
}

static int
amdiommu_suspend(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static int
amdiommu_resume(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static device_method_t amdiommu_methods[] = {
	DEVMETHOD(device_probe, amdiommu_probe),
	DEVMETHOD(device_attach, amdiommu_attach),
	DEVMETHOD(device_detach, amdiommu_detach),
	DEVMETHOD(device_suspend, amdiommu_suspend),
	DEVMETHOD(device_resume, amdiommu_resume),
	DEVMETHOD_END
};

static driver_t	amdiommu_driver = {
	"amdiommu",
	amdiommu_methods,
	sizeof(struct amdiommu_unit),
};

EARLY_DRIVER_MODULE(amdiommu, pci, amdiommu_driver, 0, 0, BUS_PASS_SUPPORTDEV);
MODULE_DEPEND(amdiommu, pci, 1, 1, 1);

static struct amdiommu_unit *
amdiommu_unit_by_device_id(u_int pci_seg, u_int device_id)
{
	struct amdiommu_unit *unit;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		if (unit->unit_dom == pci_seg && unit->device_id == device_id)
			return (unit);
	}
	return (NULL);
}

struct ivhd_find_unit {
	u_int		domain;
	uintptr_t	rid;
	int		devno;
	enum {
		IFU_DEV_PCI,
		IFU_DEV_IOAPIC,
		IFU_DEV_HPET,
	} type;
	u_int		device_id;
	uint16_t	rid_real;
	uint8_t		dte;
	uint32_t	edte;
};

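/*
 * Scan the device entries of a single IVHD.  Entry types below 64 are
 * 4 bytes long, types 64-127 are 8 bytes.  SELECT/START entries match
 * one rid or a rid range; ALIAS entries additionally translate the rid
 * into the one the unit actually sees (UsedId); SPECIAL entries
 * describe IOAPIC and HPET initiators by handle.
 */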
static bool
amdiommu_find_unit_scan_ivrs(ACPI_IVRS_DE_HEADER *d, size_t tlen,
    struct ivhd_find_unit *ifu)
{
	char *db, *de;
	size_t len;

	for (de = (char *)d + tlen; (char *)d < de;
	     d = (ACPI_IVRS_DE_HEADER *)(db + len)) {
		db = (char *)d;
		if (d->Type == ACPI_IVRS_TYPE_PAD4) {
			len = sizeof(ACPI_IVRS_DEVICE4);
		} else if (d->Type == ACPI_IVRS_TYPE_ALL) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			len = sizeof(*d4);
			ifu->dte = d4->Header.DataSetting;
		} else if (d->Type == ACPI_IVRS_TYPE_SELECT) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			if (d4->Header.Id == ifu->rid) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_START) {
			ACPI_IVRS_DEVICE4 *d4, *d4n;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			d4n = d4 + 1;
			if (d4n->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS dev4 start not followed by END "
				    "(%#x)\n", d4n->Header.Type);
				return (false);
			}
			if (d4->Header.Id <= ifu->rid &&
			    ifu->rid <= d4n->Header.Id) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = 2 * sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_PAD8) {
			len = sizeof(ACPI_IVRS_DEVICE8A);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_SELECT) {
			ACPI_IVRS_DEVICE8A *d8a;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			if (d8a->Header.Id == ifu->rid) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_START) {
			ACPI_IVRS_DEVICE8A *d8a;
			ACPI_IVRS_DEVICE4 *d4;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(d8a + 1);
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS alias start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8a->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_SELECT) {
			ACPI_IVRS_DEVICE8B *d8b;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			if (d8b->Header.Id == ifu->rid) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_START) {
			ACPI_IVRS_DEVICE8B *d8b;
			ACPI_IVRS_DEVICE4 *d4;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(db + sizeof(*d8b));
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS ext start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8b->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_SPECIAL) {
			ACPI_IVRS_DEVICE8C *d8c;

			d8c = (ACPI_IVRS_DEVICE8C *)db;
			if (((ifu->type == IFU_DEV_IOAPIC &&
			    d8c->Variety == ACPI_IVHD_IOAPIC) ||
			    (ifu->type == IFU_DEV_HPET &&
			    d8c->Variety == ACPI_IVHD_HPET)) &&
			    ifu->devno == d8c->Handle) {
				ifu->dte = d8c->Header.DataSetting;
				ifu->rid_real = d8c->UsedId;
				return (true);
			}
			len = sizeof(*d8c);
		} else if (d->Type == ACPI_IVRS_TYPE_HID) {
			ACPI_IVRS_DEVICE_HID *dh;

			dh = (ACPI_IVRS_DEVICE_HID *)db;
			len = sizeof(*dh) + dh->UidLength;
			/* XXXKIB */
		} else {
#if 0
			printf("amdiommu: unknown IVRS device entry type %#x\n",
			    d->Type);
#endif
			if (d->Type <= 63)
				len = sizeof(ACPI_IVRS_DEVICE4);
			else if (d->Type <= 127)
				len = sizeof(ACPI_IVRS_DEVICE8A);
			else {
				printf("amdiommu: abort, cannot "
				    "advance iterator, item type %#x\n",
				    d->Type);
				return (false);
			}
		}
	}
	return (false);
}

static bool
amdiommu_find_unit_scan_0x11(ACPI_IVRS_HARDWARE2 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD h2, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}

static bool
amdiommu_find_unit_scan_0x10(ACPI_IVRS_HARDWARE1 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD h1, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length, ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}

static void
amdiommu_dev_prop_dtr(device_t dev, const char *name, void *val, void *dtr_ctx)
{
	free(val, M_DEVBUF);
}

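/*
 * Per-device flags are kept in a malloc'ed int attached to the device
 * as a property keyed by the nameunit of the owning IOMMU, created on
 * first access and freed by the destructor above.
 */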
static int *
amdiommu_dev_fetch_flagsp(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, error;

	bus_topo_assert();
	error = device_get_prop(dev, device_get_nameunit(unit->iommu.dev),
	    (void **)&flagsp);
	if (error == ENOENT) {
		flagsp = malloc(sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO);
		device_set_prop(dev, device_get_nameunit(unit->iommu.dev),
		    flagsp, amdiommu_dev_prop_dtr, unit);
	}
	return (flagsp);
}

static int
amdiommu_get_dev_prop_flags(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, flags;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	flags = *flagsp;
	bus_topo_unlock();
	return (flags);
}

static void
amdiommu_set_dev_prop_flags(struct amdiommu_unit *unit, device_t dev,
    int flag)
{
	int *flagsp;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	*flagsp |= flag;
	bus_topo_unlock();
}

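/*
 * Resolve the IOMMU unit responsible for a PCI device: look up its rid
 * in the IVRS device entries, which yields the rid the unit actually
 * uses, the DTE settings byte, and the extended DTE data, then map the
 * owning IVHD's DeviceId to an enumerated unit.
 */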
int
amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp, uint16_t *ridp,
    uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int error, flags;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	if (device_get_devclass(device_get_parent(dev)) !=
	    devclass_find("pci"))
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_PCI;

	error = pci_get_id(dev, PCI_ID_RID, &ifu.rid);
	if (error != 0) {
		if (verbose)
			device_printf(dev,
			    "amdiommu cannot get rid, error %d\n", error);
		return (ENXIO);
	}

	ifu.domain = pci_get_domain(dev);
	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot match rid in IVHD\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(ifu.domain, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot find unit\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(dev, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		flags = amdiommu_get_dev_prop_flags(unit, dev);
		if ((flags & AMDIOMMU_DEV_REPORTED) == 0) {
			amdiommu_set_dev_prop_flags(unit, dev,
			    AMDIOMMU_DEV_REPORTED);
			device_printf(dev, "amdiommu%d "
			    "initiator rid %#06x dte %#x edte %#x\n",
			    unit->iommu.unit, ifu.rid_real, ifu.dte, ifu.edte);
		}
	}
	return (0);
}

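/*
 * IOAPIC and HPET initiators are matched by handle (APIC id or HPET
 * number) against the SPECIAL device entries of the IVHD, which also
 * supply the rid those initiators use.
 */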
int
amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	device_t apic_dev;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_IOAPIC;
	ifu.devno = apic_id;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match ioapic no %d in IVHD\n",
			    apic_id);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit by dev id %#x\n",
			    ifu.device_id);
		return (ENXIO);
	}
	apic_dev = ioapic_get_dev(apic_id);
	if (apic_dev != NULL)
		iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
	*unitp = unit;
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d IOAPIC %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, apic_id, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

int
amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int hpet_no;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	hpet_no = hpet_get_uid(hpet);
	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_HPET;
	ifu.devno = hpet_no;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match hpet no %d in IVHD\n",
			    hpet_no);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit for hpet %d\n",
			    hpet_no);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(hpet, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d HPET no %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, hpet_no, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

static struct iommu_unit *
amdiommu_find_method(device_t dev, bool verbose)
{
	struct amdiommu_unit *unit;
	int error;
	uint32_t edte;
	uint16_t rid;
	uint8_t dte;

	error = amdiommu_find_unit(dev, &unit, &rid, &dte, &edte, verbose);
	if (error != 0) {
		if (verbose && amdiommu_enable)
			device_printf(dev,
			    "cannot find amdiommu unit, error %d\n",
			    error);
		return (NULL);
	}
	return (&unit->iommu);
}

static struct x86_unit_common *
amdiommu_get_x86_common(struct iommu_unit *unit)
{
	struct amdiommu_unit *iommu;

	iommu = IOMMU2AMD(unit);
	return (&iommu->x86c);
}

static void
amdiommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
}

static struct x86_iommu amd_x86_iommu = {
	.get_x86_common = amdiommu_get_x86_common,
	.unit_pre_instantiate_ctx = amdiommu_unit_pre_instantiate_ctx,
	.find = amdiommu_find_method,
	.domain_unload_entry = amdiommu_domain_unload_entry,
	.domain_unload = amdiommu_domain_unload,
	.get_ctx = amdiommu_get_ctx,
	.free_ctx_locked = amdiommu_free_ctx_locked_method,
	.alloc_msi_intr = amdiommu_alloc_msi_intr,
	.map_msi_intr = amdiommu_map_msi_intr,
	.unmap_msi_intr = amdiommu_unmap_msi_intr,
	.map_ioapic_intr = amdiommu_map_ioapic_intr,
	.unmap_ioapic_intr = amdiommu_unmap_ioapic_intr,
};

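/*
 * Register the AMD implementation of the x86 IOMMU ops when running on
 * an AMD CPU, so that the vendor-neutral x86 IOMMU code dispatches to
 * this driver.
 */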
static void
x86_iommu_set_amd(void *arg __unused)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		set_x86_iommu(&amd_x86_iommu);
}

SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_amd, NULL);

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>

static void
amdiommu_print_domain(struct amdiommu_domain *domain, bool show_mappings)
{
	struct iommu_domain *iodom;

	iodom = DOM2IODOM(domain);

	db_printf(
	    "  @%p dom %d pglvl %d end %jx refs %d\n"
	    "   ctx_cnt %d flags %x pgobj %p map_ents %u\n",
	    domain, domain->domain, domain->pglvl,
	    (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
	    domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);

	iommu_db_domain_print_contexts(iodom);

	if (show_mappings)
		iommu_db_domain_print_mappings(iodom);
}

static void
amdiommu_print_one(struct amdiommu_unit *unit, bool show_domains,
    bool show_mappings, bool show_cmdq)
{
	struct amdiommu_domain *domain;
	struct amdiommu_cmd_generic *cp;
	u_int cmd_head, cmd_tail, ci;

	cmd_head = amdiommu_read4(unit, AMDIOMMU_CMDBUF_HEAD);
	cmd_tail = amdiommu_read4(unit, AMDIOMMU_CMDBUF_TAIL);
	db_printf("amdiommu%d at %p, mmio at %#jx/sz %#jx\n",
	    unit->iommu.unit, unit, (uintmax_t)unit->mmio_base,
	    (uintmax_t)unit->mmio_sz);
	db_printf("  hw ctrl %#018jx cmdevst %#018jx\n",
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CTRL),
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS));
	db_printf("  devtbl at %p\n", unit->dev_tbl);
	db_printf("  hwseq at %p phys %#jx val %#jx\n",
	    &unit->x86c.inv_waitd_seq_hw,
	    pmap_kextract((vm_offset_t)&unit->x86c.inv_waitd_seq_hw),
	    unit->x86c.inv_waitd_seq_hw);
	db_printf("  invq at %p base %#jx hw head/tail %#x/%#x\n",
	    unit->x86c.inv_queue,
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDBUF_BASE),
	    cmd_head, cmd_tail);

	if (show_cmdq) {
		db_printf("  cmd q:\n");
		for (ci = cmd_head; ci != cmd_tail;) {
			cp = (struct amdiommu_cmd_generic *)(unit->
			    x86c.inv_queue + ci);
			db_printf(
			    "    idx %#x op %#x %#010x %#010x %#010x %#010x\n",
			    ci >> AMDIOMMU_CMD_SZ_SHIFT, cp->op,
			    cp->w0, cp->ww1, cp->w2, cp->w3);

			ci += AMDIOMMU_CMD_SZ;
			if (ci == unit->x86c.inv_queue_size)
				ci = 0;
		}
	}

	if (show_domains) {
		db_printf("  domains:\n");
		LIST_FOREACH(domain, &unit->domains, link) {
			amdiommu_print_domain(domain, show_mappings);
			if (db_pager_quit)
				break;
		}
	}
}

DB_SHOW_COMMAND(amdiommu, db_amdiommu_print)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;
	if (!have_addr) {
		db_printf("usage: show amdiommu [/d] [/m] [/q] index\n");
		return;
	}
	if ((vm_offset_t)addr < 0x10000)
		unit = amdiommu_unit_by_device_id(0, (u_int)addr);
	else
		unit = (struct amdiommu_unit *)addr;
	amdiommu_print_one(unit, show_domains, show_mappings, show_cmdq);
}

DB_SHOW_ALL_COMMAND(amdiommus, db_show_all_amdiommus)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		amdiommu_print_one(unit, show_domains, show_mappings,
		    show_cmdq);
		if (db_pager_quit)
			break;
	}
}
#endif