/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/pci_cfgreg.h>
#include "pcib_if.h"
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <x86/apicreg.h>
#include <x86/apicvar.h>
#include <dev/iommu/iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>
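
/*
 * The driver is disabled by default.  Set the hw.amdiommu.enable
 * loader tunable to a non-zero value to let amdiommu_probe()
 * succeed.
 */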
static int amdiommu_enable = 0;

/*
 * All enumerated AMD IOMMU units.
 * Access is unlocked; the list is not modified after early
 * single-threaded startup.
 */
static TAILQ_HEAD(, amdiommu_unit) amdiommu_units =
    TAILQ_HEAD_INITIALIZER(amdiommu_units);

static u_int
ivrs_info_to_unit_id(UINT32 info)
{
	return ((info & ACPI_IVHD_UNIT_ID_MASK) >> 8);
}

typedef bool (*amdiommu_itercc_t)(void *, void *);
typedef bool (*amdiommu_iter40_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter11_t)(ACPI_IVRS_HARDWARE2 *, void *);
typedef bool (*amdiommu_iter10_t)(ACPI_IVRS_HARDWARE1 *, void *);

static bool
amdiommu_ivrs_iterate_tbl_typed(amdiommu_itercc_t iter, void *arg,
    int type, ACPI_TABLE_IVRS *ivrs_tbl)
{
	char *ptr, *ptrend;
	bool done;

	done = false;
	ptr = (char *)ivrs_tbl + sizeof(*ivrs_tbl);
	ptrend = (char *)ivrs_tbl + ivrs_tbl->Header.Length;
	for (;;) {
		ACPI_IVRS_HEADER *ivrsh;

		if (ptr >= ptrend)
			break;
		ivrsh = (ACPI_IVRS_HEADER *)ptr;
		if (ivrsh->Length <= 0) {
			printf("amdiommu_iterate_tbl: corrupted IVRS table, "
			    "length %d\n", ivrsh->Length);
			break;
		}
		ptr += ivrsh->Length;
		if (ivrsh->Type == type) {
			done = iter((void *)ivrsh, arg);
			if (done)
				break;
		}
	}
	return (done);
}

/*
 * Walk over the IVRS table, calling the callback iterators on
 * subtables in priority order: type 0x40 first, then 0x11, then
 * 0x10.  The first iterator that returns true ends the walk.
 * Returns true if any iterator returned true, false otherwise.
 */
static bool
amdiommu_ivrs_iterate_tbl(amdiommu_iter40_t iter40, amdiommu_iter11_t iter11,
    amdiommu_iter10_t iter10, void *arg)
{
	ACPI_TABLE_IVRS *ivrs_tbl;
	ACPI_STATUS status;
	bool done;

	status = AcpiGetTable(ACPI_SIG_IVRS, 1,
	    (ACPI_TABLE_HEADER **)&ivrs_tbl);
	if (ACPI_FAILURE(status))
		return (false);
	done = false;
	if (iter40 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter40, arg,
		    ACPI_IVRS_TYPE_HARDWARE3, ivrs_tbl);
	if (!done && iter11 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter11, arg, ACPI_IVRS_TYPE_HARDWARE2,
		    ivrs_tbl);
	if (!done && iter10 != NULL)
		done = amdiommu_ivrs_iterate_tbl_typed(
		    (amdiommu_itercc_t)iter10, arg, ACPI_IVRS_TYPE_HARDWARE1,
		    ivrs_tbl);
	AcpiPutTable((ACPI_TABLE_HEADER *)ivrs_tbl);
	return (done);
}

struct ivhd_lookup_data {
	struct amdiommu_unit *sc;
	uint16_t devid;
};

static bool
ivrs_lookup_ivhd_0x40(ACPI_IVRS_HARDWARE2 *h2, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    h2->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD, h2 type %#x", h2->Header.Type));

	ildp = arg;
	if (h2->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h2->PciSegmentGroup;
	ildp->sc->efr = h2->EfrRegisterImage;
	return (true);
}

static bool
ivrs_lookup_ivhd_0x10(ACPI_IVRS_HARDWARE1 *h1, void *arg)
{
	struct ivhd_lookup_data *ildp;

	KASSERT(h1->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD, h1 type %#x", h1->Header.Type));

	ildp = arg;
	if (h1->Header.DeviceId != ildp->devid)
		return (false);

	ildp->sc->unit_dom = h1->PciSegmentGroup;
	return (true);
}
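
/*
 * Size of the device table: one struct amdiommu_dte for every
 * possible 16-bit device ID, regardless of how many initiators
 * are actually present.
 */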
static u_int
amdiommu_devtbl_sz(struct amdiommu_unit *sc __unused)
{
	return (sizeof(struct amdiommu_dte) * (1 << 16));
}

static void
amdiommu_free_dev_tbl(struct amdiommu_unit *sc)
{
	u_int devtbl_sz;

	devtbl_sz = amdiommu_devtbl_sz(sc);
	pmap_qremove((vm_offset_t)sc->dev_tbl, atop(devtbl_sz));
	kva_free((vm_offset_t)sc->dev_tbl, devtbl_sz);
	sc->dev_tbl = NULL;
	vm_object_deallocate(sc->devtbl_obj);
	sc->devtbl_obj = NULL;
}

static int
amdiommu_create_dev_tbl(struct amdiommu_unit *sc)
{
	vm_offset_t seg_vaddr;
	u_int devtbl_sz, dom, i, reclaimno, segnum_log, segnum, seg_sz;
	int error;

	static const int devtab_base_regs[] = {
		AMDIOMMU_DEVTAB_BASE,
		AMDIOMMU_DEVTAB_S1_BASE,
		AMDIOMMU_DEVTAB_S2_BASE,
		AMDIOMMU_DEVTAB_S3_BASE,
		AMDIOMMU_DEVTAB_S4_BASE,
		AMDIOMMU_DEVTAB_S5_BASE,
		AMDIOMMU_DEVTAB_S6_BASE,
		AMDIOMMU_DEVTAB_S7_BASE
	};

	segnum_log = (sc->efr & AMDIOMMU_EFR_DEVTBLSEG_MASK) >>
	    AMDIOMMU_EFR_DEVTBLSEG_SHIFT;
	segnum = 1 << segnum_log;

	KASSERT(segnum <= nitems(devtab_base_regs),
	    ("%s: unsupported devtab segment count %u", __func__, segnum));

	devtbl_sz = amdiommu_devtbl_sz(sc);
	seg_sz = devtbl_sz / segnum;
	sc->devtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, atop(devtbl_sz),
	    VM_PROT_ALL, 0, NULL);
	if (bus_get_domain(sc->iommu.dev, &dom) == 0)
		sc->devtbl_obj->domain.dr_policy = DOMAINSET_PREF(dom);

	sc->hw_ctrl &= ~AMDIOMMU_CTRL_DEVTABSEG_MASK;
	sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
	sc->hw_ctrl |= AMDIOMMU_CTRL_COHERENT;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);

	seg_vaddr = kva_alloc(devtbl_sz);
	if (seg_vaddr == 0)
		return (ENOMEM);
	sc->dev_tbl = (void *)seg_vaddr;

	for (i = 0; i < segnum; i++) {
		vm_page_t m;
		uint64_t rval;

		for (reclaimno = 0; reclaimno < 3; reclaimno++) {
			VM_OBJECT_WLOCK(sc->devtbl_obj);
			m = vm_page_alloc_contig(sc->devtbl_obj,
			    i * atop(seg_sz),
			    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0,
			    VM_MEMATTR_DEFAULT);
			VM_OBJECT_WUNLOCK(sc->devtbl_obj);
			if (m != NULL)
				break;
			error = vm_page_reclaim_contig(VM_ALLOC_NORMAL,
			    atop(seg_sz), 0, ~0ul, IOMMU_PAGE_SIZE, 0);
			if (error != 0)
				vm_wait(sc->devtbl_obj);
		}
		if (m == NULL) {
			amdiommu_free_dev_tbl(sc);
			return (ENOMEM);
		}

		rval = VM_PAGE_TO_PHYS(m) | (atop(seg_sz) - 1);
		for (u_int j = 0; j < atop(seg_sz);
		    j++, seg_vaddr += PAGE_SIZE, m++) {
			pmap_zero_page(m);
			pmap_qenter(seg_vaddr, &m, 1);
		}
		amdiommu_write8(sc, devtab_base_regs[i], rval);
	}

	return (0);
}

static int
amdiommu_cmd_event_intr(void *arg)
{
	struct amdiommu_unit *unit;
	uint64_t status;

	unit = arg;
	status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
	if ((status & AMDIOMMU_CMDEVS_COMWAITINT) != 0) {
		amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
		    AMDIOMMU_CMDEVS_COMWAITINT);
		taskqueue_enqueue(unit->x86c.qi_taskqueue,
		    &unit->x86c.qi_task);
	}
	if ((status & (AMDIOMMU_CMDEVS_EVLOGINT |
	    AMDIOMMU_CMDEVS_EVOVRFLW)) != 0)
		amdiommu_event_intr(unit, status);
	return (FILTER_HANDLED);
}
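
/*
 * Allocate and wire up the command/event interrupt.  MSI-X is
 * preferred when available; otherwise plain MSI is used.
 */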
static int
amdiommu_setup_intr(struct amdiommu_unit *sc)
{
	int error, msi_count, msix_count;

	msi_count = pci_msi_count(sc->iommu.dev);
	msix_count = pci_msix_count(sc->iommu.dev);
	if (msi_count == 0 && msix_count == 0) {
		device_printf(sc->iommu.dev, "needs MSI-class intr\n");
		return (ENXIO);
	}

#if 0
	/*
	 * XXXKIB How is MSI-X supposed to be organized for a BAR-less
	 * function?  Practically available hardware implements only
	 * one IOMMU unit per function, and uses MSI.
	 */
	if (msix_count > 0) {
		sc->msix_table = bus_alloc_resource_any(sc->iommu.dev,
		    SYS_RES_MEMORY, &sc->msix_tab_rid, RF_ACTIVE);
		if (sc->msix_table == NULL)
			return (ENXIO);

		if (sc->msix_pba_rid != sc->msix_tab_rid) {
			/* Separate BAR for PBA */
			sc->msix_pba = bus_alloc_resource_any(sc->iommu.dev,
			    SYS_RES_MEMORY,
			    &sc->msix_pba_rid, RF_ACTIVE);
			if (sc->msix_pba == NULL) {
				bus_release_resource(sc->iommu.dev,
				    SYS_RES_MEMORY, &sc->msix_tab_rid,
				    sc->msix_table);
				return (ENXIO);
			}
		}
	}
#endif

	error = ENXIO;
	if (msix_count > 0) {
		error = pci_alloc_msix(sc->iommu.dev, &msix_count);
		if (error == 0)
			sc->numirqs = msix_count;
	}
	if (error != 0 && msi_count > 0) {
		error = pci_alloc_msi(sc->iommu.dev, &msi_count);
		if (error == 0)
			sc->numirqs = msi_count;
	}
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "Failed to allocate MSI/MSI-X (%d)\n", error);
		return (ENXIO);
	}

	/*
	 * XXXKIB The spec states that MISC0.MsiNum must be zero for an
	 * IOMMU using MSI interrupts.  But at least one BIOS programs
	 * '2' there, making the driver use the wrong rid and causing
	 * the command/event interrupt to be ignored as stray.  Work
	 * around it by blindly assuming that MsiNum is zero for MSI.
	 */
	sc->irq_cmdev_rid = 1;
	if (msix_count > 0) {
		sc->irq_cmdev_rid += pci_read_config(sc->iommu.dev,
		    sc->seccap_reg + PCIR_AMDIOMMU_MISC0, 4) &
		    PCIM_AMDIOMMU_MISC0_MSINUM_MASK;
	}

	sc->irq_cmdev = bus_alloc_resource_any(sc->iommu.dev, SYS_RES_IRQ,
	    &sc->irq_cmdev_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_cmdev == NULL) {
		device_printf(sc->iommu.dev,
		    "unable to map CMD/EV interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(sc->iommu.dev, sc->irq_cmdev,
	    INTR_TYPE_MISC, amdiommu_cmd_event_intr, NULL, sc,
	    &sc->irq_cmdev_cookie);
	if (error != 0) {
		device_printf(sc->iommu.dev,
		    "unable to setup interrupt (%d)\n", error);
		return (ENXIO);
	}
	bus_describe_intr(sc->iommu.dev, sc->irq_cmdev, sc->irq_cmdev_cookie,
	    "cmdev");

	if (x2apic_mode) {
		AMDIOMMU_LOCK(sc);
		sc->hw_ctrl |= AMDIOMMU_CTRL_GA_EN | AMDIOMMU_CTRL_XT_EN;
		amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
		/* XXXKIB AMDIOMMU_CTRL_INTCAPXT_EN and program x2APIC_CTRL */
		AMDIOMMU_UNLOCK(sc);
	}

	return (0);
}

static int
amdiommu_probe(device_t dev)
{
	int seccap_reg;
	int error;
	uint32_t cap_h, cap_type, cap_rev;

	if (acpi_disabled("amdiommu"))
		return (ENXIO);
	TUNABLE_INT_FETCH("hw.amdiommu.enable", &amdiommu_enable);
	if (!amdiommu_enable)
		return (ENXIO);
	if (pci_get_class(dev) != PCIC_BASEPERIPH ||
	    pci_get_subclass(dev) != PCIS_BASEPERIPH_IOMMU)
		return (ENXIO);

	error = pci_find_cap(dev, PCIY_SECDEV, &seccap_reg);
	if (error != 0 || seccap_reg == 0)
		return (ENXIO);

	cap_h = pci_read_config(dev, seccap_reg + PCIR_AMDIOMMU_CAP_HEADER,
	    4);
	cap_type = cap_h & PCIM_AMDIOMMU_CAP_TYPE_MASK;
	cap_rev = cap_h & PCIM_AMDIOMMU_CAP_REV_MASK;
	if (cap_type != PCIM_AMDIOMMU_CAP_TYPE_VAL ||
	    cap_rev != PCIM_AMDIOMMU_CAP_REV_VAL)
		return (ENXIO);

	device_set_desc(dev, "DMA remap");
	return (BUS_PROBE_SPECIFIC);
}
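
/*
 * Attach: find this unit's IVHD entry, map the MMIO register window
 * advertised by the PCI capability, create the device table and the
 * command and event queues, set up the interrupt, and finally enable
 * translation.
 */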
static int
amdiommu_attach(device_t dev)
{
	struct amdiommu_unit *sc;
	struct ivhd_lookup_data ild;
	int error;
	uint32_t base_low, base_high;
	bool res;

	sc = device_get_softc(dev);
	sc->iommu.unit = device_get_unit(dev);
	sc->iommu.dev = dev;

	error = pci_find_cap(dev, PCIY_SECDEV, &sc->seccap_reg);
	if (error != 0 || sc->seccap_reg == 0)
		return (ENXIO);

	base_low = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_LOW, 4);
	base_high = pci_read_config(dev, sc->seccap_reg +
	    PCIR_AMDIOMMU_BASE_HIGH, 4);
	sc->mmio_base = (base_low & PCIM_AMDIOMMU_BASE_LOW_ADDRM) |
	    ((uint64_t)base_high << 32);

	sc->device_id = pci_get_rid(dev);
	ild.sc = sc;
	ild.devid = sc->device_id;
	res = amdiommu_ivrs_iterate_tbl(ivrs_lookup_ivhd_0x40,
	    ivrs_lookup_ivhd_0x40, ivrs_lookup_ivhd_0x10, &ild);
	if (!res) {
		device_printf(dev, "Cannot find IVHD\n");
		return (ENXIO);
	}

	mtx_init(&sc->iommu.lock, "amdihw", NULL, MTX_DEF);
	sc->domids = new_unrhdr(0, 0xffff, &sc->iommu.lock);
	LIST_INIT(&sc->domains);
	sysctl_ctx_init(&sc->iommu.sysctl_ctx);

	sc->mmio_sz = ((sc->efr & AMDIOMMU_EFR_PC_SUP) != 0 ? 512 : 16) *
	    1024;

	sc->mmio_rid = AMDIOMMU_RID;
	error = bus_set_resource(dev, SYS_RES_MEMORY, AMDIOMMU_RID,
	    sc->mmio_base, sc->mmio_sz);
	if (error != 0) {
		device_printf(dev,
		    "bus_set_resource %#jx-%#jx failed, error %d\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz, error);
		error = ENXIO;
		goto errout1;
	}
	sc->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->mmio_rid,
	    sc->mmio_base, sc->mmio_base + sc->mmio_sz - 1, sc->mmio_sz,
	    RF_ALLOCATED | RF_ACTIVE | RF_SHAREABLE);
	if (sc->mmio_res == NULL) {
		device_printf(dev,
		    "bus_alloc_resource %#jx-%#jx failed\n",
		    (uintmax_t)sc->mmio_base, (uintmax_t)sc->mmio_base +
		    sc->mmio_sz);
		error = ENXIO;
		goto errout2;
	}

	sc->hw_ctrl = amdiommu_read8(sc, AMDIOMMU_CTRL);
	if (bootverbose)
		device_printf(dev, "ctrl reg %#jx\n", (uintmax_t)sc->hw_ctrl);
	if ((sc->hw_ctrl & AMDIOMMU_CTRL_EN) != 0) {
		device_printf(dev, "CTRL_EN is set, bailing out\n");
		error = EBUSY;
		goto errout2;
	}

	iommu_high = BUS_SPACE_MAXADDR;

	error = amdiommu_create_dev_tbl(sc);
	if (error != 0)
		goto errout3;

	error = amdiommu_init_cmd(sc);
	if (error != 0)
		goto errout4;

	error = amdiommu_init_event(sc);
	if (error != 0)
		goto errout5;

	error = amdiommu_setup_intr(sc);
	if (error != 0)
		goto errout6;

	error = iommu_init_busdma(AMD2IOMMU(sc));
	if (error != 0)
		goto errout7;
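
	/* Initialize this unit's interrupt remapping table (IRT) state. */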
	error = amdiommu_init_irt(sc);
	if (error != 0)
		goto errout8;

	/*
	 * Unlike DMAR, the AMD IOMMU does not process the command
	 * queue unless the IOMMU is enabled.  But since a non-present
	 * devtab entry makes the IOMMU ignore transactions from the
	 * corresponding initiator, IOMMU operation is de facto
	 * disabled for both DMA and interrupt remapping until then.
	 */
	AMDIOMMU_LOCK(sc);
	sc->hw_ctrl |= AMDIOMMU_CTRL_EN;
	amdiommu_write8(sc, AMDIOMMU_CTRL, sc->hw_ctrl);
	if (bootverbose) {
		printf("amdiommu%d: enabled translation\n",
		    AMD2IOMMU(sc)->unit);
	}
	AMDIOMMU_UNLOCK(sc);

	TAILQ_INSERT_TAIL(&amdiommu_units, sc, unit_next);
	return (0);

errout8:
	iommu_fini_busdma(&sc->iommu);
errout7:
	pci_release_msi(dev);
errout6:
	amdiommu_fini_event(sc);
errout5:
	amdiommu_fini_cmd(sc);
errout4:
	amdiommu_free_dev_tbl(sc);
errout3:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res);
errout2:
	bus_delete_resource(dev, SYS_RES_MEMORY, sc->mmio_rid);
errout1:
	sysctl_ctx_free(&sc->iommu.sysctl_ctx);
	delete_unrhdr(sc->domids);
	mtx_destroy(&sc->iommu.lock);

	return (error);
}

static int
amdiommu_detach(device_t dev)
{
	return (EBUSY);
}

static int
amdiommu_suspend(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static int
amdiommu_resume(device_t dev)
{
	/* XXXKIB */
	return (0);
}

static device_method_t amdiommu_methods[] = {
	DEVMETHOD(device_probe, amdiommu_probe),
	DEVMETHOD(device_attach, amdiommu_attach),
	DEVMETHOD(device_detach, amdiommu_detach),
	DEVMETHOD(device_suspend, amdiommu_suspend),
	DEVMETHOD(device_resume, amdiommu_resume),
	DEVMETHOD_END
};

static driver_t amdiommu_driver = {
	"amdiommu",
	amdiommu_methods,
	sizeof(struct amdiommu_unit),
};

EARLY_DRIVER_MODULE(amdiommu, pci, amdiommu_driver, 0, 0, BUS_PASS_SUPPORTDEV);
MODULE_DEPEND(amdiommu, pci, 1, 1, 1);

static struct amdiommu_unit *
amdiommu_unit_by_device_id(u_int pci_seg, u_int device_id)
{
	struct amdiommu_unit *unit;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		if (unit->unit_dom == pci_seg && unit->device_id == device_id)
			return (unit);
	}
	return (NULL);
}

struct ivhd_find_unit {
	u_int domain;
	uintptr_t rid;
	int devno;
	enum {
		IFU_DEV_PCI,
		IFU_DEV_IOAPIC,
		IFU_DEV_HPET,
	} type;
	u_int device_id;
	uint16_t rid_real;
	uint8_t dte;
	uint32_t edte;
};
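
/*
 * Scan the variable-length device entries that follow an IVHD
 * header, looking for the entry covering the initiator described by
 * *ifu.  SELECT and START..END entries match a rid directly, ALIAS
 * entries additionally translate it (rid_real), special entries
 * match IOAPIC/HPET handles, and PAD entries are skipped.  Returns
 * true and fills *ifu on a match.
 */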
static bool
amdiommu_find_unit_scan_ivrs(ACPI_IVRS_DE_HEADER *d, size_t tlen,
    struct ivhd_find_unit *ifu)
{
	char *db, *de;
	size_t len;

	for (de = (char *)d + tlen; (char *)d < de;
	    d = (ACPI_IVRS_DE_HEADER *)(db + len)) {
		db = (char *)d;
		if (d->Type == ACPI_IVRS_TYPE_PAD4) {
			len = sizeof(ACPI_IVRS_DEVICE4);
		} else if (d->Type == ACPI_IVRS_TYPE_ALL) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			len = sizeof(*d4);
			ifu->dte = d4->Header.DataSetting;
		} else if (d->Type == ACPI_IVRS_TYPE_SELECT) {
			ACPI_IVRS_DEVICE4 *d4;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			if (d4->Header.Id == ifu->rid) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_START) {
			ACPI_IVRS_DEVICE4 *d4, *d4n;

			d4 = (ACPI_IVRS_DEVICE4 *)db;
			d4n = d4 + 1;
			if (d4n->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS dev4 start not followed by END "
				    "(%#x)\n", d4n->Header.Type);
				return (false);
			}
			if (d4->Header.Id <= ifu->rid &&
			    ifu->rid <= d4n->Header.Id) {
				ifu->dte = d4->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				return (true);
			}
			len = 2 * sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_PAD8) {
			len = sizeof(ACPI_IVRS_DEVICE8A);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_SELECT) {
			ACPI_IVRS_DEVICE8A *d8a;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			if (d8a->Header.Id == ifu->rid) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a);
		} else if (d->Type == ACPI_IVRS_TYPE_ALIAS_START) {
			ACPI_IVRS_DEVICE8A *d8a;
			ACPI_IVRS_DEVICE4 *d4;

			d8a = (ACPI_IVRS_DEVICE8A *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(d8a + 1);
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS alias start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8a->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8a->Header.DataSetting;
				ifu->rid_real = d8a->UsedId;
				return (true);
			}
			len = sizeof(*d8a) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_SELECT) {
			ACPI_IVRS_DEVICE8B *d8b;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			if (d8b->Header.Id == ifu->rid) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b);
		} else if (d->Type == ACPI_IVRS_TYPE_EXT_START) {
			ACPI_IVRS_DEVICE8B *d8b;
			ACPI_IVRS_DEVICE4 *d4;

			d8b = (ACPI_IVRS_DEVICE8B *)db;
			d4 = (ACPI_IVRS_DEVICE4 *)(db + sizeof(*d8b));
			if (d4->Header.Type != ACPI_IVRS_TYPE_END) {
				printf("IVRS ext start not followed by END "
				    "(%#x)\n", d4->Header.Type);
				return (false);
			}
			if (d8b->Header.Id <= ifu->rid &&
			    ifu->rid <= d4->Header.Id) {
				ifu->dte = d8b->Header.DataSetting;
				ifu->rid_real = ifu->rid;
				ifu->edte = d8b->ExtendedData;
				return (true);
			}
			len = sizeof(*d8b) + sizeof(*d4);
		} else if (d->Type == ACPI_IVRS_TYPE_SPECIAL) {
			ACPI_IVRS_DEVICE8C *d8c;

			d8c = (ACPI_IVRS_DEVICE8C *)db;
			if (((ifu->type == IFU_DEV_IOAPIC &&
			    d8c->Variety == ACPI_IVHD_IOAPIC) ||
			    (ifu->type == IFU_DEV_HPET &&
			    d8c->Variety == ACPI_IVHD_HPET)) &&
			    ifu->devno == d8c->Handle) {
				ifu->dte = d8c->Header.DataSetting;
				ifu->rid_real = d8c->UsedId;
				return (true);
			}
			len = sizeof(*d8c);
		} else if (d->Type == ACPI_IVRS_TYPE_HID) {
			ACPI_IVRS_DEVICE_HID *dh;

			dh = (ACPI_IVRS_DEVICE_HID *)db;
			len = sizeof(*dh) + dh->UidLength;
			/* XXXKIB */
		} else {
#if 0
			printf("amdiommu: unknown IVRS device entry type %#x\n",
			    d->Type);
#endif
			if (d->Type <= 63)
				len = sizeof(ACPI_IVRS_DEVICE4);
			else if (d->Type <= 127)
				len = sizeof(ACPI_IVRS_DEVICE8A);
			else {
				printf("amdiommu: abort, cannot "
				    "advance iterator, item type %#x\n",
				    d->Type);
				return (false);
			}
		}
	}
	return (false);
}

static bool
amdiommu_find_unit_scan_0x11(ACPI_IVRS_HARDWARE2 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE2 ||
	    ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE3,
	    ("Misparsed IVHD h2, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length -
	    sizeof(*ivrs), ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}
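
/*
 * Like amdiommu_find_unit_scan_0x11(), but for the legacy type 0x10
 * (ACPI_IVRS_HARDWARE1) IVHD subtable layout.
 */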
static bool
amdiommu_find_unit_scan_0x10(ACPI_IVRS_HARDWARE1 *ivrs, void *arg)
{
	struct ivhd_find_unit *ifu = arg;
	ACPI_IVRS_DE_HEADER *d;
	bool res;

	KASSERT(ivrs->Header.Type == ACPI_IVRS_TYPE_HARDWARE1,
	    ("Misparsed IVHD h1, ivrs type %#x", ivrs->Header.Type));

	if (ifu->domain != ivrs->PciSegmentGroup)
		return (false);
	d = (ACPI_IVRS_DE_HEADER *)(ivrs + 1);
	res = amdiommu_find_unit_scan_ivrs(d, ivrs->Header.Length -
	    sizeof(*ivrs), ifu);
	if (res)
		ifu->device_id = ivrs->Header.DeviceId;
	return (res);
}

static void
amdiommu_dev_prop_dtr(device_t dev, const char *name, void *val, void *dtr_ctx)
{
	free(val, M_DEVBUF);
}

static int *
amdiommu_dev_fetch_flagsp(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, error;

	bus_topo_assert();
	error = device_get_prop(dev, device_get_nameunit(unit->iommu.dev),
	    (void **)&flagsp);
	if (error == ENOENT) {
		flagsp = malloc(sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO);
		device_set_prop(dev, device_get_nameunit(unit->iommu.dev),
		    flagsp, amdiommu_dev_prop_dtr, unit);
	}
	return (flagsp);
}

static int
amdiommu_get_dev_prop_flags(struct amdiommu_unit *unit, device_t dev)
{
	int *flagsp, flags;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	flags = *flagsp;
	bus_topo_unlock();
	return (flags);
}

static void
amdiommu_set_dev_prop_flags(struct amdiommu_unit *unit, device_t dev,
    int flag)
{
	int *flagsp;

	bus_topo_lock();
	flagsp = amdiommu_dev_fetch_flagsp(unit, dev);
	*flagsp |= flag;
	bus_topo_unlock();
}

int
amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp, uint16_t *ridp,
    uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int error, flags;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	if (device_get_devclass(device_get_parent(dev)) !=
	    devclass_find("pci"))
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_PCI;

	error = pci_get_id(dev, PCI_ID_RID, &ifu.rid);
	if (error != 0) {
		if (verbose)
			device_printf(dev,
			    "amdiommu cannot get rid, error %d\n", error);
		return (ENXIO);
	}

	ifu.domain = pci_get_domain(dev);
	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot match rid in IVHD\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(ifu.domain, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			device_printf(dev,
			    "(%#06x:%#06x) amdiommu cannot find unit\n",
			    ifu.domain, (unsigned)ifu.rid);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(dev, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		flags = amdiommu_get_dev_prop_flags(unit, dev);
		if ((flags & AMDIOMMU_DEV_REPORTED) == 0) {
			amdiommu_set_dev_prop_flags(unit, dev,
			    AMDIOMMU_DEV_REPORTED);
			device_printf(dev, "amdiommu%d "
			    "initiator rid %#06x dte %#x edte %#x\n",
			    unit->iommu.unit, ifu.rid_real, ifu.dte, ifu.edte);
		}
	}
	return (0);
}
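
/*
 * Resolve the IOMMU unit responsible for the given IOAPIC.  The
 * IOAPIC is identified by its handle in an IVHD special device entry
 * (ACPI_IVHD_IOAPIC), which also supplies the rid used as the
 * interrupt-remapping initiator.
 */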
amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	device_t apic_dev;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_IOAPIC;
	ifu.devno = apic_id;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match ioapic no %d in IVHD\n",
			    apic_id);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit by dev id %#x\n",
			    ifu.device_id);
		return (ENXIO);
	}
	apic_dev = ioapic_get_dev(apic_id);
	if (apic_dev != NULL)
		iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
	*unitp = unit;
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d IOAPIC %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, apic_id, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

int
amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose)
{
	struct ivhd_find_unit ifu;
	struct amdiommu_unit *unit;
	int hpet_no;
	bool res;

	if (!amdiommu_enable)
		return (ENXIO);

	hpet_no = hpet_get_uid(hpet);
	bzero(&ifu, sizeof(ifu));
	ifu.type = IFU_DEV_HPET;
	ifu.devno = hpet_no;
	ifu.rid = -1;

	res = amdiommu_ivrs_iterate_tbl(amdiommu_find_unit_scan_0x11,
	    amdiommu_find_unit_scan_0x11, amdiommu_find_unit_scan_0x10, &ifu);
	if (!res) {
		if (verbose)
			printf("amdiommu cannot match hpet no %d in IVHD\n",
			    hpet_no);
		return (ENXIO);
	}

	unit = amdiommu_unit_by_device_id(0, ifu.device_id);
	if (unit == NULL) {
		if (verbose)
			printf("amdiommu cannot find unit for hpet no %d\n",
			    hpet_no);
		return (ENXIO);
	}
	*unitp = unit;
	iommu_device_set_iommu_prop(hpet, unit->iommu.dev);
	if (ridp != NULL)
		*ridp = ifu.rid_real;
	if (dtep != NULL)
		*dtep = ifu.dte;
	if (edtep != NULL)
		*edtep = ifu.edte;
	if (verbose) {
		printf("amdiommu%d HPET no %d "
		    "initiator rid %#06x dte %#x edte %#x\n",
		    unit->iommu.unit, hpet_no, ifu.rid_real, ifu.dte,
		    ifu.edte);
	}
	return (0);
}

static struct iommu_unit *
amdiommu_find_method(device_t dev, bool verbose)
{
	struct amdiommu_unit *unit;
	int error;
	uint32_t edte;
	uint16_t rid;
	uint8_t dte;

	error = amdiommu_find_unit(dev, &unit, &rid, &dte, &edte, verbose);
	if (error != 0) {
		if (verbose && amdiommu_enable)
			device_printf(dev,
			    "cannot find amdiommu unit, error %d\n",
			    error);
		return (NULL);
	}
	return (&unit->iommu);
}

static struct x86_unit_common *
amdiommu_get_x86_common(struct iommu_unit *unit)
{
	struct amdiommu_unit *iommu;

	iommu = IOMMU2AMD(unit);
	return (&iommu->x86c);
}

static void
amdiommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
}
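
/*
 * Method table hooking the AMD implementation into the common x86
 * IOMMU framework; registered by x86_iommu_set_amd() below.
 */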
static struct x86_iommu amd_x86_iommu = {
	.get_x86_common = amdiommu_get_x86_common,
	.unit_pre_instantiate_ctx = amdiommu_unit_pre_instantiate_ctx,
	.find = amdiommu_find_method,
	.domain_unload_entry = amdiommu_domain_unload_entry,
	.domain_unload = amdiommu_domain_unload,
	.get_ctx = amdiommu_get_ctx,
	.free_ctx_locked = amdiommu_free_ctx_locked_method,
	.alloc_msi_intr = amdiommu_alloc_msi_intr,
	.map_msi_intr = amdiommu_map_msi_intr,
	.unmap_msi_intr = amdiommu_unmap_msi_intr,
	.map_ioapic_intr = amdiommu_map_ioapic_intr,
	.unmap_ioapic_intr = amdiommu_unmap_ioapic_intr,
};

static void
x86_iommu_set_amd(void *arg __unused)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		set_x86_iommu(&amd_x86_iommu);
}

SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_amd, NULL);

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>

static void
amdiommu_print_domain(struct amdiommu_domain *domain, bool show_mappings)
{
	struct iommu_domain *iodom;

	iodom = DOM2IODOM(domain);

	db_printf(
	    "  @%p dom %d pglvl %d end %jx refs %d\n"
	    "  ctx_cnt %d flags %x pgobj %p map_ents %u\n",
	    domain, domain->domain, domain->pglvl,
	    (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
	    domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);

	iommu_db_domain_print_contexts(iodom);

	if (show_mappings)
		iommu_db_domain_print_mappings(iodom);
}

static void
amdiommu_print_one(struct amdiommu_unit *unit, bool show_domains,
    bool show_mappings, bool show_cmdq)
{
	struct amdiommu_domain *domain;
	struct amdiommu_cmd_generic *cp;
	u_int cmd_head, cmd_tail, ci;

	cmd_head = amdiommu_read4(unit, AMDIOMMU_CMDBUF_HEAD);
	cmd_tail = amdiommu_read4(unit, AMDIOMMU_CMDBUF_TAIL);
	db_printf("amdiommu%d at %p, mmio at %#jx/sz %#jx\n",
	    unit->iommu.unit, unit, (uintmax_t)unit->mmio_base,
	    (uintmax_t)unit->mmio_sz);
	db_printf("  hw ctrl %#018jx cmdevst %#018jx\n",
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CTRL),
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS));
	db_printf("  devtbl at %p\n", unit->dev_tbl);
	db_printf("  hwseq at %p phys %#jx val %#jx\n",
	    &unit->x86c.inv_waitd_seq_hw,
	    (uintmax_t)pmap_kextract((vm_offset_t)&unit->x86c.inv_waitd_seq_hw),
	    (uintmax_t)unit->x86c.inv_waitd_seq_hw);
	db_printf("  invq at %p base %#jx hw head/tail %#x/%#x\n",
	    unit->x86c.inv_queue,
	    (uintmax_t)amdiommu_read8(unit, AMDIOMMU_CMDBUF_BASE),
	    cmd_head, cmd_tail);

	if (show_cmdq) {
		db_printf("  cmd q:\n");
		for (ci = cmd_head; ci != cmd_tail;) {
			cp = (struct amdiommu_cmd_generic *)(unit->
			    x86c.inv_queue + ci);
			db_printf(
			    "    idx %#x op %#x %#010x %#010x %#010x %#010x\n",
			    ci >> AMDIOMMU_CMD_SZ_SHIFT, cp->op,
			    cp->w0, cp->ww1, cp->w2, cp->w3);

			ci += AMDIOMMU_CMD_SZ;
			if (ci == unit->x86c.inv_queue_size)
				ci = 0;
		}
	}

	if (show_domains) {
		db_printf("  domains:\n");
		LIST_FOREACH(domain, &unit->domains, link) {
			amdiommu_print_domain(domain, show_mappings);
			if (db_pager_quit)
				break;
		}
	}
}
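
/*
 * DDB "show amdiommu" command.  The argument is either a device id
 * below 0x10000 or a pointer to the unit softc; the modifiers select
 * domains (/d), mappings (/m), and the command queue (/q).
 */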
DB_SHOW_COMMAND(amdiommu, db_amdiommu_print)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;
	if (!have_addr) {
		db_printf("usage: show amdiommu [/d] [/m] [/q] index\n");
		return;
	}
	if ((vm_offset_t)addr < 0x10000)
		unit = amdiommu_unit_by_device_id(0, (u_int)addr);
	else
		unit = (struct amdiommu_unit *)addr;
	if (unit == NULL) {
		db_printf("amdiommu with device id %#x not found\n",
		    (u_int)addr);
		return;
	}
	amdiommu_print_one(unit, show_domains, show_mappings, show_cmdq);
}

DB_SHOW_ALL_COMMAND(amdiommus, db_show_all_amdiommus)
{
	struct amdiommu_unit *unit;
	bool show_domains, show_mappings, show_cmdq;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	show_cmdq = strchr(modif, 'q') != NULL;

	TAILQ_FOREACH(unit, &amdiommu_units, unit_next) {
		amdiommu_print_one(unit, show_domains, show_mappings,
		    show_cmdq);
		if (db_pager_quit)
			break;
	}
}
#endif