/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#if defined(__amd64__)
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/pci_cfgreg.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#include <machine/intr_machdep.h>
#include <x86/apicreg.h>
#include <x86/apicvar.h>
#endif

#define	DMAR_FAULT_IRQ_RID	0
#define	DMAR_QI_IRQ_RID		1
#define	DMAR_REG_RID		2

static device_t *dmar_devs;
static int dmar_devcnt;

typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *);

static void
dmar_iterate_tbl(dmar_iter_t iter, void *arg)
{
	ACPI_TABLE_DMAR *dmartbl;
	ACPI_DMAR_HEADER *dmarh;
	char *ptr, *ptrend;
	ACPI_STATUS status;

	status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
	if (ACPI_FAILURE(status))
		return;
	ptr = (char *)dmartbl + sizeof(*dmartbl);
	ptrend = (char *)dmartbl + dmartbl->Header.Length;
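	/*
	 * Walk the variable-length remapping structures that follow
	 * the DMAR table header, handing each one to the callback
	 * until it returns 0 or the table is exhausted.
	 */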
	for (;;) {
		if (ptr >= ptrend)
			break;
		dmarh = (ACPI_DMAR_HEADER *)ptr;
		if (dmarh->Length <= 0) {
			printf("dmar_identify: corrupted DMAR table, l %d\n",
			    dmarh->Length);
			break;
		}
		ptr += dmarh->Length;
		if (!iter(dmarh, arg))
			break;
	}
	AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl);
}

struct find_iter_args {
	int i;
	ACPI_DMAR_HARDWARE_UNIT *res;
};

static int
dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	struct find_iter_args *fia;

	if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
		return (1);

	fia = arg;
	if (fia->i == 0) {
		fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh;
		return (0);
	}
	fia->i--;
	return (1);
}

static ACPI_DMAR_HARDWARE_UNIT *
dmar_find_by_index(int idx)
{
	struct find_iter_args fia;

	fia.i = idx;
	fia.res = NULL;
	dmar_iterate_tbl(dmar_find_iter, &fia);
	return (fia.res);
}

static int
dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{

	if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT)
		dmar_devcnt++;
	return (1);
}

/* Remapping Hardware Static Affinity Structure lookup */
struct rhsa_iter_arg {
	uint64_t base;
	u_int proxim_dom;
};

static int
dmar_rhsa_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	struct rhsa_iter_arg *ria;
	ACPI_DMAR_RHSA *adr;

	if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_AFFINITY) {
		ria = arg;
		adr = (ACPI_DMAR_RHSA *)dmarh;
		if (adr->BaseAddress == ria->base)
			ria->proxim_dom = adr->ProximityDomain;
	}
	return (1);
}

int dmar_rmrr_enable = 1;

static int dmar_enable = 0;

static void
dmar_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_DMAR *dmartbl;
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	struct rhsa_iter_arg ria;
	ACPI_STATUS status;
	int i, haw, error;

	if (acpi_disabled("dmar"))
		return;
	TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable);
	if (!dmar_enable)
		return;
	TUNABLE_INT_FETCH("hw.dmar.rmrr_enable", &dmar_rmrr_enable);

	status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
	if (ACPI_FAILURE(status))
		return;
	haw = dmartbl->Width + 1;
	if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR)
		iommu_high = BUS_SPACE_MAXADDR;
	else
		iommu_high = 1ULL << (haw + 1);
	if (bootverbose) {
		printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width,
		    (unsigned)dmartbl->Flags,
		    "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
	}
	AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl);

	dmar_iterate_tbl(dmar_count_iter, NULL);
	if (dmar_devcnt == 0)
		return;
	dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < dmar_devcnt; i++) {
		dmarh = dmar_find_by_index(i);
		if (dmarh == NULL) {
			printf("dmar_identify: cannot find HWUNIT %d\n", i);
			continue;
		}
		dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i);
		if (dmar_devs[i] == NULL) {
			printf("dmar_identify: cannot create instance %d\n", i);
			continue;
		}
		error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY,
		    DMAR_REG_RID, dmarh->Address, PAGE_SIZE);
		if (error != 0) {
			printf(
	"dmar%d: unable to alloc register window at 0x%08jx: error %d\n",
			    i, (uintmax_t)dmarh->Address, error);
			device_delete_child(parent, dmar_devs[i]);
			dmar_devs[i] = NULL;
			continue;
		}

		ria.base = dmarh->Address;
		ria.proxim_dom = -1;
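		/*
		 * If an RHSA structure matches this unit's register
		 * base, bind the unit to the NUMA domain of its
		 * remapping hardware.
		 */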
		dmar_iterate_tbl(dmar_rhsa_iter, &ria);
		acpi_set_domain(dmar_devs[i], ria.proxim_dom == -1 ?
		    ACPI_DEV_DOMAIN_UNKNOWN :
		    acpi_map_pxm_to_vm_domainid(ria.proxim_dom));
	}
}

static int
dmar_probe(device_t dev)
{

	if (acpi_get_handle(dev) != NULL)
		return (ENXIO);
	device_set_desc(dev, "DMA remap");
	return (BUS_PROBE_NOWILDCARD);
}

static void
dmar_release_resources(device_t dev, struct dmar_unit *unit)
{
	int i;

	iommu_fini_busdma(&unit->iommu);
	dmar_fini_irt(unit);
	dmar_fini_qi(unit);
	dmar_fini_fault_log(unit);
	for (i = 0; i < DMAR_INTR_TOTAL; i++)
		iommu_release_intr(DMAR2IOMMU(unit), i);
	if (unit->regs != NULL) {
		bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
		    unit->regs);
		bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
		    unit->regs);
		unit->regs = NULL;
	}
	if (unit->domids != NULL) {
		delete_unrhdr(unit->domids);
		unit->domids = NULL;
	}
	if (unit->ctx_obj != NULL) {
		vm_object_deallocate(unit->ctx_obj);
		unit->ctx_obj = NULL;
	}
	sysctl_ctx_free(&unit->iommu.sysctl_ctx);
}

#ifdef DEV_APIC
static int
dmar_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct dmar_unit *unit;
	struct iommu_msi_data *dmd;
	uint64_t msi_addr;
	uint32_t msi_data;
	int i, error;

	unit = device_get_softc(dev);
	for (i = 0; i < DMAR_INTR_TOTAL; i++) {
		dmd = &unit->x86c.intrs[i];
		if (irq == dmd->irq) {
			error = PCIB_MAP_MSI(device_get_parent(
			    device_get_parent(dev)),
			    dev, irq, &msi_addr, &msi_data);
			if (error != 0)
				return (error);
			DMAR_LOCK(unit);
			dmd->msi_data = msi_data;
			dmd->msi_addr = msi_addr;
			(dmd->disable_intr)(DMAR2IOMMU(unit));
			dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
			dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
			dmar_write4(unit, dmd->msi_uaddr_reg,
			    dmd->msi_addr >> 32);
			(dmd->enable_intr)(DMAR2IOMMU(unit));
			DMAR_UNLOCK(unit);
			return (0);
		}
	}
	return (ENOENT);
}
#endif

static void
dmar_print_caps(device_t dev, struct dmar_unit *unit,
    ACPI_DMAR_HARDWARE_UNIT *dmaru)
{
	uint32_t caphi, ecaphi;

	device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n",
	    (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver),
	    DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment,
	    dmaru->Flags, "\020\001INCLUDE_ALL_PCI");
	caphi = unit->hw_cap >> 32;
	device_printf(dev, "cap=%b,", (u_int)unit->hw_cap,
	    "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH");
	printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD\031FL1GP\034PSI");
	printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d",
	    DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap),
	    DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap),
	    DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap));
	if ((unit->hw_cap & DMAR_CAP_PSI) != 0)
		printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap));
	printf("\n");
	ecaphi = unit->hw_ecap >> 32;
	device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap,
	    "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC\031ECS\032MTS"
	    "\033NEST\034DIS\035PASID\036PRS\037ERS\040SRS");
	printf("%b, ", ecaphi, "\020\002NWFS\003EAFS");
	printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap),
	    DMAR_ECAP_IRO(unit->hw_ecap));
}
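
/*
 * Set up a single DMAR unit: map its register window, allocate the
 * fault and (if supported) invalidation-queue interrupts, install the
 * root entry table, and hook the unit into the busdma framework.
 */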
static int
dmar_attach(device_t dev)
{
	struct dmar_unit *unit;
	ACPI_DMAR_HARDWARE_UNIT *dmaru;
	struct iommu_msi_data *dmd;
	uint64_t timeout;
	int disable_pmr;
	int i, error;

	unit = device_get_softc(dev);
	unit->iommu.unit = device_get_unit(dev);
	unit->iommu.dev = dev;
	sysctl_ctx_init(&unit->iommu.sysctl_ctx);
	dmaru = dmar_find_by_index(unit->iommu.unit);
	if (dmaru == NULL)
		return (EINVAL);
	unit->segment = dmaru->Segment;
	unit->base = dmaru->Address;
	unit->reg_rid = DMAR_REG_RID;
	unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &unit->reg_rid, RF_ACTIVE);
	if (unit->regs == NULL) {
		device_printf(dev, "cannot allocate register window\n");
		dmar_devs[unit->iommu.unit] = NULL;
		return (ENOMEM);
	}
	unit->hw_ver = dmar_read4(unit, DMAR_VER_REG);
	unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG);
	unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG);
	if (bootverbose)
		dmar_print_caps(dev, unit, dmaru);
	dmar_quirks_post_ident(unit);
	unit->memdomain = acpi_get_domain(dev);
	timeout = dmar_get_timeout();
	TUNABLE_UINT64_FETCH("hw.iommu.dmar.timeout", &timeout);
	dmar_update_timeout(timeout);

	for (i = 0; i < DMAR_INTR_TOTAL; i++)
		unit->x86c.intrs[i].irq = -1;

	dmd = &unit->x86c.intrs[DMAR_INTR_FAULT];
	dmd->name = "fault";
	dmd->irq_rid = DMAR_FAULT_IRQ_RID;
	dmd->handler = dmar_fault_intr;
	dmd->msi_data_reg = DMAR_FEDATA_REG;
	dmd->msi_addr_reg = DMAR_FEADDR_REG;
	dmd->msi_uaddr_reg = DMAR_FEUADDR_REG;
	dmd->enable_intr = dmar_enable_fault_intr;
	dmd->disable_intr = dmar_disable_fault_intr;
	error = iommu_alloc_irq(DMAR2IOMMU(unit), DMAR_INTR_FAULT);
	if (error != 0) {
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
	dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
	dmar_write4(unit, dmd->msi_uaddr_reg, dmd->msi_addr >> 32);

	if (DMAR_HAS_QI(unit)) {
		dmd = &unit->x86c.intrs[DMAR_INTR_QI];
		dmd->name = "qi";
		dmd->irq_rid = DMAR_QI_IRQ_RID;
		dmd->handler = dmar_qi_intr;
		dmd->msi_data_reg = DMAR_IEDATA_REG;
		dmd->msi_addr_reg = DMAR_IEADDR_REG;
		dmd->msi_uaddr_reg = DMAR_IEUADDR_REG;
		dmd->enable_intr = dmar_enable_qi_intr;
		dmd->disable_intr = dmar_disable_qi_intr;
		error = iommu_alloc_irq(DMAR2IOMMU(unit), DMAR_INTR_QI);
		if (error != 0) {
			dmar_release_resources(dev, unit);
			dmar_devs[unit->iommu.unit] = NULL;
			return (error);
		}

		dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
		dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
		dmar_write4(unit, dmd->msi_uaddr_reg, dmd->msi_addr >> 32);
	}

	mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
	unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
	    &unit->iommu.lock);
	LIST_INIT(&unit->domains);

	/*
	 * 9.2 "Context Entry":
	 * When Caching Mode (CM) field is reported as Set, the
	 * domain-id value of zero is architecturally reserved.
	 * Software must not use domain-id value of zero
	 * when CM is Set.
	 */
	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
		alloc_unr_specific(unit->domids, 0);

	unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 +
	    DMAR_CTX_CNT), 0, 0, NULL);
	if (unit->memdomain != -1) {
		unit->ctx_obj->domain.dr_policy = DOMAINSET_PREF(
		    unit->memdomain);
	}

	/*
	 * Allocate and load the root entry table pointer.  Enable the
	 * address translation after the required invalidations are
	 * done.
	 */
	iommu_pgalloc(unit->ctx_obj, 0, IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
	DMAR_LOCK(unit);
	error = dmar_load_root_entry_ptr(unit);
	if (error != 0) {
		DMAR_UNLOCK(unit);
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	error = dmar_inv_ctx_glob(unit);
	if (error != 0) {
		DMAR_UNLOCK(unit);
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) {
		error = dmar_inv_iotlb_glob(unit);
		if (error != 0) {
			DMAR_UNLOCK(unit);
			dmar_release_resources(dev, unit);
			dmar_devs[unit->iommu.unit] = NULL;
			return (error);
		}
	}

	DMAR_UNLOCK(unit);
	error = dmar_init_fault_log(unit);
	if (error != 0) {
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	error = dmar_init_qi(unit);
	if (error != 0) {
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	error = dmar_init_irt(unit);
	if (error != 0) {
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}

	disable_pmr = 0;
	TUNABLE_INT_FETCH("hw.dmar.pmr.disable", &disable_pmr);
	if (disable_pmr) {
		error = dmar_disable_protected_regions(unit);
		if (error != 0)
			device_printf(dev,
			    "Failed to disable protected regions\n");
	}

	error = iommu_init_busdma(&unit->iommu);
	if (error != 0) {
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}

#ifdef NOTYET
	DMAR_LOCK(unit);
	error = dmar_enable_translation(unit);
	if (error != 0) {
		DMAR_UNLOCK(unit);
		dmar_release_resources(dev, unit);
		dmar_devs[unit->iommu.unit] = NULL;
		return (error);
	}
	DMAR_UNLOCK(unit);
#endif

	return (0);
}

static int
dmar_detach(device_t dev)
{

	return (EBUSY);
}

static int
dmar_suspend(device_t dev)
{

	return (0);
}

static int
dmar_resume(device_t dev)
{

	/* XXXKIB */
	return (0);
}

static device_method_t dmar_methods[] = {
	DEVMETHOD(device_identify, dmar_identify),
	DEVMETHOD(device_probe, dmar_probe),
	DEVMETHOD(device_attach, dmar_attach),
	DEVMETHOD(device_detach, dmar_detach),
	DEVMETHOD(device_suspend, dmar_suspend),
	DEVMETHOD(device_resume, dmar_resume),
#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr, dmar_remap_intr),
#endif
	DEVMETHOD_END
};

static driver_t dmar_driver = {
	"dmar",
	dmar_methods,
	sizeof(struct dmar_unit),
};

DRIVER_MODULE(dmar, acpi, dmar_driver, 0, 0);
MODULE_DEPEND(dmar, acpi, 1, 1, 1);

static void
dmar_print_path(int busno, int depth, const ACPI_DMAR_PCI_PATH *path)
{
	int i;

	printf("[%d, ", busno);
	for (i = 0; i < depth; i++) {
		if (i != 0)
			printf(", ");
		printf("(%d, %d)", path[i].Device, path[i].Function);
	}
	printf("]");
}

int
dmar_dev_depth(device_t child)
{
	devclass_t pci_class;
	device_t bus, pcib;
	int depth;

	pci_class = devclass_find("pci");
	for (depth = 1; ; depth++) {
		bus = device_get_parent(child);
		pcib = device_get_parent(bus);
		if (device_get_devclass(device_get_parent(pcib)) !=
		    pci_class)
			return (depth);
		child = pcib;
	}
}

void
dmar_dev_path(device_t child, int *busno, void *path1, int depth)
{
	devclass_t pci_class;
	device_t bus, pcib;
	ACPI_DMAR_PCI_PATH *path;

	pci_class = devclass_find("pci");
	path = path1;
	for (depth--; depth != -1; depth--) {
		path[depth].Device = pci_get_slot(child);
		path[depth].Function = pci_get_function(child);
		bus = device_get_parent(child);
		pcib = device_get_parent(bus);
		if (device_get_devclass(device_get_parent(pcib)) !=
		    pci_class) {
			/* reached a host bridge */
			*busno = pcib_get_bus(bus);
			return;
		}
		child = pcib;
	}
	panic("wrong depth");
}

static int
dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1,
    int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2,
    enum AcpiDmarScopeType scope_type)
{
	int i, depth;

	if (busno1 != busno2)
		return (0);
	if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2)
		return (0);
	depth = depth1;
	if (depth2 < depth)
		depth = depth2;
	for (i = 0; i < depth; i++) {
		if (path1[i].Device != path2[i].Device ||
		    path1[i].Function != path2[i].Function)
			return (0);
	}
	return (1);
}

static int
dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
{
	ACPI_DMAR_PCI_PATH *path;
	int path_len;

	if (devscope->Length < sizeof(*devscope)) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}
	if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	    devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
		return (0);
	path_len = devscope->Length - sizeof(*devscope);
	if (path_len % 2 != 0) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}
	path_len /= 2;
	path = (ACPI_DMAR_PCI_PATH *)(devscope + 1);
	if (path_len == 0) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}

	return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno,
	    dev_path, dev_path_len, devscope->EntryType));
}

static bool
dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len, const char **banner)
{
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	char *ptr, *ptrend;
	int match;

	dmarh = dmar_find_by_index(unit->iommu.unit);
	if (dmarh == NULL)
		return (false);
	if (dmarh->Segment != dev_domain)
		return (false);
	if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) {
		if (banner != NULL)
			*banner = "INCLUDE_ALL";
		return (true);
	}
	ptr = (char *)dmarh + sizeof(*dmarh);
	ptrend = (char *)dmarh + dmarh->Header.Length;
	while (ptr < ptrend) {
		devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		match = dmar_match_devscope(devscope, dev_busno, dev_path,
		    dev_path_len);
		if (match == -1)
			return (false);
		if (match == 1) {
			if (banner != NULL)
				*banner = "specific match";
			return (true);
		}
	}
	return (false);
}
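
/*
 * Return the DMAR unit, if any, whose device scope entries cover the
 * given bus and path.  Used when no device_t exists for a scope, e.g.
 * while instantiating RMRR contexts for absent devices.
 */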
static struct dmar_unit *
dmar_find_by_scope(int dev_domain, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
{
	struct dmar_unit *unit;
	int i;

	for (i = 0; i < dmar_devcnt; i++) {
		if (dmar_devs[i] == NULL)
			continue;
		unit = device_get_softc(dmar_devs[i]);
		if (dmar_match_by_path(unit, dev_domain, dev_busno, dev_path,
		    dev_path_len, NULL))
			return (unit);
	}
	return (NULL);
}

struct dmar_unit *
dmar_find(device_t dev, bool verbose)
{
	struct dmar_unit *unit;
	const char *banner;
	int i, dev_domain, dev_busno, dev_path_len;

	/*
	 * This function can only handle PCI(e) devices.
	 */
	if (device_get_devclass(device_get_parent(dev)) !=
	    devclass_find("pci"))
		return (NULL);

	dev_domain = pci_get_domain(dev);
	dev_path_len = dmar_dev_depth(dev);
	ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
	dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
	banner = "";

	for (i = 0; i < dmar_devcnt; i++) {
		if (dmar_devs[i] == NULL)
			continue;
		unit = device_get_softc(dmar_devs[i]);
		if (dmar_match_by_path(unit, dev_domain, dev_busno,
		    dev_path, dev_path_len, &banner))
			break;
	}
	if (i == dmar_devcnt)
		return (NULL);

	if (verbose) {
		device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
		    dev_domain, pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev), unit->iommu.unit, banner);
		printf(" scope path ");
		dmar_print_path(dev_busno, dev_path_len, dev_path);
		printf("\n");
	}
	iommu_device_set_iommu_prop(dev, unit->iommu.dev);
	return (unit);
}

static struct dmar_unit *
dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid)
{
	device_t dmar_dev;
	struct dmar_unit *unit;
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	ACPI_DMAR_PCI_PATH *path;
	char *ptr, *ptrend;
#ifdef DEV_APIC
	int error;
#endif
	int i;

	for (i = 0; i < dmar_devcnt; i++) {
		dmar_dev = dmar_devs[i];
		if (dmar_dev == NULL)
			continue;
		unit = (struct dmar_unit *)device_get_softc(dmar_dev);
		dmarh = dmar_find_by_index(i);
		if (dmarh == NULL)
			continue;
		ptr = (char *)dmarh + sizeof(*dmarh);
		ptrend = (char *)dmarh + dmarh->Header.Length;
		for (;;) {
			if (ptr >= ptrend)
				break;
			devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
			ptr += devscope->Length;
			if (devscope->EntryType != entry_type)
				continue;
			if (devscope->EnumerationId != id)
				continue;
#ifdef DEV_APIC
			if (entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
				error = ioapic_get_rid(id, rid);
				/*
				 * If our IOAPIC has PCI bindings then
				 * use the PCI device rid.
				 */
				if (error == 0)
					return (unit);
			}
#endif
			if (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)
			    == 2) {
				if (rid != NULL) {
					path = (ACPI_DMAR_PCI_PATH *)
					    (devscope + 1);
					*rid = PCI_RID(devscope->Bus,
					    path->Device, path->Function);
				}
				return (unit);
			}
			printf(
		    "dmar_find_nonpci: id %d type %d path length != 2\n",
			    id, entry_type);
			break;
		}
	}
	return (NULL);
}

struct dmar_unit *
dmar_find_hpet(device_t dev, uint16_t *rid)
{
	struct dmar_unit *unit;

	unit = dmar_find_nonpci(hpet_get_uid(dev), ACPI_DMAR_SCOPE_TYPE_HPET,
	    rid);
	if (unit != NULL)
		iommu_device_set_iommu_prop(dev, unit->iommu.dev);
	return (unit);
}

struct dmar_unit *
dmar_find_ioapic(u_int apic_id, uint16_t *rid)
{
	struct dmar_unit *unit;
	device_t apic_dev;

	unit = dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid);
	if (unit != NULL) {
		apic_dev = ioapic_get_dev(apic_id);
		if (apic_dev != NULL)
			iommu_device_set_iommu_prop(apic_dev, unit->iommu.dev);
	}
	return (unit);
}

struct rmrr_iter_args {
	struct dmar_domain *domain;
	int dev_domain;
	int dev_busno;
	const ACPI_DMAR_PCI_PATH *dev_path;
	int dev_path_len;
	struct iommu_map_entries_tailq *rmrr_entries;
};

static int
dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	struct rmrr_iter_args *ria;
	ACPI_DMAR_RESERVED_MEMORY *resmem;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	struct iommu_map_entry *entry;
	char *ptr, *ptrend;
	int match;

	if (!dmar_rmrr_enable)
		return (1);

	if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
		return (1);

	ria = arg;
	resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
	if (resmem->Segment != ria->dev_domain)
		return (1);

	ptr = (char *)resmem + sizeof(*resmem);
	ptrend = (char *)resmem + resmem->Header.Length;
	for (;;) {
		if (ptr >= ptrend)
			break;
		devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		match = dmar_match_devscope(devscope, ria->dev_busno,
		    ria->dev_path, ria->dev_path_len);
		if (match == 1) {
			entry = iommu_gas_alloc_entry(DOM2IODOM(ria->domain),
			    IOMMU_PGF_WAITOK);
			entry->start = resmem->BaseAddress;
			/* The RMRR entry end address is inclusive. */
			entry->end = resmem->EndAddress;
			TAILQ_INSERT_TAIL(ria->rmrr_entries, entry,
			    dmamap_link);
		}
	}

	return (1);
}

void
dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries)
{
	struct rmrr_iter_args ria;

	ria.domain = domain;
	ria.dev_domain = dev_domain;
	ria.dev_busno = dev_busno;
	ria.dev_path = (const ACPI_DMAR_PCI_PATH *)dev_path;
	ria.dev_path_len = dev_path_len;
	ria.rmrr_entries = rmrr_entries;
	dmar_iterate_tbl(dmar_rmrr_iter, &ria);
}

struct inst_rmrr_iter_args {
	struct dmar_unit *dmar;
};

static device_t
dmar_path_dev(int segment, int path_len, int busno,
    const ACPI_DMAR_PCI_PATH *path, uint16_t *rid)
{
	device_t dev;
	int i;

	dev = NULL;
	for (i = 0; i < path_len; i++) {
		dev = pci_find_dbsf(segment, busno, path->Device,
		    path->Function);
		if (i != path_len - 1) {
			busno = pci_cfgregread(segment, busno, path->Device,
			    path->Function, PCIR_SECBUS_1, 1);
			path++;
		}
	}
	*rid = PCI_RID(busno, path->Device, path->Function);
	return (dev);
}

static int
dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	const ACPI_DMAR_RESERVED_MEMORY *resmem;
	const ACPI_DMAR_DEVICE_SCOPE *devscope;
	struct inst_rmrr_iter_args *iria;
	const char *ptr, *ptrend;
	device_t dev;
	struct dmar_unit *unit;
	int dev_path_len;
	uint16_t rid;

	iria = arg;

	if (!dmar_rmrr_enable)
		return (1);

	if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
		return (1);

	resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
	if (resmem->Segment != iria->dmar->segment)
		return (1);

	ptr = (const char *)resmem + sizeof(*resmem);
	ptrend = (const char *)resmem + resmem->Header.Length;
	for (;;) {
		if (ptr >= ptrend)
			break;
		devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		/* XXXKIB bridge */
		if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT)
			continue;
		rid = 0;
		dev_path_len = (devscope->Length -
		    sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2;
		dev = dmar_path_dev(resmem->Segment, dev_path_len,
		    devscope->Bus,
		    (const ACPI_DMAR_PCI_PATH *)(devscope + 1), &rid);
		if (dev == NULL) {
			if (bootverbose) {
				printf("dmar%d no dev found for RMRR "
				    "[%#jx, %#jx] rid %#x scope path ",
				    iria->dmar->iommu.unit,
				    (uintmax_t)resmem->BaseAddress,
				    (uintmax_t)resmem->EndAddress,
				    rid);
				dmar_print_path(devscope->Bus, dev_path_len,
				    (const ACPI_DMAR_PCI_PATH *)(devscope + 1));
				printf("\n");
			}
			unit = dmar_find_by_scope(resmem->Segment,
			    devscope->Bus,
			    (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
			    dev_path_len);
			if (iria->dmar != unit)
				continue;
			dmar_get_ctx_for_devpath(iria->dmar, rid,
			    resmem->Segment, devscope->Bus,
			    (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
			    dev_path_len, false, true);
		} else {
			unit = dmar_find(dev, false);
			if (iria->dmar != unit)
				continue;
			iommu_instantiate_ctx(&(iria)->dmar->iommu,
			    dev, true);
		}
	}

	return (1);
}

/*
 * Pre-create all contexts for the DMAR which have RMRR entries.
 */
int
dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
{
	struct dmar_unit *dmar;
	struct inst_rmrr_iter_args iria;
	int error;

	dmar = IOMMU2DMAR(unit);

	if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
		return (0);

	error = 0;
	iria.dmar = dmar;
	dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
	DMAR_LOCK(dmar);
	if (!LIST_EMPTY(&dmar->domains)) {
		KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
		    ("dmar%d: RMRR not handled but translation is already enabled",
		    dmar->iommu.unit));
		error = dmar_disable_protected_regions(dmar);
		if (error != 0)
			printf("dmar%d: Failed to disable protected regions\n",
			    dmar->iommu.unit);
		error = dmar_enable_translation(dmar);
		if (bootverbose) {
			if (error == 0) {
				printf("dmar%d: enabled translation\n",
				    dmar->iommu.unit);
			} else {
				printf("dmar%d: enabling translation failed, "
				    "error %d\n", dmar->iommu.unit, error);
			}
		}
	}
	dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
	return (error);
}

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>

static void
dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
{
	struct iommu_domain *iodom;

	iodom = DOM2IODOM(domain);

	db_printf(
	    "  @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
	    "   ctx_cnt %d flags %x pgobj %p map_ents %u\n",
	    domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl,
	    (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
	    domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);

	iommu_db_domain_print_contexts(iodom);

	if (show_mappings)
		iommu_db_domain_print_mappings(iodom);
}

DB_SHOW_COMMAND_FLAGS(dmar_domain, db_dmar_print_domain, CS_OWN)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	struct iommu_ctx *ctx;
	bool show_mappings, valid;
	int pci_domain, bus, device, function, i, t;
	db_expr_t radix;

	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tSLASH) {
		t = db_read_token();
		if (t != tIDENT) {
			db_printf("Bad modifier\n");
			db_radix = radix;
			db_skip_to_eol();
			return;
		}
		show_mappings = strchr(db_tok_string, 'm') != NULL;
		t = db_read_token();
	} else {
		show_mappings = false;
	}
	if (t == tNUMBER) {
		pci_domain = db_tok_number;
		t = db_read_token();
		if (t == tNUMBER) {
			bus = db_tok_number;
			t = db_read_token();
			if (t == tNUMBER) {
				device = db_tok_number;
				t = db_read_token();
				if (t == tNUMBER) {
					function = db_tok_number;
					valid = true;
				}
			}
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show dmar_domain [/m] "
		    "<domain> <bus> <device> <func>\n");
		return;
	}
	for (i = 0; i < dmar_devcnt; i++) {
		if (dmar_devs[i] == NULL)
			continue;
		unit = device_get_softc(dmar_devs[i]);
		LIST_FOREACH(domain, &unit->domains, link) {
			LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
				if (pci_domain == unit->segment &&
				    bus == pci_get_bus(ctx->tag->owner) &&
				    device == pci_get_slot(ctx->tag->owner) &&
				    function == pci_get_function(ctx->tag->
				    owner)) {
					dmar_print_domain(domain,
					    show_mappings);
					goto out;
				}
			}
		}
	}
out:;
}
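
/*
 * Dump the register state of a single DMAR unit: capabilities, the
 * primary fault log, interrupt programming, and the invalidation
 * queue, optionally followed by its domains and their mappings.
 */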
static void
dmar_print_one(int idx, bool show_domains, bool show_mappings)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	int i, frir;

	if (dmar_devs[idx] == NULL)
		return;
	unit = device_get_softc(dmar_devs[idx]);
	db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
	    unit, dmar_read8(unit, DMAR_RTADDR_REG),
	    dmar_read4(unit, DMAR_VER_REG));
	db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
	    (uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
	    (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
	    dmar_read4(unit, DMAR_GSTS_REG),
	    dmar_read4(unit, DMAR_FSTS_REG),
	    dmar_read4(unit, DMAR_FECTL_REG));
	if (unit->ir_enabled) {
		db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n",
		    unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt);
	}
	db_printf("fed 0x%x fea 0x%x feua 0x%x\n",
	    dmar_read4(unit, DMAR_FEDATA_REG),
	    dmar_read4(unit, DMAR_FEADDR_REG),
	    dmar_read4(unit, DMAR_FEUADDR_REG));
	db_printf("primary fault log:\n");
	for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
		db_printf("  %d at 0x%x: %jx %jx\n", i, frir,
		    (uintmax_t)dmar_read8(unit, frir),
		    (uintmax_t)dmar_read8(unit, frir + 8));
	}
	if (DMAR_HAS_QI(unit)) {
		db_printf("ied 0x%x iea 0x%x ieua 0x%x\n",
		    dmar_read4(unit, DMAR_IEDATA_REG),
		    dmar_read4(unit, DMAR_IEADDR_REG),
		    dmar_read4(unit, DMAR_IEUADDR_REG));
		if (unit->qi_enabled) {
			db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
			    "size 0x%jx\n"
		    "  head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n"
		    "  hw compl 0x%jx@%p/phys@%jx next seq 0x%x gen 0x%x\n",
			    (uintmax_t)unit->x86c.inv_queue,
			    (uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
			    (uintmax_t)unit->x86c.inv_queue_size,
			    dmar_read4(unit, DMAR_IQH_REG),
			    dmar_read4(unit, DMAR_IQT_REG),
			    unit->x86c.inv_queue_avail,
			    dmar_read4(unit, DMAR_ICS_REG),
			    dmar_read4(unit, DMAR_IECTL_REG),
			    (uintmax_t)unit->x86c.inv_waitd_seq_hw,
			    &unit->x86c.inv_waitd_seq_hw,
			    (uintmax_t)unit->x86c.inv_waitd_seq_hw_phys,
			    unit->x86c.inv_waitd_seq,
			    unit->x86c.inv_waitd_gen);
		} else {
			db_printf("qi is disabled\n");
		}
	}
	if (show_domains) {
		db_printf("domains:\n");
		LIST_FOREACH(domain, &unit->domains, link) {
			dmar_print_domain(domain, show_mappings);
			if (db_pager_quit)
				break;
		}
	}
}

DB_SHOW_COMMAND(dmar, db_dmar_print)
{
	bool show_domains, show_mappings;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	if (!have_addr) {
		db_printf("usage: show dmar [/d] [/m] index\n");
		return;
	}
	dmar_print_one((int)addr, show_domains, show_mappings);
}

DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
{
	int i;
	bool show_domains, show_mappings;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;

	for (i = 0; i < dmar_devcnt; i++) {
		dmar_print_one(i, show_domains, show_mappings);
		if (db_pager_quit)
			break;
	}
}
#endif

static struct iommu_unit *
dmar_find_method(device_t dev, bool verbose)
{
	struct dmar_unit *dmar;

	dmar = dmar_find(dev, verbose);
	return (dmar != NULL ? &dmar->iommu : NULL);
}

static struct x86_unit_common *
dmar_get_x86_common(struct iommu_unit *unit)
{
	struct dmar_unit *dmar;

	dmar = IOMMU2DMAR(unit);
	return (&dmar->x86c);
}

static void
dmar_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
	dmar_quirks_pre_use(unit);
	dmar_instantiate_rmrr_ctxs(unit);
}

static struct x86_iommu dmar_x86_iommu = {
	.get_x86_common = dmar_get_x86_common,
	.unit_pre_instantiate_ctx = dmar_unit_pre_instantiate_ctx,
	.domain_unload_entry = dmar_domain_unload_entry,
	.domain_unload = dmar_domain_unload,
	.get_ctx = dmar_get_ctx,
	.free_ctx_locked = dmar_free_ctx_locked_method,
	.find = dmar_find_method,
	.alloc_msi_intr = dmar_alloc_msi_intr,
	.map_msi_intr = dmar_map_msi_intr,
	.unmap_msi_intr = dmar_unmap_msi_intr,
	.map_ioapic_intr = dmar_map_ioapic_intr,
	.unmap_ioapic_intr = dmar_unmap_ioapic_intr,
};

static void
x86_iommu_set_intel(void *arg __unused)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		set_x86_iommu(&dmar_x86_iommu);
}

SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_intel, NULL);
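
/*
 * Boot-time tunables consumed by this driver (see the TUNABLE_*_FETCH
 * calls above), settable from loader.conf(5):
 *   hw.dmar.enable		non-zero enables the driver (default 0)
 *   hw.dmar.rmrr_enable	zero ignores RMRR entries (default 1)
 *   hw.dmar.pmr.disable	non-zero disables protected memory regions
 *   hw.iommu.dmar.timeout	DMAR command completion timeout
 */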