/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

static MALLOC_DEFINE(M_AMDIOMMU_CTX, "amdiommu_ctx", "AMD IOMMU Context");
static MALLOC_DEFINE(M_AMDIOMMU_DOMAIN, "amdiommu_dom", "AMD IOMMU Domain");

static void amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain);

static struct amdiommu_dte *
amdiommu_get_dtep(struct amdiommu_ctx *ctx)
{
	return (&CTX2AMD(ctx)->dev_tbl[ctx->context.rid]);
}

void
amdiommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;

	domain = IODOM2DOM(entry->domain);
	unit = DOM2AMD(domain);

	/*
	 * If "free" is false, then the IOTLB invalidation must be performed
	 * synchronously.  Otherwise, the caller might free the entry before
	 * dmar_qi_task() is finished processing it.
	 */
	if (free) {
		AMDIOMMU_LOCK(unit);
		iommu_qi_invalidate_locked(&domain->iodom, entry, true);
		AMDIOMMU_UNLOCK(unit);
	} else {
		iommu_qi_invalidate_sync(&domain->iodom, entry->start,
		    entry->end - entry->start, cansleep);
		iommu_domain_free_entry(entry, false);
	}
}

static bool
amdiommu_domain_unload_emit_wait(struct amdiommu_domain *domain,
    struct iommu_map_entry *entry)
{
	return (true); /* XXXKIB */
}

void
amdiommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	domain = IODOM2DOM(iodom);
	unit = DOM2AMD(domain);

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", domain, entry));
		error = iodom->ops->unmap(iodom, entry,
		    cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", domain, error));
	}
	if (TAILQ_EMPTY(entries))
		return;

	AMDIOMMU_LOCK(unit);
	while ((entry = TAILQ_FIRST(entries)) != NULL) {
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_qi_invalidate_locked(&domain->iodom, entry,
		    amdiommu_domain_unload_emit_wait(domain, entry));
	}
	AMDIOMMU_UNLOCK(unit);
}

static void
amdiommu_domain_destroy(struct amdiommu_domain *domain)
{
	struct iommu_domain *iodom;
	struct amdiommu_unit *unit;

	iodom = DOM2IODOM(domain);

	KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
	    ("unfinished unloads %p", domain));
	KASSERT(LIST_EMPTY(&iodom->contexts),
	    ("destroying dom %p with contexts", domain));
	KASSERT(domain->ctx_cnt == 0,
	    ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
	KASSERT(domain->refs == 0,
	    ("destroying dom %p with refs %d", domain, domain->refs));

	if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		AMDIOMMU_DOMAIN_LOCK(domain);
		iommu_gas_fini_domain(iodom);
		AMDIOMMU_DOMAIN_UNLOCK(domain);
	}
	if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
		if (domain->pgtbl_obj != NULL)
			AMDIOMMU_DOMAIN_PGLOCK(domain);
		amdiommu_domain_free_pgtbl(domain);
	}
	iommu_domain_fini(iodom);
	unit = DOM2AMD(domain);
	free_unr(unit->domids, domain->domain);
	free(domain, M_AMDIOMMU_DOMAIN);
}

/*
 * Return the size of the address space covered by a page table
 * hierarchy with the given number of levels.
 */
static iommu_gaddr_t
lvl2addr(int lvl)
{
	int x;

	x = IOMMU_PAGE_SHIFT + IOMMU_NPTEPGSHIFT * lvl;
	/* Level 6 has only 8 bits for page table index */
	if (x >= NBBY * sizeof(uint64_t))
		return (-1ull);
	return (1ull << x);
}

static void
amdiommu_domain_init_pglvl(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
	iommu_gaddr_t end;
	int hats, i;
	uint64_t efr_hats;

	end = DOM2IODOM(domain)->end;
	for (i = AMDIOMMU_PGTBL_MAXLVL; i > 1; i--) {
		if (lvl2addr(i) >= end && lvl2addr(i - 1) < end)
			break;
	}
	domain->pglvl = i;

	efr_hats = unit->efr & AMDIOMMU_EFR_HATS_MASK;
	switch (efr_hats) {
	case AMDIOMMU_EFR_HATS_6LVL:
		hats = 6;
		break;
	case AMDIOMMU_EFR_HATS_5LVL:
		hats = 5;
		break;
	case AMDIOMMU_EFR_HATS_4LVL:
		hats = 4;
		break;
	default:
		printf("amdiommu%d: HATS %#jx (reserved) ignoring\n",
		    unit->iommu.unit, (uintmax_t)efr_hats);
		return;
	}
	if (hats >= domain->pglvl)
		return;

	printf("amdiommu%d: domain %d HATS %d pglvl %d reducing to HATS\n",
	    unit->iommu.unit, domain->domain, hats, domain->pglvl);
	domain->pglvl = hats;
	domain->iodom.end = lvl2addr(hats);
}

static struct amdiommu_domain *
amdiommu_domain_alloc(struct amdiommu_unit *unit, bool id_mapped)
{
	struct amdiommu_domain *domain;
	struct iommu_domain *iodom;
	int error, id;

	id = alloc_unr(unit->domids);
	if (id == -1)
		return (NULL);
	domain = malloc(sizeof(*domain), M_AMDIOMMU_DOMAIN, M_WAITOK | M_ZERO);
	iodom = DOM2IODOM(domain);
	domain->domain = id;
	LIST_INIT(&iodom->contexts);
	iommu_domain_init(AMD2IOMMU(unit), iodom, &amdiommu_domain_map_ops);

	domain->unit = unit;

	domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
	amdiommu_domain_init_pglvl(unit, domain);
	iommu_gas_init_domain(DOM2IODOM(domain));

	if (id_mapped) {
		domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
	} else {
		error = amdiommu_domain_alloc_pgtbl(domain);
		if (error != 0)
			goto fail;
		/* Disable local apic region access */
		error = iommu_gas_reserve_region(iodom, 0xfee00000,
		    0xfeefffff + 1, &iodom->msi_entry);
		if (error != 0)
			goto fail;
	}

	return (domain);

fail:
	amdiommu_domain_destroy(domain);
	return (NULL);
}

static struct amdiommu_ctx *
amdiommu_ctx_alloc(struct amdiommu_domain *domain, uint16_t rid)
{
	struct amdiommu_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
	ctx->context.domain = DOM2IODOM(domain);
	ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
	ctx->context.rid = rid;
	ctx->context.refs = 1;
	return (ctx);
}

static void
amdiommu_ctx_link(struct amdiommu_ctx *ctx)
{
	struct amdiommu_domain *domain;

	domain = CTX2DOM(ctx);
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs >= domain->ctx_cnt,
	    ("dom %p ref underflow %d %d", domain, domain->refs,
	    domain->ctx_cnt));
	domain->refs++;
	domain->ctx_cnt++;
	LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link);
}

static void
amdiommu_ctx_unlink(struct amdiommu_ctx *ctx)
{
	struct amdiommu_domain *domain;

	domain = CTX2DOM(ctx);
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs > 0,
	    ("domain %p ctx dtr refs %d", domain, domain->refs));
	KASSERT(domain->ctx_cnt >= domain->refs,
	    ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
	    domain->refs, domain->ctx_cnt));
	domain->refs--;
	domain->ctx_cnt--;
	LIST_REMOVE(&ctx->context, link);
}

struct amdiommu_ctx *
amdiommu_find_ctx_locked(struct amdiommu_unit *unit, uint16_t rid)
{
	struct amdiommu_domain *domain;
	struct iommu_ctx *ctx;

	AMDIOMMU_ASSERT_LOCKED(unit);

	LIST_FOREACH(domain, &unit->domains, link) {
		LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
			if (ctx->rid == rid)
				return (IOCTX2CTX(ctx));
		}
	}
	return (NULL);
}

struct amdiommu_domain *
amdiommu_find_domain(struct amdiommu_unit *unit, uint16_t rid)
{
	struct amdiommu_domain *domain;
	struct iommu_ctx *ctx;

	AMDIOMMU_LOCK(unit);
	LIST_FOREACH(domain, &unit->domains, link) {
		LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
			if (ctx->rid == rid)
				break;
		}
	}
	AMDIOMMU_UNLOCK(unit);
	return (domain);
}

static void
amdiommu_free_ctx_locked(struct amdiommu_unit *unit, struct amdiommu_ctx *ctx)
{
	struct amdiommu_dte *dtep;
	struct amdiommu_domain *domain;

	AMDIOMMU_ASSERT_LOCKED(unit);
	KASSERT(ctx->context.refs >= 1,
	    ("amdiommu %p ctx %p refs %u", unit, ctx, ctx->context.refs));

	/*
	 * If our reference is not last, only the dereference should
	 * be performed.
	 */
	if (ctx->context.refs > 1) {
		ctx->context.refs--;
		AMDIOMMU_UNLOCK(unit);
		return;
	}

	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the device table entry must be cleared before
	 * the page table is destroyed.
	 */
	dtep = amdiommu_get_dtep(ctx);
	dtep->v = 0;
	atomic_thread_fence_rel();
	memset(dtep, 0, sizeof(*dtep));

	domain = CTX2DOM(ctx);
	amdiommu_qi_invalidate_ctx_locked_nowait(ctx);
	amdiommu_qi_invalidate_ir_locked_nowait(unit, ctx->context.rid);
	amdiommu_qi_invalidate_all_pages_locked_nowait(domain);
	amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx)));

	if (unit->irte_enabled)
		amdiommu_ctx_fini_irte(ctx);

	amdiommu_ctx_unlink(ctx);
	free(ctx->context.tag, M_AMDIOMMU_CTX);
	free(ctx, M_AMDIOMMU_CTX);
	amdiommu_unref_domain_locked(unit, domain);
}

static void
amdiommu_free_ctx(struct amdiommu_ctx *ctx)
{
	struct amdiommu_unit *unit;

	unit = CTX2AMD(ctx);
	AMDIOMMU_LOCK(unit);
	amdiommu_free_ctx_locked(unit, ctx);
}

static void
amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
	AMDIOMMU_ASSERT_LOCKED(unit);
	KASSERT(domain->refs >= 1,
	    ("amdiommu%d domain %p refs %u", unit->iommu.unit, domain,
	    domain->refs));
	KASSERT(domain->refs > domain->ctx_cnt,
	    ("amdiommu%d domain %p refs %d ctx_cnt %d", unit->iommu.unit,
	    domain, domain->refs, domain->ctx_cnt));

	if (domain->refs > 1) {
		domain->refs--;
		AMDIOMMU_UNLOCK(unit);
		return;
	}

	LIST_REMOVE(domain, link);
	AMDIOMMU_UNLOCK(unit);

	taskqueue_drain(unit->iommu.delayed_taskqueue,
	    &domain->iodom.unload_task);
	amdiommu_domain_destroy(domain);
}

static void
dte_entry_init_one(struct amdiommu_dte *dtep, struct amdiommu_ctx *ctx,
    vm_page_t pgtblr, uint8_t dte, uint32_t edte)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;

	domain = CTX2DOM(ctx);
	unit = DOM2AMD(domain);

	dtep->tv = 1;
	/* dtep->had not used for now */
	dtep->ir = 1;
	dtep->iw = 1;
	dtep->domainid = domain->domain;
	dtep->pioctl = AMDIOMMU_DTE_PIOCTL_DIS;

	/* fill device interrupt passing hints from IVHD. */
	dtep->initpass = (dte & ACPI_IVHD_INIT_PASS) != 0;
	dtep->eintpass = (dte & ACPI_IVHD_EINT_PASS) != 0;
	dtep->nmipass = (dte & ACPI_IVHD_NMI_PASS) != 0;
	dtep->sysmgt = (dte & ACPI_IVHD_SYSTEM_MGMT) >> 4;
	dtep->lint0pass = (dte & ACPI_IVHD_LINT0_PASS) != 0;
	dtep->lint1pass = (dte & ACPI_IVHD_LINT1_PASS) != 0;

	if (unit->irte_enabled) {
		dtep->iv = 1;
		dtep->i = 0;
		dtep->inttablen = ilog2(unit->irte_nentries);
		dtep->intrroot = pmap_kextract(unit->irte_x2apic ?
		    (vm_offset_t)ctx->irtx2 :
		    (vm_offset_t)ctx->irtb) >> 6;

		dtep->intctl = AMDIOMMU_DTE_INTCTL_MAP;
	}

	if ((DOM2IODOM(domain)->flags & IOMMU_DOMAIN_IDMAP) != 0) {
		dtep->pgmode = AMDIOMMU_DTE_PGMODE_1T1;
	} else {
		MPASS(domain->pglvl > 0 && domain->pglvl <=
		    AMDIOMMU_PGTBL_MAXLVL);
		dtep->pgmode = domain->pglvl;
		dtep->ptroot = VM_PAGE_TO_PHYS(pgtblr) >> 12;
	}

	atomic_thread_fence_rel();
	dtep->v = 1;
}

static void
dte_entry_init(struct amdiommu_ctx *ctx, bool move, uint8_t dte, uint32_t edte)
{
	struct amdiommu_dte *dtep;
	struct amdiommu_unit *unit;
	struct amdiommu_domain *domain;
	int i;

	domain = CTX2DOM(ctx);
	unit = DOM2AMD(domain);

	dtep = amdiommu_get_dtep(ctx);
	KASSERT(dtep->v == 0,
	    ("amdiommu%d initializing valid dte @%p %#jx",
	    CTX2AMD(ctx)->iommu.unit, dtep, (uintmax_t)(*(uint64_t *)dtep)));

	if (iommu_is_buswide_ctx(AMD2IOMMU(unit),
	    PCI_RID2BUS(ctx->context.rid))) {
		MPASS(!move);
		for (i = 0; i <= PCI_BUSMAX; i++) {
			dte_entry_init_one(&dtep[i], ctx, domain->pgtblr,
			    dte, edte);
		}
	} else {
		dte_entry_init_one(dtep, ctx, domain->pgtblr, dte, edte);
	}
}

struct amdiommu_ctx *
amdiommu_get_ctx_for_dev(struct amdiommu_unit *unit, device_t dev, uint16_t rid,
    int dev_domain, bool id_mapped, bool rmrr_init, uint8_t dte, uint32_t edte)
{
	struct amdiommu_domain *domain, *domain1;
	struct amdiommu_ctx *ctx, *ctx1;
	int bus, slot, func;

	if (dev != NULL) {
		bus = pci_get_bus(dev);
		slot = pci_get_slot(dev);
		func = pci_get_function(dev);
	} else {
		bus = PCI_RID2BUS(rid);
		slot = PCI_RID2SLOT(rid);
		func = PCI_RID2FUNC(rid);
	}
	AMDIOMMU_LOCK(unit);
	KASSERT(!iommu_is_buswide_ctx(AMD2IOMMU(unit), bus) ||
	    (slot == 0 && func == 0),
	    ("iommu%d pci%d:%d:%d get_ctx for buswide", AMD2IOMMU(unit)->unit,
	    bus, slot, func));
	ctx = amdiommu_find_ctx_locked(unit, rid);
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * higher chance to succeed if the sleep is allowed.
		 */
		AMDIOMMU_UNLOCK(unit);
		domain1 = amdiommu_domain_alloc(unit, id_mapped);
		if (domain1 == NULL)
			return (NULL);
		if (!id_mapped) {
			/*
			 * XXXKIB IVMD seems to be less significant
			 * and less used on AMD than RMRR on Intel.
			 * Not implemented for now.
			 */
		}
		ctx1 = amdiommu_ctx_alloc(domain1, rid);
		amdiommu_ctx_init_irte(ctx1);
		AMDIOMMU_LOCK(unit);

		/*
		 * Recheck the contexts, other thread might have
		 * already allocated needed one.
		 */
		ctx = amdiommu_find_ctx_locked(unit, rid);
		if (ctx == NULL) {
			domain = domain1;
			ctx = ctx1;
			amdiommu_ctx_link(ctx);
			ctx->context.tag->owner = dev;
			iommu_device_tag_init(CTX2IOCTX(ctx), dev);

			LIST_INSERT_HEAD(&unit->domains, domain, link);
			dte_entry_init(ctx, false, dte, edte);
			amdiommu_qi_invalidate_ctx_locked(ctx);
			if (dev != NULL) {
				device_printf(dev,
				    "amdiommu%d pci%d:%d:%d:%d rid %x domain %d "
				    "%s-mapped\n",
				    AMD2IOMMU(unit)->unit, unit->unit_dom,
				    bus, slot, func, rid, domain->domain,
				    id_mapped ? "id" : "re");
			}
		} else {
			amdiommu_domain_destroy(domain1);
			/* Nothing needs to be done to destroy ctx1. */
			free(ctx1, M_AMDIOMMU_CTX);
			domain = CTX2DOM(ctx);
			ctx->context.refs++; /* tag referenced us */
		}
	} else {
		domain = CTX2DOM(ctx);
		if (ctx->context.tag->owner == NULL)
			ctx->context.tag->owner = dev;
		ctx->context.refs++; /* tag referenced us */
	}
	AMDIOMMU_UNLOCK(unit);

	return (ctx);
}

struct iommu_ctx *
amdiommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	struct amdiommu_unit *unit;
	struct amdiommu_ctx *ret;
	int error;
	uint32_t edte;
	uint16_t rid1;
	uint8_t dte;

	error = amdiommu_find_unit(dev, &unit, &rid1, &dte, &edte,
	    bootverbose);
	if (error != 0)
		return (NULL);
	if (AMD2IOMMU(unit) != iommu)	/* XXX complain loudly */
		return (NULL);
	ret = amdiommu_get_ctx_for_dev(unit, dev, rid1, pci_get_domain(dev),
	    id_mapped, rmrr_init, dte, edte);
	return (CTX2IOCTX(ret));
}

void
amdiommu_free_ctx_locked_method(struct iommu_unit *iommu,
    struct iommu_ctx *context)
{
	struct amdiommu_unit *unit;
	struct amdiommu_ctx *ctx;

	unit = IOMMU2AMD(iommu);
	ctx = IOCTX2CTX(context);
	amdiommu_free_ctx_locked(unit, ctx);
}

void
amdiommu_free_ctx_method(struct iommu_ctx *context)
{
	struct amdiommu_ctx *ctx;

	ctx = IOCTX2CTX(context);
	amdiommu_free_ctx(ctx);
}