1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * This software was developed by Konstantin Belousov <kib@FreeBSD.org> 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/bus.h> 39 #include <sys/interrupt.h> 40 #include <sys/kernel.h> 41 #include <sys/ktr.h> 42 #include <sys/limits.h> 43 #include <sys/lock.h> 44 #include <sys/memdesc.h> 45 #include <sys/mutex.h> 46 #include <sys/proc.h> 47 #include <sys/rwlock.h> 48 #include <sys/rman.h> 49 #include <sys/sysctl.h> 50 #include <sys/taskqueue.h> 51 #include <sys/tree.h> 52 #include <sys/uio.h> 53 #include <sys/vmem.h> 54 #include <vm/vm.h> 55 #include <vm/vm_extern.h> 56 #include <vm/vm_kern.h> 57 #include <vm/vm_object.h> 58 #include <vm/vm_page.h> 59 #include <vm/vm_pager.h> 60 #include <vm/vm_map.h> 61 #include <contrib/dev/acpica/include/acpi.h> 62 #include <contrib/dev/acpica/include/accommon.h> 63 #include <dev/pci/pcireg.h> 64 #include <dev/pci/pcivar.h> 65 #include <machine/atomic.h> 66 #include <machine/bus.h> 67 #include <machine/md_var.h> 68 #include <machine/specialreg.h> 69 #include <x86/include/busdma_impl.h> 70 #include <dev/iommu/busdma_iommu.h> 71 #include <x86/iommu/intel_reg.h> 72 #include <x86/iommu/intel_dmar.h> 73 74 static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); 75 static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain"); 76 77 static void dmar_domain_unload_task(void *arg, int pending); 78 static void dmar_unref_domain_locked(struct dmar_unit *dmar, 79 struct dmar_domain *domain); 80 static void dmar_domain_destroy(struct dmar_domain *domain); 81 82 static void 83 dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) 84 { 85 struct sf_buf *sf; 86 dmar_root_entry_t *re; 87 vm_page_t ctxm; 88 89 /* 90 * Allocated context page must be linked. 91 */ 92 ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC); 93 if (ctxm != NULL) 94 return; 95 96 /* 97 * Page not present, allocate and link. Note that other 98 * thread might execute this sequence in parallel. This 99 * should be safe, because the context entries written by both 100 * threads are equal. 101 */ 102 TD_PREP_PINNED_ASSERT; 103 ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO | 104 IOMMU_PGF_WAITOK); 105 re = dmar_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf); 106 re += bus; 107 dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & 108 VM_PAGE_TO_PHYS(ctxm))); 109 dmar_flush_root_to_ram(dmar, re); 110 dmar_unmap_pgtbl(sf); 111 TD_PINNED_ASSERT; 112 } 113 114 static dmar_ctx_entry_t * 115 dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) 116 { 117 struct dmar_unit *dmar; 118 dmar_ctx_entry_t *ctxp; 119 120 dmar = CTX2DMAR(ctx); 121 122 ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 + 123 PCI_RID2BUS(ctx->rid), IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp); 124 ctxp += ctx->rid & 0xff; 125 return (ctxp); 126 } 127 128 static void 129 device_tag_init(struct dmar_ctx *ctx, device_t dev) 130 { 131 struct dmar_domain *domain; 132 bus_addr_t maxaddr; 133 134 domain = CTX2DOM(ctx); 135 maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR); 136 ctx->context.tag->common.ref_count = 1; /* Prevent free */ 137 ctx->context.tag->common.impl = &bus_dma_iommu_impl; 138 ctx->context.tag->common.boundary = 0; 139 ctx->context.tag->common.lowaddr = maxaddr; 140 ctx->context.tag->common.highaddr = maxaddr; 141 ctx->context.tag->common.maxsize = maxaddr; 142 ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED; 143 ctx->context.tag->common.maxsegsz = maxaddr; 144 ctx->context.tag->ctx = CTX2IOCTX(ctx); 145 ctx->context.tag->owner = dev; 146 } 147 148 static void 149 ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain, 150 vm_page_t ctx_root) 151 { 152 /* 153 * For update due to move, the store is not atomic. It is 154 * possible that DMAR read upper doubleword, while low 155 * doubleword is not yet updated. The domain id is stored in 156 * the upper doubleword, while the table pointer in the lower. 157 * 158 * There is no good solution, for the same reason it is wrong 159 * to clear P bit in the ctx entry for update. 160 */ 161 dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | 162 domain->awlvl); 163 if (ctx_root == NULL) { 164 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); 165 } else { 166 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | 167 (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | 168 DMAR_CTX1_P); 169 } 170 } 171 172 static void 173 ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move, 174 int busno) 175 { 176 struct dmar_unit *unit; 177 struct dmar_domain *domain; 178 vm_page_t ctx_root; 179 int i; 180 181 domain = CTX2DOM(ctx); 182 unit = DOM2DMAR(domain); 183 KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), 184 ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", 185 unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner), 186 pci_get_function(ctx->context.tag->owner), 187 ctxp->ctx1, ctxp->ctx2)); 188 189 if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 && 190 (unit->hw_ecap & DMAR_ECAP_PT) != 0) { 191 KASSERT(domain->pgtbl_obj == NULL, 192 ("ctx %p non-null pgtbl_obj", ctx)); 193 ctx_root = NULL; 194 } else { 195 ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, 196 IOMMU_PGF_NOALLOC); 197 } 198 199 if (iommu_is_buswide_ctx(DMAR2IOMMU(unit), busno)) { 200 MPASS(!move); 201 for (i = 0; i <= PCI_BUSMAX; i++) { 202 ctx_id_entry_init_one(&ctxp[i], domain, ctx_root); 203 } 204 } else { 205 ctx_id_entry_init_one(ctxp, domain, ctx_root); 206 } 207 dmar_flush_ctx_to_ram(unit, ctxp); 208 } 209 210 static int 211 dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) 212 { 213 int error; 214 215 /* 216 * If dmar declares Caching Mode as Set, follow 11.5 "Caching 217 * Mode Consideration" and do the (global) invalidation of the 218 * negative TLB entries. 219 */ 220 if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force) 221 return (0); 222 if (dmar->qi_enabled) { 223 dmar_qi_invalidate_ctx_glob_locked(dmar); 224 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force) 225 dmar_qi_invalidate_iotlb_glob_locked(dmar); 226 return (0); 227 } 228 error = dmar_inv_ctx_glob(dmar); 229 if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)) 230 error = dmar_inv_iotlb_glob(dmar); 231 return (error); 232 } 233 234 static int 235 domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus, 236 int slot, int func, int dev_domain, int dev_busno, 237 const void *dev_path, int dev_path_len) 238 { 239 struct iommu_map_entries_tailq rmrr_entries; 240 struct iommu_map_entry *entry, *entry1; 241 vm_page_t *ma; 242 iommu_gaddr_t start, end; 243 vm_pindex_t size, i; 244 int error, error1; 245 246 error = 0; 247 TAILQ_INIT(&rmrr_entries); 248 dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path, 249 dev_path_len, &rmrr_entries); 250 TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { 251 /* 252 * VT-d specification requires that the start of an 253 * RMRR entry is 4k-aligned. Buggy BIOSes put 254 * anything into the start and end fields. Truncate 255 * and round as neccesary. 256 * 257 * We also allow the overlapping RMRR entries, see 258 * iommu_gas_alloc_region(). 259 */ 260 start = entry->start; 261 end = entry->end; 262 if (bootverbose) 263 printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n", 264 domain->iodom.iommu->unit, bus, slot, func, 265 (uintmax_t)start, (uintmax_t)end); 266 entry->start = trunc_page(start); 267 entry->end = round_page(end); 268 if (entry->start == entry->end) { 269 /* Workaround for some AMI (?) BIOSes */ 270 if (bootverbose) { 271 if (dev != NULL) 272 device_printf(dev, ""); 273 printf("pci%d:%d:%d ", bus, slot, func); 274 printf("BIOS bug: dmar%d RMRR " 275 "region (%jx, %jx) corrected\n", 276 domain->iodom.iommu->unit, start, end); 277 } 278 entry->end += DMAR_PAGE_SIZE * 0x20; 279 } 280 size = OFF_TO_IDX(entry->end - entry->start); 281 ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); 282 for (i = 0; i < size; i++) { 283 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, 284 VM_MEMATTR_DEFAULT); 285 } 286 error1 = iommu_gas_map_region(DOM2IODOM(domain), entry, 287 IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE, 288 IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma); 289 /* 290 * Non-failed RMRR entries are owned by context rb 291 * tree. Get rid of the failed entry, but do not stop 292 * the loop. Rest of the parsed RMRR entries are 293 * loaded and removed on the context destruction. 294 */ 295 if (error1 == 0 && entry->end != entry->start) { 296 IOMMU_LOCK(domain->iodom.iommu); 297 domain->refs++; /* XXXKIB prevent free */ 298 domain->iodom.flags |= IOMMU_DOMAIN_RMRR; 299 IOMMU_UNLOCK(domain->iodom.iommu); 300 } else { 301 if (error1 != 0) { 302 if (dev != NULL) 303 device_printf(dev, ""); 304 printf("pci%d:%d:%d ", bus, slot, func); 305 printf( 306 "dmar%d failed to map RMRR region (%jx, %jx) %d\n", 307 domain->iodom.iommu->unit, start, end, 308 error1); 309 error = error1; 310 } 311 TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); 312 iommu_gas_free_entry(DOM2IODOM(domain), entry); 313 } 314 for (i = 0; i < size; i++) 315 vm_page_putfake(ma[i]); 316 free(ma, M_TEMP); 317 } 318 return (error); 319 } 320 321 static struct dmar_domain * 322 dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) 323 { 324 struct iommu_domain *iodom; 325 struct dmar_domain *domain; 326 int error, id, mgaw; 327 328 id = alloc_unr(dmar->domids); 329 if (id == -1) 330 return (NULL); 331 domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); 332 iodom = DOM2IODOM(domain); 333 domain->domain = id; 334 LIST_INIT(&domain->contexts); 335 RB_INIT(&domain->iodom.rb_root); 336 TAILQ_INIT(&domain->iodom.unload_entries); 337 TASK_INIT(&domain->iodom.unload_task, 0, dmar_domain_unload_task, 338 domain); 339 mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF); 340 domain->dmar = dmar; 341 domain->iodom.iommu = &dmar->iommu; 342 domain_pgtbl_init(domain); 343 344 /* 345 * For now, use the maximal usable physical address of the 346 * installed memory to calculate the mgaw on id_mapped domain. 347 * It is useful for the identity mapping, and less so for the 348 * virtualized bus address space. 349 */ 350 domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; 351 mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped); 352 error = domain_set_agaw(domain, mgaw); 353 if (error != 0) 354 goto fail; 355 if (!id_mapped) 356 /* Use all supported address space for remapping. */ 357 domain->iodom.end = 1ULL << (domain->agaw - 1); 358 359 iommu_gas_init_domain(DOM2IODOM(domain)); 360 361 if (id_mapped) { 362 if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { 363 domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, 364 domain->iodom.end); 365 } 366 domain->iodom.flags |= IOMMU_DOMAIN_IDMAP; 367 } else { 368 error = domain_alloc_pgtbl(domain); 369 if (error != 0) 370 goto fail; 371 /* Disable local apic region access */ 372 error = iommu_gas_reserve_region(iodom, 0xfee00000, 373 0xfeefffff + 1); 374 if (error != 0) 375 goto fail; 376 } 377 return (domain); 378 379 fail: 380 dmar_domain_destroy(domain); 381 return (NULL); 382 } 383 384 static struct dmar_ctx * 385 dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) 386 { 387 struct dmar_ctx *ctx; 388 389 ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); 390 ctx->context.domain = DOM2IODOM(domain); 391 ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu), 392 M_DMAR_CTX, M_WAITOK | M_ZERO); 393 ctx->rid = rid; 394 ctx->refs = 1; 395 return (ctx); 396 } 397 398 static void 399 dmar_ctx_link(struct dmar_ctx *ctx) 400 { 401 struct dmar_domain *domain; 402 403 domain = CTX2DOM(ctx); 404 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); 405 KASSERT(domain->refs >= domain->ctx_cnt, 406 ("dom %p ref underflow %d %d", domain, domain->refs, 407 domain->ctx_cnt)); 408 domain->refs++; 409 domain->ctx_cnt++; 410 LIST_INSERT_HEAD(&domain->contexts, ctx, link); 411 } 412 413 static void 414 dmar_ctx_unlink(struct dmar_ctx *ctx) 415 { 416 struct dmar_domain *domain; 417 418 domain = CTX2DOM(ctx); 419 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); 420 KASSERT(domain->refs > 0, 421 ("domain %p ctx dtr refs %d", domain, domain->refs)); 422 KASSERT(domain->ctx_cnt >= domain->refs, 423 ("domain %p ctx dtr refs %d ctx_cnt %d", domain, 424 domain->refs, domain->ctx_cnt)); 425 domain->refs--; 426 domain->ctx_cnt--; 427 LIST_REMOVE(ctx, link); 428 } 429 430 static void 431 dmar_domain_destroy(struct dmar_domain *domain) 432 { 433 struct dmar_unit *dmar; 434 435 KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries), 436 ("unfinished unloads %p", domain)); 437 KASSERT(LIST_EMPTY(&domain->contexts), 438 ("destroying dom %p with contexts", domain)); 439 KASSERT(domain->ctx_cnt == 0, 440 ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); 441 KASSERT(domain->refs == 0, 442 ("destroying dom %p with refs %d", domain, domain->refs)); 443 if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) { 444 DMAR_DOMAIN_LOCK(domain); 445 iommu_gas_fini_domain(DOM2IODOM(domain)); 446 DMAR_DOMAIN_UNLOCK(domain); 447 } 448 if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) { 449 if (domain->pgtbl_obj != NULL) 450 DMAR_DOMAIN_PGLOCK(domain); 451 domain_free_pgtbl(domain); 452 } 453 mtx_destroy(&domain->iodom.lock); 454 dmar = DOM2DMAR(domain); 455 free_unr(dmar->domids, domain->domain); 456 free(domain, M_DMAR_DOMAIN); 457 } 458 459 static struct dmar_ctx * 460 dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid, 461 int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, 462 bool id_mapped, bool rmrr_init) 463 { 464 struct dmar_domain *domain, *domain1; 465 struct dmar_ctx *ctx, *ctx1; 466 struct iommu_unit *unit; 467 dmar_ctx_entry_t *ctxp; 468 struct sf_buf *sf; 469 int bus, slot, func, error; 470 bool enable; 471 472 if (dev != NULL) { 473 bus = pci_get_bus(dev); 474 slot = pci_get_slot(dev); 475 func = pci_get_function(dev); 476 } else { 477 bus = PCI_RID2BUS(rid); 478 slot = PCI_RID2SLOT(rid); 479 func = PCI_RID2FUNC(rid); 480 } 481 enable = false; 482 TD_PREP_PINNED_ASSERT; 483 unit = DMAR2IOMMU(dmar); 484 DMAR_LOCK(dmar); 485 KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0), 486 ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus, 487 slot, func)); 488 ctx = dmar_find_ctx_locked(dmar, rid); 489 error = 0; 490 if (ctx == NULL) { 491 /* 492 * Perform the allocations which require sleep or have 493 * higher chance to succeed if the sleep is allowed. 494 */ 495 DMAR_UNLOCK(dmar); 496 dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid)); 497 domain1 = dmar_domain_alloc(dmar, id_mapped); 498 if (domain1 == NULL) { 499 TD_PINNED_ASSERT; 500 return (NULL); 501 } 502 if (!id_mapped) { 503 error = domain_init_rmrr(domain1, dev, bus, 504 slot, func, dev_domain, dev_busno, dev_path, 505 dev_path_len); 506 if (error != 0) { 507 dmar_domain_destroy(domain1); 508 TD_PINNED_ASSERT; 509 return (NULL); 510 } 511 } 512 ctx1 = dmar_ctx_alloc(domain1, rid); 513 ctxp = dmar_map_ctx_entry(ctx1, &sf); 514 DMAR_LOCK(dmar); 515 516 /* 517 * Recheck the contexts, other thread might have 518 * already allocated needed one. 519 */ 520 ctx = dmar_find_ctx_locked(dmar, rid); 521 if (ctx == NULL) { 522 domain = domain1; 523 ctx = ctx1; 524 dmar_ctx_link(ctx); 525 ctx->context.tag->owner = dev; 526 device_tag_init(ctx, dev); 527 528 /* 529 * This is the first activated context for the 530 * DMAR unit. Enable the translation after 531 * everything is set up. 532 */ 533 if (LIST_EMPTY(&dmar->domains)) 534 enable = true; 535 LIST_INSERT_HEAD(&dmar->domains, domain, link); 536 ctx_id_entry_init(ctx, ctxp, false, bus); 537 if (dev != NULL) { 538 device_printf(dev, 539 "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " 540 "agaw %d %s-mapped\n", 541 dmar->iommu.unit, dmar->segment, bus, slot, 542 func, rid, domain->domain, domain->mgaw, 543 domain->agaw, id_mapped ? "id" : "re"); 544 } 545 dmar_unmap_pgtbl(sf); 546 } else { 547 dmar_unmap_pgtbl(sf); 548 dmar_domain_destroy(domain1); 549 /* Nothing needs to be done to destroy ctx1. */ 550 free(ctx1, M_DMAR_CTX); 551 domain = CTX2DOM(ctx); 552 ctx->refs++; /* tag referenced us */ 553 } 554 } else { 555 domain = CTX2DOM(ctx); 556 if (ctx->context.tag->owner == NULL) 557 ctx->context.tag->owner = dev; 558 ctx->refs++; /* tag referenced us */ 559 } 560 561 error = dmar_flush_for_ctx_entry(dmar, enable); 562 if (error != 0) { 563 dmar_free_ctx_locked(dmar, ctx); 564 TD_PINNED_ASSERT; 565 return (NULL); 566 } 567 568 /* 569 * The dmar lock was potentially dropped between check for the 570 * empty context list and now. Recheck the state of GCMD_TE 571 * to avoid unneeded command. 572 */ 573 if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { 574 error = dmar_enable_translation(dmar); 575 if (error == 0) { 576 if (bootverbose) { 577 printf("dmar%d: enabled translation\n", 578 dmar->iommu.unit); 579 } 580 } else { 581 printf("dmar%d: enabling translation failed, " 582 "error %d\n", dmar->iommu.unit, error); 583 dmar_free_ctx_locked(dmar, ctx); 584 TD_PINNED_ASSERT; 585 return (NULL); 586 } 587 } 588 DMAR_UNLOCK(dmar); 589 TD_PINNED_ASSERT; 590 return (ctx); 591 } 592 593 struct dmar_ctx * 594 dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid, 595 bool id_mapped, bool rmrr_init) 596 { 597 int dev_domain, dev_path_len, dev_busno; 598 599 dev_domain = pci_get_domain(dev); 600 dev_path_len = dmar_dev_depth(dev); 601 ACPI_DMAR_PCI_PATH dev_path[dev_path_len]; 602 dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len); 603 return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno, 604 dev_path, dev_path_len, id_mapped, rmrr_init)); 605 } 606 607 struct dmar_ctx * 608 dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid, 609 int dev_domain, int dev_busno, 610 const void *dev_path, int dev_path_len, 611 bool id_mapped, bool rmrr_init) 612 { 613 614 return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno, 615 dev_path, dev_path_len, id_mapped, rmrr_init)); 616 } 617 618 int 619 dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) 620 { 621 struct dmar_unit *dmar; 622 struct dmar_domain *old_domain; 623 dmar_ctx_entry_t *ctxp; 624 struct sf_buf *sf; 625 int error; 626 627 dmar = domain->dmar; 628 old_domain = CTX2DOM(ctx); 629 if (domain == old_domain) 630 return (0); 631 KASSERT(old_domain->iodom.iommu == domain->iodom.iommu, 632 ("domain %p %u moving between dmars %u %u", domain, 633 domain->domain, old_domain->iodom.iommu->unit, 634 domain->iodom.iommu->unit)); 635 TD_PREP_PINNED_ASSERT; 636 637 ctxp = dmar_map_ctx_entry(ctx, &sf); 638 DMAR_LOCK(dmar); 639 dmar_ctx_unlink(ctx); 640 ctx->context.domain = &domain->iodom; 641 dmar_ctx_link(ctx); 642 ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100); 643 dmar_unmap_pgtbl(sf); 644 error = dmar_flush_for_ctx_entry(dmar, true); 645 /* If flush failed, rolling back would not work as well. */ 646 printf("dmar%d rid %x domain %d->%d %s-mapped\n", 647 dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain, 648 (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re"); 649 dmar_unref_domain_locked(dmar, old_domain); 650 TD_PINNED_ASSERT; 651 return (error); 652 } 653 654 static void 655 dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) 656 { 657 658 DMAR_ASSERT_LOCKED(dmar); 659 KASSERT(domain->refs >= 1, 660 ("dmar %d domain %p refs %u", dmar->iommu.unit, domain, 661 domain->refs)); 662 KASSERT(domain->refs > domain->ctx_cnt, 663 ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain, 664 domain->refs, domain->ctx_cnt)); 665 666 if (domain->refs > 1) { 667 domain->refs--; 668 DMAR_UNLOCK(dmar); 669 return; 670 } 671 672 KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0, 673 ("lost ref on RMRR domain %p", domain)); 674 675 LIST_REMOVE(domain, link); 676 DMAR_UNLOCK(dmar); 677 678 taskqueue_drain(dmar->iommu.delayed_taskqueue, 679 &domain->iodom.unload_task); 680 dmar_domain_destroy(domain); 681 } 682 683 void 684 dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) 685 { 686 struct sf_buf *sf; 687 dmar_ctx_entry_t *ctxp; 688 struct dmar_domain *domain; 689 690 DMAR_ASSERT_LOCKED(dmar); 691 KASSERT(ctx->refs >= 1, 692 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); 693 694 /* 695 * If our reference is not last, only the dereference should 696 * be performed. 697 */ 698 if (ctx->refs > 1) { 699 ctx->refs--; 700 DMAR_UNLOCK(dmar); 701 return; 702 } 703 704 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, 705 ("lost ref on disabled ctx %p", ctx)); 706 707 /* 708 * Otherwise, the context entry must be cleared before the 709 * page table is destroyed. The mapping of the context 710 * entries page could require sleep, unlock the dmar. 711 */ 712 DMAR_UNLOCK(dmar); 713 TD_PREP_PINNED_ASSERT; 714 ctxp = dmar_map_ctx_entry(ctx, &sf); 715 DMAR_LOCK(dmar); 716 KASSERT(ctx->refs >= 1, 717 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); 718 719 /* 720 * Other thread might have referenced the context, in which 721 * case again only the dereference should be performed. 722 */ 723 if (ctx->refs > 1) { 724 ctx->refs--; 725 DMAR_UNLOCK(dmar); 726 dmar_unmap_pgtbl(sf); 727 TD_PINNED_ASSERT; 728 return; 729 } 730 731 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, 732 ("lost ref on disabled ctx %p", ctx)); 733 734 /* 735 * Clear the context pointer and flush the caches. 736 * XXXKIB: cannot do this if any RMRR entries are still present. 737 */ 738 dmar_pte_clear(&ctxp->ctx1); 739 ctxp->ctx2 = 0; 740 dmar_flush_ctx_to_ram(dmar, ctxp); 741 dmar_inv_ctx_glob(dmar); 742 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { 743 if (dmar->qi_enabled) 744 dmar_qi_invalidate_iotlb_glob_locked(dmar); 745 else 746 dmar_inv_iotlb_glob(dmar); 747 } 748 dmar_unmap_pgtbl(sf); 749 domain = CTX2DOM(ctx); 750 dmar_ctx_unlink(ctx); 751 free(ctx->context.tag, M_DMAR_CTX); 752 free(ctx, M_DMAR_CTX); 753 dmar_unref_domain_locked(dmar, domain); 754 TD_PINNED_ASSERT; 755 } 756 757 void 758 dmar_free_ctx(struct dmar_ctx *ctx) 759 { 760 struct dmar_unit *dmar; 761 762 dmar = CTX2DMAR(ctx); 763 DMAR_LOCK(dmar); 764 dmar_free_ctx_locked(dmar, ctx); 765 } 766 767 /* 768 * Returns with the domain locked. 769 */ 770 struct dmar_ctx * 771 dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) 772 { 773 struct dmar_domain *domain; 774 struct dmar_ctx *ctx; 775 776 DMAR_ASSERT_LOCKED(dmar); 777 778 LIST_FOREACH(domain, &dmar->domains, link) { 779 LIST_FOREACH(ctx, &domain->contexts, link) { 780 if (ctx->rid == rid) 781 return (ctx); 782 } 783 } 784 return (NULL); 785 } 786 787 void 788 dmar_domain_free_entry(struct iommu_map_entry *entry, bool free) 789 { 790 struct iommu_domain *domain; 791 792 domain = entry->domain; 793 IOMMU_DOMAIN_LOCK(domain); 794 if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 795 iommu_gas_free_region(domain, entry); 796 else 797 iommu_gas_free_space(domain, entry); 798 IOMMU_DOMAIN_UNLOCK(domain); 799 if (free) 800 iommu_gas_free_entry(domain, entry); 801 else 802 entry->flags = 0; 803 } 804 805 void 806 dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free) 807 { 808 struct dmar_domain *domain; 809 struct dmar_unit *unit; 810 811 domain = IODOM2DOM(entry->domain); 812 unit = DOM2DMAR(domain); 813 if (unit->qi_enabled) { 814 DMAR_LOCK(unit); 815 dmar_qi_invalidate_locked(IODOM2DOM(entry->domain), 816 entry->start, entry->end - entry->start, &entry->gseq, 817 true); 818 if (!free) 819 entry->flags |= IOMMU_MAP_ENTRY_QI_NF; 820 TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); 821 DMAR_UNLOCK(unit); 822 } else { 823 domain_flush_iotlb_sync(IODOM2DOM(entry->domain), 824 entry->start, entry->end - entry->start); 825 dmar_domain_free_entry(entry, free); 826 } 827 } 828 829 static bool 830 dmar_domain_unload_emit_wait(struct dmar_domain *domain, 831 struct iommu_map_entry *entry) 832 { 833 834 if (TAILQ_NEXT(entry, dmamap_link) == NULL) 835 return (true); 836 return (domain->batch_no++ % dmar_batch_coalesce == 0); 837 } 838 839 void 840 dmar_domain_unload(struct dmar_domain *domain, 841 struct iommu_map_entries_tailq *entries, bool cansleep) 842 { 843 struct dmar_unit *unit; 844 struct iommu_domain *iodom; 845 struct iommu_map_entry *entry, *entry1; 846 int error; 847 848 iodom = DOM2IODOM(domain); 849 unit = DOM2DMAR(domain); 850 851 TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { 852 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, 853 ("not mapped entry %p %p", domain, entry)); 854 error = iodom->ops->unmap(iodom, entry->start, entry->end - 855 entry->start, cansleep ? IOMMU_PGF_WAITOK : 0); 856 KASSERT(error == 0, ("unmap %p error %d", domain, error)); 857 if (!unit->qi_enabled) { 858 domain_flush_iotlb_sync(domain, entry->start, 859 entry->end - entry->start); 860 TAILQ_REMOVE(entries, entry, dmamap_link); 861 dmar_domain_free_entry(entry, true); 862 } 863 } 864 if (TAILQ_EMPTY(entries)) 865 return; 866 867 KASSERT(unit->qi_enabled, ("loaded entry left")); 868 DMAR_LOCK(unit); 869 TAILQ_FOREACH(entry, entries, dmamap_link) { 870 dmar_qi_invalidate_locked(domain, entry->start, entry->end - 871 entry->start, &entry->gseq, 872 dmar_domain_unload_emit_wait(domain, entry)); 873 } 874 TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link); 875 DMAR_UNLOCK(unit); 876 } 877 878 static void 879 dmar_domain_unload_task(void *arg, int pending) 880 { 881 struct dmar_domain *domain; 882 struct iommu_map_entries_tailq entries; 883 884 domain = arg; 885 TAILQ_INIT(&entries); 886 887 for (;;) { 888 DMAR_DOMAIN_LOCK(domain); 889 TAILQ_SWAP(&domain->iodom.unload_entries, &entries, 890 iommu_map_entry, dmamap_link); 891 DMAR_DOMAIN_UNLOCK(domain); 892 if (TAILQ_EMPTY(&entries)) 893 break; 894 dmar_domain_unload(domain, &entries, true); 895 } 896 } 897 898 struct iommu_ctx * 899 iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid, 900 bool id_mapped, bool rmrr_init) 901 { 902 struct dmar_unit *dmar; 903 struct dmar_ctx *ret; 904 905 dmar = IOMMU2DMAR(iommu); 906 907 ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init); 908 909 return (CTX2IOCTX(ret)); 910 } 911 912 void 913 iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context) 914 { 915 struct dmar_unit *dmar; 916 struct dmar_ctx *ctx; 917 918 dmar = IOMMU2DMAR(iommu); 919 ctx = IOCTX2CTX(context); 920 921 dmar_free_ctx_locked(dmar, ctx); 922 } 923 924 void 925 iommu_free_ctx(struct iommu_ctx *context) 926 { 927 struct dmar_ctx *ctx; 928 929 ctx = IOCTX2CTX(context); 930 931 dmar_free_ctx(ctx); 932 } 933 934 void 935 iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free) 936 { 937 938 dmar_domain_unload_entry(entry, free); 939 } 940 941 void 942 iommu_domain_unload(struct iommu_domain *iodom, 943 struct iommu_map_entries_tailq *entries, bool cansleep) 944 { 945 struct dmar_domain *domain; 946 947 domain = IODOM2DOM(iodom); 948 949 dmar_domain_unload(domain, entries, cansleep); 950 } 951