/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <dev/pci/pcireg.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");

static void dmar_domain_unload_task(void *arg, int pending);
static void dmar_unref_domain_locked(struct dmar_unit *dmar,
    struct dmar_domain *domain);
static void dmar_domain_destroy(struct dmar_domain *domain);
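
/*
 * The root table occupies page 0 of the unit's ctx_obj, and the
 * context table for a bus occupies page 1 + bus.  Make sure the
 * context table page for the given bus is allocated and linked
 * into its root entry before a context entry on that bus is
 * written.
 */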
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * Allocated context page must be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * threads are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_flush_root_to_ram(dmar, re);
	dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
}

static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	struct dmar_unit *dmar;
	dmar_ctx_entry_t *ctxp;

	dmar = (struct dmar_unit *)ctx->context.domain->iommu;

	ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
	    PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ctx->rid & 0xff;
	return (ctxp);
}

static void
device_tag_init(struct dmar_ctx *ctx, device_t dev)
{
	struct dmar_domain *domain;
	bus_addr_t maxaddr;

	domain = (struct dmar_domain *)ctx->context.domain;
	maxaddr = MIN(domain->end, BUS_SPACE_MAXADDR);
	ctx->context.tag->common.ref_count = 1; /* Prevent free */
	ctx->context.tag->common.impl = &bus_dma_iommu_impl;
	ctx->context.tag->common.boundary = 0;
	ctx->context.tag->common.lowaddr = maxaddr;
	ctx->context.tag->common.highaddr = maxaddr;
	ctx->context.tag->common.maxsize = maxaddr;
	ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->context.tag->common.maxsegsz = maxaddr;
	ctx->context.tag->ctx = (struct iommu_ctx *)ctx;
	ctx->context.tag->owner = dev;
}
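
/*
 * Context entry programming.  The entry either requests
 * pass-through translation (identity-mapped domain on a unit
 * with ECAP.PT) or points untranslated requests at the domain
 * page-table root.  ctx_id_entry_init() programs a single entry
 * or, for a buswide context, every entry on the bus.
 */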
static void
ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
    vm_page_t ctx_root)
{
	/*
	 * For update due to move, the store is not atomic.  It is
	 * possible that the DMAR reads the upper doubleword while the
	 * lower doubleword is not yet updated.  The domain id is
	 * stored in the upper doubleword, the table pointer in the
	 * lower.
	 *
	 * There is no good solution, for the same reason it is wrong
	 * to clear the P bit in the ctx entry for update.
	 */
	dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
	    domain->awlvl);
	if (ctx_root == NULL) {
		dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
}

static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
    int busno)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	vm_page_t ctx_root;
	int i;

	domain = (struct dmar_domain *)ctx->context.domain;
	unit = (struct dmar_unit *)domain->iodom.iommu;
	KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
	    pci_get_function(ctx->context.tag->owner),
	    ctxp->ctx1, ctxp->ctx2));

	if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(domain->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		ctx_root = NULL;
	} else {
		ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
	}

	if (dmar_is_buswide_ctx(unit, busno)) {
		MPASS(!move);
		for (i = 0; i <= PCI_BUSMAX; i++) {
			ctx_id_entry_init_one(&ctxp[i], domain, ctx_root);
		}
	} else {
		ctx_id_entry_init_one(ctxp, domain, ctx_root);
	}
	dmar_flush_ctx_to_ram(unit, ctxp);
}

static int
dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
{
	int error;

	/*
	 * If the dmar declares Caching Mode as set, follow 11.5
	 * "Caching Mode Consideration" and do the (global)
	 * invalidation of the negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
		return (0);
	if (dmar->qi_enabled) {
		dmar_qi_invalidate_ctx_glob_locked(dmar);
		if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		return (0);
	}
	error = dmar_inv_ctx_glob(dmar);
	if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
		error = dmar_inv_iotlb_glob(dmar);
	return (error);
}
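
/*
 * Map the BIOS-reported RMRR regions for the device into the
 * domain, so that the reserved memory remains accessible once
 * translation is enabled.  Entries that fail to map are dropped
 * with a diagnostic; the successfully mapped entries are
 * unloaded when the context is destroyed.
 */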
static int
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
    int slot, int func, int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len)
{
	struct iommu_map_entries_tailq rmrr_entries;
	struct iommu_map_entry *entry, *entry1;
	vm_page_t *ma;
	iommu_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
	    dev_path_len, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * The VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		if (bootverbose)
			printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
			    domain->iodom.iommu->unit, bus, slot, func,
			    (uintmax_t)start, (uintmax_t)end);
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes */
			if (bootverbose) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf("BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    domain->iodom.iommu->unit, start, end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(domain, entry,
		    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
		    IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
		/*
		 * Non-failed RMRR entries are owned by context rb
		 * tree.  Get rid of the failed entry, but do not stop
		 * the loop.  Rest of the parsed RMRR entries are
		 * loaded and removed on the context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			IOMMU_LOCK(domain->iodom.iommu);
			domain->refs++; /* XXXKIB prevent free */
			domain->flags |= DMAR_DOMAIN_RMRR;
			IOMMU_UNLOCK(domain->iodom.iommu);
		} else {
			if (error1 != 0) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf(
			    "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
				    domain->iodom.iommu->unit, start, end,
				    error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(domain, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}
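
/*
 * Allocate and initialize a domain: reserve a domain id, size
 * the address width (agaw) and the address space, and initialize
 * the address space allocator (dmar_gas_init_domain()).  An
 * identity-mapped domain uses the identity page table unless the
 * unit supports hardware pass-through; other domains get their
 * own page table with the local APIC range reserved.
 */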
static struct dmar_domain *
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
	struct dmar_domain *domain;
	int error, id, mgaw;

	id = alloc_unr(dmar->domids);
	if (id == -1)
		return (NULL);
	domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
	domain->domain = id;
	LIST_INIT(&domain->contexts);
	RB_INIT(&domain->rb_root);
	TAILQ_INIT(&domain->iodom.unload_entries);
	TASK_INIT(&domain->iodom.unload_task, 0, dmar_domain_unload_task,
	    domain);
	mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF);
	domain->dmar = dmar;
	domain->iodom.iommu = &dmar->iommu;

	/*
	 * For now, use the maximal usable physical address of the
	 * installed memory to calculate the mgaw for the id_mapped
	 * domain.  It is useful for the identity mapping, and less so
	 * for the virtualized bus address space.
	 */
	domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
	mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped);
	error = domain_set_agaw(domain, mgaw);
	if (error != 0)
		goto fail;
	if (!id_mapped)
		/* Use all supported address space for remapping. */
		domain->end = 1ULL << (domain->agaw - 1);

	dmar_gas_init_domain(domain);

	if (id_mapped) {
		if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
			domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
			    domain->end);
		}
		domain->flags |= DMAR_DOMAIN_IDMAP;
	} else {
		error = domain_alloc_pgtbl(domain);
		if (error != 0)
			goto fail;
		/* Disable local apic region access. */
		error = dmar_gas_reserve_region(domain, 0xfee00000,
		    0xfeefffff + 1);
		if (error != 0)
			goto fail;
	}
	return (domain);

fail:
	dmar_domain_destroy(domain);
	return (NULL);
}

static struct dmar_ctx *
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	ctx->context.domain = (struct iommu_domain *)domain;
	ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_DMAR_CTX, M_WAITOK | M_ZERO);
	ctx->rid = rid;
	ctx->refs = 1;
	return (ctx);
}

static void
dmar_ctx_link(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = (struct dmar_domain *)ctx->context.domain;
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs >= domain->ctx_cnt,
	    ("dom %p ref underflow %d %d", domain, domain->refs,
	    domain->ctx_cnt));
	domain->refs++;
	domain->ctx_cnt++;
	LIST_INSERT_HEAD(&domain->contexts, ctx, link);
}

static void
dmar_ctx_unlink(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = (struct dmar_domain *)ctx->context.domain;
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs > 0,
	    ("domain %p ctx dtr refs %d", domain, domain->refs));
	KASSERT(domain->ctx_cnt >= domain->refs,
	    ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
	    domain->refs, domain->ctx_cnt));
	domain->refs--;
	domain->ctx_cnt--;
	LIST_REMOVE(ctx, link);
}

static void
dmar_domain_destroy(struct dmar_domain *domain)
{
	struct dmar_unit *dmar;

	KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
	    ("unfinished unloads %p", domain));
	KASSERT(LIST_EMPTY(&domain->contexts),
	    ("destroying dom %p with contexts", domain));
	KASSERT(domain->ctx_cnt == 0,
	    ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
	KASSERT(domain->refs == 0,
	    ("destroying dom %p with refs %d", domain, domain->refs));
	if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
		DMAR_DOMAIN_LOCK(domain);
		dmar_gas_fini_domain(domain);
		DMAR_DOMAIN_UNLOCK(domain);
	}
	if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
		if (domain->pgtbl_obj != NULL)
			DMAR_DOMAIN_PGLOCK(domain);
		domain_free_pgtbl(domain);
	}
	mtx_destroy(&domain->iodom.lock);
	dmar = (struct dmar_unit *)domain->iodom.iommu;
	free_unr(dmar->domids, domain->domain);
	free(domain, M_DMAR_DOMAIN);
}
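
/*
 * Find or create the context for the given requester id.  A new
 * context gets a freshly allocated domain; the allocations that
 * may sleep are done with the dmar lock dropped, and the context
 * list is rechecked afterwards in case another thread won the
 * race.  Translation is enabled on the unit when its first
 * context is activated.
 */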
static struct dmar_ctx *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{
	struct dmar_domain *domain, *domain1;
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error;
	bool enable;

	if (dev != NULL) {
		bus = pci_get_bus(dev);
		slot = pci_get_slot(dev);
		func = pci_get_function(dev);
	} else {
		bus = PCI_RID2BUS(rid);
		slot = PCI_RID2SLOT(rid);
		func = PCI_RID2FUNC(rid);
	}
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0),
	    ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
	    slot, func));
	ctx = dmar_find_ctx_locked(dmar, rid);
	error = 0;
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if the sleep is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
		domain1 = dmar_domain_alloc(dmar, id_mapped);
		if (domain1 == NULL) {
			TD_PINNED_ASSERT;
			return (NULL);
		}
		if (!id_mapped) {
			error = domain_init_rmrr(domain1, dev, bus,
			    slot, func, dev_domain, dev_busno, dev_path,
			    dev_path_len);
			if (error != 0) {
				dmar_domain_destroy(domain1);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctx1 = dmar_ctx_alloc(domain1, rid);
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, rid);
		if (ctx == NULL) {
			domain = domain1;
			ctx = ctx1;
			dmar_ctx_link(ctx);
			ctx->context.tag->owner = dev;
			device_tag_init(ctx, dev);

			/*
			 * If this is the first activated context for
			 * the DMAR unit, enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->domains))
				enable = true;
			LIST_INSERT_HEAD(&dmar->domains, domain, link);
			ctx_id_entry_init(ctx, ctxp, false, bus);
			if (dev != NULL) {
				device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
				    "agaw %d %s-mapped\n",
				    dmar->iommu.unit, dmar->segment, bus, slot,
				    func, rid, domain->domain, domain->mgaw,
				    domain->agaw, id_mapped ? "id" : "re");
			}
			dmar_unmap_pgtbl(sf);
		} else {
			dmar_unmap_pgtbl(sf);
			dmar_domain_destroy(domain1);
			/* Nothing needs to be done to destroy ctx1. */
			free(ctx1, M_DMAR_CTX);
			domain = (struct dmar_domain *)ctx->context.domain;
			ctx->refs++; /* tag referenced us */
		}
	} else {
		domain = (struct dmar_domain *)ctx->context.domain;
		if (ctx->context.tag->owner == NULL)
			ctx->context.tag->owner = dev;
		ctx->refs++; /* tag referenced us */
	}
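
	/*
	 * Make sure the hardware does not keep using a stale cached
	 * copy of the entry for this rid; see
	 * dmar_flush_for_ctx_entry() for the Caching Mode rules.
	 */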
	error = dmar_flush_for_ctx_entry(dmar, enable);
	if (error != 0) {
		dmar_free_ctx_locked(dmar, ctx);
		TD_PINNED_ASSERT;
		return (NULL);
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of
	 * GCMD_TE to avoid an unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error == 0) {
			if (bootverbose) {
				printf("dmar%d: enabled translation\n",
				    dmar->iommu.unit);
			}
		} else {
			printf("dmar%d: enabling translation failed, "
			    "error %d\n", dmar->iommu.unit, error);
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

struct dmar_ctx *
dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	int dev_domain, dev_path_len, dev_busno;

	dev_domain = pci_get_domain(dev);
	dev_path_len = dmar_dev_depth(dev);
	ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
	dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
	return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

struct dmar_ctx *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{

	return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

int
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;
	struct dmar_domain *old_domain;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int error;

	dmar = domain->dmar;
	old_domain = (struct dmar_domain *)ctx->context.domain;
	if (domain == old_domain)
		return (0);
	KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
	    ("domain %p %u moving between dmars %u %u", domain,
	    domain->domain, old_domain->iodom.iommu->unit,
	    domain->iodom.iommu->unit));
	TD_PREP_PINNED_ASSERT;

	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	dmar_ctx_unlink(ctx);
	ctx->context.domain = &domain->iodom;
	dmar_ctx_link(ctx);
	ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
	dmar_unmap_pgtbl(sf);
	error = dmar_flush_for_ctx_entry(dmar, true);
	/* If the flush failed, rolling back would not work either. */
	printf("dmar%d rid %x domain %d->%d %s-mapped\n",
	    dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
	    (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
	dmar_unref_domain_locked(dmar, old_domain);
	TD_PINNED_ASSERT;
	return (error);
}

static void
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
{

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(domain->refs >= 1,
	    ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
	    domain->refs));
	KASSERT(domain->refs > domain->ctx_cnt,
	    ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
	    domain->refs, domain->ctx_cnt));

	if (domain->refs > 1) {
		domain->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0,
	    ("lost ref on RMRR domain %p", domain));

	LIST_REMOVE(domain, link);
	DMAR_UNLOCK(dmar);

	taskqueue_drain(dmar->iommu.delayed_taskqueue,
	    &domain->iodom.unload_task);
	dmar_domain_destroy(domain);
}

void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;
	struct dmar_domain *domain;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not last, only the dereference should
	 * be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep, so unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf);
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_flush_ctx_to_ram(dmar, ctxp);
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	dmar_unmap_pgtbl(sf);
	domain = (struct dmar_domain *)ctx->context.domain;
	dmar_ctx_unlink(ctx);
	free(ctx->context.tag, M_DMAR_CTX);
	free(ctx, M_DMAR_CTX);
	dmar_unref_domain_locked(dmar, domain);
	TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

/*
 * Returns with the domain locked.
 */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_domain *domain;
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(domain, &dmar->domains, link) {
		LIST_FOREACH(ctx, &domain->contexts, link) {
			if (ctx->rid == rid)
				return (ctx);
		}
	}
	return (NULL);
}

void
dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	struct dmar_domain *domain;

	domain = (struct dmar_domain *)entry->domain;
	DMAR_DOMAIN_LOCK(domain);
	if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(domain, entry);
	else
		dmar_gas_free_space(domain, entry);
	DMAR_DOMAIN_UNLOCK(domain);
	if (free)
		dmar_gas_free_entry(domain, entry);
	else
		entry->flags = 0;
}

void
dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
	struct dmar_domain *domain;
	struct dmar_unit *unit;

	domain = (struct dmar_domain *)entry->domain;
	unit = (struct dmar_unit *)domain->iodom.iommu;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked((struct dmar_domain *)entry->domain,
		    entry->start, entry->end - entry->start, &entry->gseq,
		    true);
		if (!free)
			entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		domain_flush_iotlb_sync((struct dmar_domain *)entry->domain,
		    entry->start, entry->end - entry->start);
		dmar_domain_free_entry(entry, free);
	}
}

static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
    struct iommu_map_entry *entry)
{

	if (TAILQ_NEXT(entry, dmamap_link) == NULL)
		return (true);
	return (domain->batch_no++ % dmar_batch_coalesce == 0);
}
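
/*
 * Unload a batch of map entries from the domain.  Without queued
 * invalidation each entry is flushed from the IOTLB synchronously
 * and freed here; with QI the invalidations are queued and the
 * entries are appended to the unit's tlb_flush_entries list, to
 * be freed once the corresponding flush completes.
 */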
void
dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct dmar_unit *unit;
	struct iommu_map_entry *entry, *entry1;
	int error;

	unit = (struct dmar_unit *)domain->iodom.iommu;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", domain, entry));
		error = domain_unmap_buf(domain, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", domain, error));
		if (!unit->qi_enabled) {
			domain_flush_iotlb_sync(domain, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_domain_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		dmar_qi_invalidate_locked(domain, entry->start, entry->end -
		    entry->start, &entry->gseq,
		    dmar_domain_unload_emit_wait(domain, entry));
	}
	TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
	DMAR_UNLOCK(unit);
}

static void
dmar_domain_unload_task(void *arg, int pending)
{
	struct dmar_domain *domain;
	struct iommu_map_entries_tailq entries;

	domain = arg;
	TAILQ_INIT(&entries);

	for (;;) {
		DMAR_DOMAIN_LOCK(domain);
		TAILQ_SWAP(&domain->iodom.unload_entries, &entries,
		    iommu_map_entry, dmamap_link);
		DMAR_DOMAIN_UNLOCK(domain);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_domain_unload(domain, &entries, true);
	}
}

struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	struct dmar_unit *dmar;
	struct dmar_ctx *ret;

	dmar = (struct dmar_unit *)iommu;

	ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);

	return ((struct iommu_ctx *)ret);
}

void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
{
	struct dmar_unit *dmar;
	struct dmar_ctx *ctx;

	dmar = (struct dmar_unit *)iommu;
	ctx = (struct dmar_ctx *)context;

	dmar_free_ctx_locked(dmar, ctx);
}

void
iommu_free_ctx(struct iommu_ctx *context)
{
	struct dmar_unit *dmar;
	struct dmar_ctx *ctx;

	ctx = (struct dmar_ctx *)context;
	dmar = (struct dmar_unit *)ctx->context.domain->iommu;

	dmar_free_ctx(ctx);
}

void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{

	dmar_domain_unload_entry(entry, free);
}

void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct dmar_domain *domain;

	domain = (struct dmar_domain *)iodom;

	dmar_domain_unload(domain, entries, cansleep);
}