1 /*- 2 * Copyright (c) 2013 The FreeBSD Foundation 3 * All rights reserved. 4 * 5 * This software was developed by Konstantin Belousov <kib@FreeBSD.org> 6 * under sponsorship from the FreeBSD Foundation. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/kernel.h> 39 #include <sys/ktr.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/memdesc.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/rwlock.h> 46 #include <sys/rman.h> 47 #include <sys/sysctl.h> 48 #include <sys/taskqueue.h> 49 #include <sys/tree.h> 50 #include <sys/uio.h> 51 #include <sys/vmem.h> 52 #include <vm/vm.h> 53 #include <vm/vm_extern.h> 54 #include <vm/vm_kern.h> 55 #include <vm/vm_object.h> 56 #include <vm/vm_page.h> 57 #include <vm/vm_pager.h> 58 #include <vm/vm_map.h> 59 #include <machine/atomic.h> 60 #include <machine/bus.h> 61 #include <machine/md_var.h> 62 #include <machine/specialreg.h> 63 #include <x86/include/busdma_impl.h> 64 #include <x86/iommu/intel_reg.h> 65 #include <x86/iommu/busdma_dmar.h> 66 #include <x86/iommu/intel_dmar.h> 67 #include <dev/pci/pcireg.h> 68 #include <dev/pci/pcivar.h> 69 70 static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); 71 static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain"); 72 73 static void dmar_domain_unload_task(void *arg, int pending); 74 static void dmar_unref_domain_locked(struct dmar_unit *dmar, 75 struct dmar_domain *domain); 76 static void dmar_domain_destroy(struct dmar_domain *domain); 77 static void dmar_ctx_dtr(struct dmar_ctx *ctx); 78 79 static void 80 dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) 81 { 82 struct sf_buf *sf; 83 dmar_root_entry_t *re; 84 vm_page_t ctxm; 85 86 /* 87 * Allocated context page must be linked. 88 */ 89 ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC); 90 if (ctxm != NULL) 91 return; 92 93 /* 94 * Page not present, allocate and link. Note that other 95 * thread might execute this sequence in parallel. This 96 * should be safe, because the context entries written by both 97 * threads are equal. 98 */ 99 TD_PREP_PINNED_ASSERT; 100 ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO | 101 DMAR_PGF_WAITOK); 102 re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf); 103 re += bus; 104 dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & 105 VM_PAGE_TO_PHYS(ctxm))); 106 dmar_flush_root_to_ram(dmar, re); 107 dmar_unmap_pgtbl(sf); 108 TD_PINNED_ASSERT; 109 } 110 111 static dmar_ctx_entry_t * 112 dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) 113 { 114 dmar_ctx_entry_t *ctxp; 115 116 ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 + 117 PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); 118 ctxp += ctx->rid & 0xff; 119 return (ctxp); 120 } 121 122 static void 123 ctx_tag_init(struct dmar_ctx *ctx, device_t dev) 124 { 125 bus_addr_t maxaddr; 126 127 maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR); 128 ctx->ctx_tag.common.ref_count = 1; /* Prevent free */ 129 ctx->ctx_tag.common.impl = &bus_dma_dmar_impl; 130 ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY; 131 ctx->ctx_tag.common.lowaddr = maxaddr; 132 ctx->ctx_tag.common.highaddr = maxaddr; 133 ctx->ctx_tag.common.maxsize = maxaddr; 134 ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED; 135 ctx->ctx_tag.common.maxsegsz = maxaddr; 136 ctx->ctx_tag.ctx = ctx; 137 ctx->ctx_tag.owner = dev; 138 } 139 140 static void 141 ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move) 142 { 143 struct dmar_unit *unit; 144 struct dmar_domain *domain; 145 vm_page_t ctx_root; 146 147 domain = ctx->domain; 148 unit = domain->dmar; 149 KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), 150 ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", 151 unit->unit, pci_get_bus(ctx->ctx_tag.owner), 152 pci_get_slot(ctx->ctx_tag.owner), 153 pci_get_function(ctx->ctx_tag.owner), 154 ctxp->ctx1, ctxp->ctx2)); 155 /* 156 * For update due to move, the store is not atomic. It is 157 * possible that DMAR read upper doubleword, while low 158 * doubleword is not yet updated. The domain id is stored in 159 * the upper doubleword, while the table pointer in the lower. 160 * 161 * There is no good solution, for the same reason it is wrong 162 * to clear P bit in the ctx entry for update. 163 */ 164 dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | 165 domain->awlvl); 166 if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 && 167 (unit->hw_ecap & DMAR_ECAP_PT) != 0) { 168 KASSERT(domain->pgtbl_obj == NULL, 169 ("ctx %p non-null pgtbl_obj", ctx)); 170 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); 171 } else { 172 ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC); 173 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | 174 (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | 175 DMAR_CTX1_P); 176 } 177 dmar_flush_ctx_to_ram(unit, ctxp); 178 } 179 180 static int 181 dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) 182 { 183 int error; 184 185 /* 186 * If dmar declares Caching Mode as Set, follow 11.5 "Caching 187 * Mode Consideration" and do the (global) invalidation of the 188 * negative TLB entries. 189 */ 190 if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force) 191 return (0); 192 if (dmar->qi_enabled) { 193 dmar_qi_invalidate_ctx_glob_locked(dmar); 194 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force) 195 dmar_qi_invalidate_iotlb_glob_locked(dmar); 196 return (0); 197 } 198 error = dmar_inv_ctx_glob(dmar); 199 if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)) 200 error = dmar_inv_iotlb_glob(dmar); 201 return (error); 202 } 203 204 static int 205 domain_init_rmrr(struct dmar_domain *domain, device_t dev) 206 { 207 struct dmar_map_entries_tailq rmrr_entries; 208 struct dmar_map_entry *entry, *entry1; 209 vm_page_t *ma; 210 dmar_gaddr_t start, end; 211 vm_pindex_t size, i; 212 int error, error1; 213 214 error = 0; 215 TAILQ_INIT(&rmrr_entries); 216 dmar_dev_parse_rmrr(domain, dev, &rmrr_entries); 217 TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { 218 /* 219 * VT-d specification requires that the start of an 220 * RMRR entry is 4k-aligned. Buggy BIOSes put 221 * anything into the start and end fields. Truncate 222 * and round as neccesary. 223 * 224 * We also allow the overlapping RMRR entries, see 225 * dmar_gas_alloc_region(). 226 */ 227 start = entry->start; 228 end = entry->end; 229 entry->start = trunc_page(start); 230 entry->end = round_page(end); 231 if (entry->start == entry->end) { 232 /* Workaround for some AMI (?) BIOSes */ 233 if (bootverbose) { 234 device_printf(dev, "BIOS bug: dmar%d RMRR " 235 "region (%jx, %jx) corrected\n", 236 domain->dmar->unit, start, end); 237 } 238 entry->end += DMAR_PAGE_SIZE * 0x20; 239 } 240 size = OFF_TO_IDX(entry->end - entry->start); 241 ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); 242 for (i = 0; i < size; i++) { 243 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, 244 VM_MEMATTR_DEFAULT); 245 } 246 error1 = dmar_gas_map_region(domain, entry, 247 DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, 248 DMAR_GM_CANWAIT, ma); 249 /* 250 * Non-failed RMRR entries are owned by context rb 251 * tree. Get rid of the failed entry, but do not stop 252 * the loop. Rest of the parsed RMRR entries are 253 * loaded and removed on the context destruction. 254 */ 255 if (error1 == 0 && entry->end != entry->start) { 256 DMAR_LOCK(domain->dmar); 257 domain->refs++; /* XXXKIB prevent free */ 258 domain->flags |= DMAR_DOMAIN_RMRR; 259 DMAR_UNLOCK(domain->dmar); 260 } else { 261 if (error1 != 0) { 262 device_printf(dev, 263 "dmar%d failed to map RMRR region (%jx, %jx) %d\n", 264 domain->dmar->unit, start, end, error1); 265 error = error1; 266 } 267 TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); 268 dmar_gas_free_entry(domain, entry); 269 } 270 for (i = 0; i < size; i++) 271 vm_page_putfake(ma[i]); 272 free(ma, M_TEMP); 273 } 274 return (error); 275 } 276 277 static struct dmar_domain * 278 dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) 279 { 280 struct dmar_domain *domain; 281 int error, id, mgaw; 282 283 id = alloc_unr(dmar->domids); 284 if (id == -1) 285 return (NULL); 286 domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); 287 domain->domain = id; 288 LIST_INIT(&domain->contexts); 289 RB_INIT(&domain->rb_root); 290 TAILQ_INIT(&domain->unload_entries); 291 TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain); 292 mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF); 293 domain->dmar = dmar; 294 295 /* 296 * For now, use the maximal usable physical address of the 297 * installed memory to calculate the mgaw on id_mapped domain. 298 * It is useful for the identity mapping, and less so for the 299 * virtualized bus address space. 300 */ 301 domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; 302 mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped); 303 error = domain_set_agaw(domain, mgaw); 304 if (error != 0) 305 goto fail; 306 if (!id_mapped) 307 /* Use all supported address space for remapping. */ 308 domain->end = 1ULL << (domain->agaw - 1); 309 310 dmar_gas_init_domain(domain); 311 312 if (id_mapped) { 313 if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { 314 domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, 315 domain->end); 316 } 317 domain->flags |= DMAR_DOMAIN_IDMAP; 318 } else { 319 error = domain_alloc_pgtbl(domain); 320 if (error != 0) 321 goto fail; 322 /* Disable local apic region access */ 323 error = dmar_gas_reserve_region(domain, 0xfee00000, 324 0xfeefffff + 1); 325 if (error != 0) 326 goto fail; 327 } 328 return (domain); 329 330 fail: 331 dmar_domain_destroy(domain); 332 return (NULL); 333 } 334 335 static struct dmar_ctx * 336 dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) 337 { 338 struct dmar_ctx *ctx; 339 340 ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); 341 ctx->domain = domain; 342 ctx->rid = rid; 343 ctx->refs = 1; 344 return (ctx); 345 } 346 347 static void 348 dmar_ctx_link(struct dmar_ctx *ctx) 349 { 350 struct dmar_domain *domain; 351 352 domain = ctx->domain; 353 DMAR_ASSERT_LOCKED(domain->dmar); 354 KASSERT(domain->refs >= domain->ctx_cnt, 355 ("dom %p ref underflow %d %d", domain, domain->refs, 356 domain->ctx_cnt)); 357 domain->refs++; 358 domain->ctx_cnt++; 359 LIST_INSERT_HEAD(&domain->contexts, ctx, link); 360 } 361 362 static void 363 dmar_ctx_unlink(struct dmar_ctx *ctx) 364 { 365 struct dmar_domain *domain; 366 367 domain = ctx->domain; 368 DMAR_ASSERT_LOCKED(domain->dmar); 369 KASSERT(domain->refs > 0, 370 ("domain %p ctx dtr refs %d", domain, domain->refs)); 371 KASSERT(domain->ctx_cnt >= domain->refs, 372 ("domain %p ctx dtr refs %d ctx_cnt %d", domain, 373 domain->refs, domain->ctx_cnt)); 374 domain->refs--; 375 domain->ctx_cnt--; 376 LIST_REMOVE(ctx, link); 377 } 378 379 static void 380 dmar_domain_destroy(struct dmar_domain *domain) 381 { 382 383 KASSERT(TAILQ_EMPTY(&domain->unload_entries), 384 ("unfinished unloads %p", domain)); 385 KASSERT(LIST_EMPTY(&domain->contexts), 386 ("destroying dom %p with contexts", domain)); 387 KASSERT(domain->ctx_cnt == 0, 388 ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); 389 KASSERT(domain->refs == 0, 390 ("destroying dom %p with refs %d", domain, domain->refs)); 391 if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) { 392 DMAR_DOMAIN_LOCK(domain); 393 dmar_gas_fini_domain(domain); 394 DMAR_DOMAIN_UNLOCK(domain); 395 } 396 if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) { 397 if (domain->pgtbl_obj != NULL) 398 DMAR_DOMAIN_PGLOCK(domain); 399 domain_free_pgtbl(domain); 400 } 401 mtx_destroy(&domain->lock); 402 free_unr(domain->dmar->domids, domain->domain); 403 free(domain, M_DMAR_DOMAIN); 404 } 405 406 struct dmar_ctx * 407 dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid, 408 bool id_mapped, bool rmrr_init) 409 { 410 struct dmar_domain *domain, *domain1; 411 struct dmar_ctx *ctx, *ctx1; 412 dmar_ctx_entry_t *ctxp; 413 struct sf_buf *sf; 414 int bus, slot, func, error; 415 bool enable; 416 417 bus = pci_get_bus(dev); 418 slot = pci_get_slot(dev); 419 func = pci_get_function(dev); 420 enable = false; 421 TD_PREP_PINNED_ASSERT; 422 DMAR_LOCK(dmar); 423 ctx = dmar_find_ctx_locked(dmar, rid); 424 error = 0; 425 if (ctx == NULL) { 426 /* 427 * Perform the allocations which require sleep or have 428 * higher chance to succeed if the sleep is allowed. 429 */ 430 DMAR_UNLOCK(dmar); 431 dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid)); 432 domain1 = dmar_domain_alloc(dmar, id_mapped); 433 if (domain1 == NULL) { 434 TD_PINNED_ASSERT; 435 return (NULL); 436 } 437 error = domain_init_rmrr(domain1, dev); 438 if (error != 0) { 439 dmar_domain_destroy(domain1); 440 TD_PINNED_ASSERT; 441 return (NULL); 442 } 443 ctx1 = dmar_ctx_alloc(domain1, rid); 444 ctxp = dmar_map_ctx_entry(ctx1, &sf); 445 DMAR_LOCK(dmar); 446 447 /* 448 * Recheck the contexts, other thread might have 449 * already allocated needed one. 450 */ 451 ctx = dmar_find_ctx_locked(dmar, rid); 452 if (ctx == NULL) { 453 domain = domain1; 454 ctx = ctx1; 455 dmar_ctx_link(ctx); 456 ctx->ctx_tag.owner = dev; 457 ctx_tag_init(ctx, dev); 458 459 /* 460 * This is the first activated context for the 461 * DMAR unit. Enable the translation after 462 * everything is set up. 463 */ 464 if (LIST_EMPTY(&dmar->domains)) 465 enable = true; 466 LIST_INSERT_HEAD(&dmar->domains, domain, link); 467 ctx_id_entry_init(ctx, ctxp, false); 468 device_printf(dev, 469 "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " 470 "agaw %d %s-mapped\n", 471 dmar->unit, dmar->segment, bus, slot, 472 func, rid, domain->domain, domain->mgaw, 473 domain->agaw, id_mapped ? "id" : "re"); 474 } else { 475 /* Nothing needs to be done to destroy ctx1. */ 476 dmar_domain_destroy(domain1); 477 domain = ctx->domain; 478 ctx->refs++; /* tag referenced us */ 479 } 480 dmar_unmap_pgtbl(sf); 481 } else { 482 domain = ctx->domain; 483 ctx->refs++; /* tag referenced us */ 484 } 485 486 error = dmar_flush_for_ctx_entry(dmar, enable); 487 if (error != 0) { 488 dmar_free_ctx_locked(dmar, ctx); 489 TD_PINNED_ASSERT; 490 return (NULL); 491 } 492 493 /* 494 * The dmar lock was potentially dropped between check for the 495 * empty context list and now. Recheck the state of GCMD_TE 496 * to avoid unneeded command. 497 */ 498 if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { 499 error = dmar_enable_translation(dmar); 500 if (error != 0) { 501 dmar_free_ctx_locked(dmar, ctx); 502 TD_PINNED_ASSERT; 503 return (NULL); 504 } 505 } 506 DMAR_UNLOCK(dmar); 507 TD_PINNED_ASSERT; 508 return (ctx); 509 } 510 511 int 512 dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) 513 { 514 struct dmar_unit *dmar; 515 struct dmar_domain *old_domain; 516 dmar_ctx_entry_t *ctxp; 517 struct sf_buf *sf; 518 int error; 519 520 dmar = domain->dmar; 521 old_domain = ctx->domain; 522 if (domain == old_domain) 523 return (0); 524 KASSERT(old_domain->dmar == dmar, 525 ("domain %p %u moving between dmars %u %u", domain, 526 domain->domain, old_domain->dmar->unit, domain->dmar->unit)); 527 TD_PREP_PINNED_ASSERT; 528 529 ctxp = dmar_map_ctx_entry(ctx, &sf); 530 DMAR_LOCK(dmar); 531 dmar_ctx_unlink(ctx); 532 ctx->domain = domain; 533 dmar_ctx_link(ctx); 534 ctx_id_entry_init(ctx, ctxp, true); 535 dmar_unmap_pgtbl(sf); 536 error = dmar_flush_for_ctx_entry(dmar, true); 537 /* If flush failed, rolling back would not work as well. */ 538 printf("dmar%d rid %x domain %d->%d %s-mapped\n", 539 dmar->unit, ctx->rid, old_domain->domain, domain->domain, 540 (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re"); 541 dmar_unref_domain_locked(dmar, old_domain); 542 TD_PINNED_ASSERT; 543 return (error); 544 } 545 546 static void 547 dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) 548 { 549 550 DMAR_ASSERT_LOCKED(dmar); 551 KASSERT(domain->refs >= 1, 552 ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs)); 553 KASSERT(domain->refs > domain->ctx_cnt, 554 ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain, 555 domain->refs, domain->ctx_cnt)); 556 557 if (domain->refs > 1) { 558 domain->refs--; 559 DMAR_UNLOCK(dmar); 560 return; 561 } 562 563 KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0, 564 ("lost ref on RMRR domain %p", domain)); 565 566 LIST_REMOVE(domain, link); 567 DMAR_UNLOCK(dmar); 568 569 taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task); 570 dmar_domain_destroy(domain); 571 } 572 573 void 574 dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) 575 { 576 struct sf_buf *sf; 577 dmar_ctx_entry_t *ctxp; 578 struct dmar_domain *domain; 579 580 DMAR_ASSERT_LOCKED(dmar); 581 KASSERT(ctx->refs >= 1, 582 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); 583 584 /* 585 * If our reference is not last, only the dereference should 586 * be performed. 587 */ 588 if (ctx->refs > 1) { 589 ctx->refs--; 590 DMAR_UNLOCK(dmar); 591 return; 592 } 593 594 KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, 595 ("lost ref on disabled ctx %p", ctx)); 596 597 /* 598 * Otherwise, the context entry must be cleared before the 599 * page table is destroyed. The mapping of the context 600 * entries page could require sleep, unlock the dmar. 601 */ 602 DMAR_UNLOCK(dmar); 603 TD_PREP_PINNED_ASSERT; 604 ctxp = dmar_map_ctx_entry(ctx, &sf); 605 DMAR_LOCK(dmar); 606 KASSERT(ctx->refs >= 1, 607 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); 608 609 /* 610 * Other thread might have referenced the context, in which 611 * case again only the dereference should be performed. 612 */ 613 if (ctx->refs > 1) { 614 ctx->refs--; 615 DMAR_UNLOCK(dmar); 616 dmar_unmap_pgtbl(sf); 617 TD_PINNED_ASSERT; 618 return; 619 } 620 621 KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, 622 ("lost ref on disabled ctx %p", ctx)); 623 624 /* 625 * Clear the context pointer and flush the caches. 626 * XXXKIB: cannot do this if any RMRR entries are still present. 627 */ 628 dmar_pte_clear(&ctxp->ctx1); 629 ctxp->ctx2 = 0; 630 dmar_flush_ctx_to_ram(dmar, ctxp); 631 dmar_inv_ctx_glob(dmar); 632 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { 633 if (dmar->qi_enabled) 634 dmar_qi_invalidate_iotlb_glob_locked(dmar); 635 else 636 dmar_inv_iotlb_glob(dmar); 637 } 638 dmar_unmap_pgtbl(sf); 639 domain = ctx->domain; 640 dmar_ctx_unlink(ctx); 641 free(ctx, M_DMAR_CTX); 642 dmar_unref_domain_locked(dmar, domain); 643 TD_PINNED_ASSERT; 644 } 645 646 void 647 dmar_free_ctx(struct dmar_ctx *ctx) 648 { 649 struct dmar_unit *dmar; 650 651 dmar = ctx->domain->dmar; 652 DMAR_LOCK(dmar); 653 dmar_free_ctx_locked(dmar, ctx); 654 } 655 656 /* 657 * Returns with the domain locked. 658 */ 659 struct dmar_ctx * 660 dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) 661 { 662 struct dmar_domain *domain; 663 struct dmar_ctx *ctx; 664 665 DMAR_ASSERT_LOCKED(dmar); 666 667 LIST_FOREACH(domain, &dmar->domains, link) { 668 LIST_FOREACH(ctx, &domain->contexts, link) { 669 if (ctx->rid == rid) 670 return (ctx); 671 } 672 } 673 return (NULL); 674 } 675 676 void 677 dmar_domain_free_entry(struct dmar_map_entry *entry, bool free) 678 { 679 struct dmar_domain *domain; 680 681 domain = entry->domain; 682 DMAR_DOMAIN_LOCK(domain); 683 if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0) 684 dmar_gas_free_region(domain, entry); 685 else 686 dmar_gas_free_space(domain, entry); 687 DMAR_DOMAIN_UNLOCK(domain); 688 if (free) 689 dmar_gas_free_entry(domain, entry); 690 else 691 entry->flags = 0; 692 } 693 694 void 695 dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free) 696 { 697 struct dmar_unit *unit; 698 699 unit = entry->domain->dmar; 700 if (unit->qi_enabled) { 701 DMAR_LOCK(unit); 702 dmar_qi_invalidate_locked(entry->domain, entry->start, 703 entry->end - entry->start, &entry->gseq); 704 if (!free) 705 entry->flags |= DMAR_MAP_ENTRY_QI_NF; 706 TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); 707 DMAR_UNLOCK(unit); 708 } else { 709 domain_flush_iotlb_sync(entry->domain, entry->start, 710 entry->end - entry->start); 711 dmar_domain_free_entry(entry, free); 712 } 713 } 714 715 void 716 dmar_domain_unload(struct dmar_domain *domain, 717 struct dmar_map_entries_tailq *entries, bool cansleep) 718 { 719 struct dmar_unit *unit; 720 struct dmar_map_entry *entry, *entry1; 721 struct dmar_qi_genseq gseq; 722 int error; 723 724 unit = domain->dmar; 725 726 TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { 727 KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0, 728 ("not mapped entry %p %p", domain, entry)); 729 error = domain_unmap_buf(domain, entry->start, entry->end - 730 entry->start, cansleep ? DMAR_PGF_WAITOK : 0); 731 KASSERT(error == 0, ("unmap %p error %d", domain, error)); 732 if (!unit->qi_enabled) { 733 domain_flush_iotlb_sync(domain, entry->start, 734 entry->end - entry->start); 735 TAILQ_REMOVE(entries, entry, dmamap_link); 736 dmar_domain_free_entry(entry, true); 737 } 738 } 739 if (TAILQ_EMPTY(entries)) 740 return; 741 742 KASSERT(unit->qi_enabled, ("loaded entry left")); 743 DMAR_LOCK(unit); 744 TAILQ_FOREACH(entry, entries, dmamap_link) { 745 entry->gseq.gen = 0; 746 entry->gseq.seq = 0; 747 dmar_qi_invalidate_locked(domain, entry->start, entry->end - 748 entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ? 749 &gseq : NULL); 750 } 751 TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { 752 entry->gseq = gseq; 753 TAILQ_REMOVE(entries, entry, dmamap_link); 754 TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); 755 } 756 DMAR_UNLOCK(unit); 757 } 758 759 static void 760 dmar_domain_unload_task(void *arg, int pending) 761 { 762 struct dmar_domain *domain; 763 struct dmar_map_entries_tailq entries; 764 765 domain = arg; 766 TAILQ_INIT(&entries); 767 768 for (;;) { 769 DMAR_DOMAIN_LOCK(domain); 770 TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry, 771 dmamap_link); 772 DMAR_DOMAIN_UNLOCK(domain); 773 if (TAILQ_EMPTY(&entries)) 774 break; 775 dmar_domain_unload(domain, &entries, true); 776 } 777 } 778