/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);

static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * The allocated context page must be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * threads are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_flush_root_to_ram(dmar, re);
	dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
}

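/*
 * Map the context table page for the bus encoded in the context's
 * request ID and return a pointer to the entry selected by the RID's
 * device/function bits.  The page is expected to exist already (see
 * dmar_ensure_ctx_page()); the caller releases the mapping with
 * dmar_unmap_pgtbl(*sfp).
 */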
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	dmar_ctx_entry_t *ctxp;

	ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->rid),
	    DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ctx->rid & 0xff;
	return (ctxp);
}

static void
ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
	ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
	ctx->ctx_tag.common.lowaddr = maxaddr;
	ctx->ctx_tag.common.highaddr = maxaddr;
	ctx->ctx_tag.common.maxsize = maxaddr;
	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->ctx_tag.common.maxsegsz = maxaddr;
	ctx->ctx_tag.ctx = ctx;
	ctx->ctx_tag.owner = dev;
	/* XXXKIB initialize tag further */
}

static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
	struct dmar_unit *unit;
	vm_page_t ctx_root;

	unit = ctx->dmar;
	KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->unit, pci_get_bus(ctx->ctx_tag.owner),
	    pci_get_slot(ctx->ctx_tag.owner),
	    pci_get_function(ctx->ctx_tag.owner),
	    ctxp->ctx1, ctxp->ctx2));
	ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
	ctxp->ctx2 |= ctx->awlvl;
	if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(ctx->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
	dmar_flush_ctx_to_ram(unit, ctxp);
}

static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * The VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes */
			if (bootverbose) {
				device_printf(dev, "BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    ctx->dmar->unit, start, end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
		    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by the context's
		 * RB tree.  Get rid of the failed entry, but do not
		 * stop the loop.  The rest of the parsed RMRR entries
		 * are loaded and removed on context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(ctx->dmar);
			ctx->flags |= DMAR_CTX_RMRR;
			DMAR_UNLOCK(ctx->dmar);
		} else {
			if (error1 != 0) {
				device_printf(dev,
				    "dmar%d failed to map RMRR region "
				    "(%jx, %jx) %d\n",
				    ctx->dmar->unit, start, end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(ctx, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

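/*
 * Allocate and minimally initialize a context structure for the given
 * request ID.  The caller completes the setup and links the context
 * into the unit's list.
 */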
static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	RB_INIT(&ctx->rb_root);
	TAILQ_INIT(&ctx->unload_entries);
	TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
	mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
	ctx->dmar = dmar;
	ctx->rid = rid;
	return (ctx);
}

static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

	if (gas_inited) {
		DMAR_CTX_LOCK(ctx);
		dmar_gas_fini_ctx(ctx);
		DMAR_CTX_UNLOCK(ctx);
	}
	if (pgtbl_inited) {
		if (ctx->pgtbl_obj != NULL)
			DMAR_CTX_PGLOCK(ctx);
		ctx_free_pgtbl(ctx);
	}
	mtx_destroy(&ctx->lock);
	free(ctx, M_DMAR_CTX);
}

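/*
 * Look up the context for the device on the unit, creating and
 * initializing it on first use.  A reference is taken for the caller
 * and is dropped with dmar_free_ctx().
 */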
struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
    bool rmrr_init)
{
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error, mgaw;
	bool enable;

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, rid);
	error = 0;
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if sleep is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
		ctx1 = dmar_get_ctx_alloc(dmar, rid);

		if (id_mapped) {
			/*
			 * For now, use the maximal usable physical
			 * address of the installed memory to
			 * calculate the mgaw.  It is useful for the
			 * identity mapping, and less so for the
			 * virtualized bus address space.
			 */
			ctx1->end = ptoa(Maxmem);
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		} else {
			ctx1->end = BUS_SPACE_MAXADDR;
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Use all supported address space for remapping. */
			ctx1->end = 1ULL << (ctx1->agaw - 1);
		}

		dmar_gas_init_ctx(ctx1);
		if (id_mapped) {
			if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
				ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
				    ctx1->end);
			}
			ctx1->flags |= DMAR_CTX_IDMAP;
		} else {
			error = ctx_alloc_pgtbl(ctx1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Disable local apic region access. */
			error = dmar_gas_reserve_region(ctx1, 0xfee00000,
			    0xfeefffff + 1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			error = ctx_init_rmrr(ctx1, dev);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, rid);
		if (ctx == NULL) {
			ctx = ctx1;
			ctx->ctx_tag.owner = dev;
			ctx->domain = alloc_unrl(dmar->domids);
			if (ctx->domain == -1) {
				DMAR_UNLOCK(dmar);
				dmar_unmap_pgtbl(sf);
				dmar_ctx_dtr(ctx, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			ctx_tag_init(ctx, dev);

			/*
			 * If this is the first activated context for
			 * the DMAR unit, enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->contexts))
				enable = true;
			LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
			ctx_id_entry_init(ctx, ctxp);
			device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
			    "agaw %d %s-mapped\n",
			    dmar->unit, dmar->segment, bus, slot,
			    func, rid, ctx->domain, ctx->mgaw, ctx->agaw,
			    id_mapped ? "id" : "re");
		} else {
			dmar_ctx_dtr(ctx1, true, true);
		}
		dmar_unmap_pgtbl(sf);
	}
	ctx->refs++;
	if ((ctx->flags & DMAR_CTX_RMRR) != 0)
		ctx->refs++; /* XXXKIB */

	/*
	 * If the dmar declares Caching Mode as Set, follow 11.5
	 * "Caching Mode Consideration" and do the (global)
	 * invalidation of the negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
		if (dmar->qi_enabled) {
			dmar_qi_invalidate_ctx_glob_locked(dmar);
			if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				dmar_qi_invalidate_iotlb_glob_locked(dmar);
		} else {
			error = dmar_inv_ctx_glob(dmar);
			if (error == 0 &&
			    (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				error = dmar_inv_iotlb_glob(dmar);
			if (error != 0) {
				dmar_free_ctx_locked(dmar, ctx);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of
	 * GCMD_TE to avoid an unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error != 0) {
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

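/*
 * Drop a reference on the context with the dmar lock held.  When the
 * last reference goes away, the context entry is cleared, the caches
 * are flushed, and the context together with its page tables is
 * destroyed.  The dmar lock is released before returning.
 */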
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not the last one, only the dereference
	 * should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  Mapping the context entries page
	 * could require sleep; unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf);
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_flush_ctx_to_ram(dmar, ctxp);
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	LIST_REMOVE(ctx, link);
	DMAR_UNLOCK(dmar);

	/*
	 * The rest of the destruction is invisible to other users of
	 * the dmar unit.
	 */
	taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
	KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
	    ("unfinished unloads %p", ctx));
	dmar_unmap_pgtbl(sf);
	free_unr(dmar->domids, ctx->domain);
	dmar_ctx_dtr(ctx, true, true);
	TD_PINNED_ASSERT;
}

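/*
 * Convenience wrapper that acquires the dmar lock before dropping the
 * context reference.
 */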
void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(ctx, &dmar->contexts, link) {
		if (ctx->rid == rid)
			return (ctx);
	}
	return (NULL);
}

void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_ctx *ctx;

	ctx = entry->ctx;
	DMAR_CTX_LOCK(ctx);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(ctx, entry);
	else
		dmar_gas_free_space(ctx, entry);
	DMAR_CTX_UNLOCK(ctx);
	if (free)
		dmar_gas_free_entry(ctx, entry);
	else
		entry->flags = 0;
}

void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->ctx->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->ctx, entry->start,
		    entry->end - entry->start, &entry->gseq);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
		    entry->start);
		dmar_ctx_free_entry(entry, free);
	}
}

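/*
 * Unmap the listed map entries for the context.  With queued
 * invalidation enabled, the entries are moved to the unit's TLB flush
 * list and freed after the invalidation completes; otherwise the
 * IOTLB is flushed synchronously and the entries are freed here.
 */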
void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	struct dmar_qi_genseq gseq;
	int error;

	unit = ctx->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", ctx, entry));
		error = ctx_unmap_buf(ctx, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", ctx, error));
		if (!unit->qi_enabled) {
			ctx_flush_iotlb_sync(ctx, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_ctx_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		entry->gseq.gen = 0;
		entry->gseq.seq = 0;
		dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
		    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
		    &gseq : NULL);
	}
	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		entry->gseq = gseq;
		TAILQ_REMOVE(entries, entry, dmamap_link);
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
	}
	DMAR_UNLOCK(unit);
}

static void
dmar_ctx_unload_task(void *arg, int pending)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entries_tailq entries;

	ctx = arg;
	TAILQ_INIT(&entries);

	for (;;) {
		DMAR_CTX_LOCK(ctx);
		TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
		    dmamap_link);
		DMAR_CTX_UNLOCK(ctx);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_ctx_unload(ctx, &entries, true);
	}
}