/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);

static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * Allocated context page must be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * threads are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	TD_PINNED_ASSERT;
}

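/*
 * Return a pointer to the context entry for the given request id.
 * The context-entries page for the rid's bus is mapped through an
 * sf_buf returned in *sfp; the caller unmaps it with
 * dmar_unmap_pgtbl() when done.
 */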
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	dmar_ctx_entry_t *ctxp;

	ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->rid),
	    DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ctx->rid & 0xff;
	return (ctxp);
}

static void
ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
	ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
	ctx->ctx_tag.common.lowaddr = maxaddr;
	ctx->ctx_tag.common.highaddr = maxaddr;
	ctx->ctx_tag.common.maxsize = maxaddr;
	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->ctx_tag.common.maxsegsz = maxaddr;
	ctx->ctx_tag.ctx = ctx;
	ctx->ctx_tag.owner = dev;
	/* XXXKIB initialize tag further */
}

static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
	struct dmar_unit *unit;
	vm_page_t ctx_root;

	unit = ctx->dmar;
	KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->unit, pci_get_bus(ctx->ctx_tag.owner),
	    pci_get_slot(ctx->ctx_tag.owner),
	    pci_get_function(ctx->ctx_tag.owner),
	    ctxp->ctx1, ctxp->ctx2));
	ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
	ctxp->ctx2 |= ctx->awlvl;
	if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(ctx->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
}

static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * The VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes. */
			if (bootverbose) {
				device_printf(dev, "BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    ctx->dmar->unit, start, end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
		    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by the context rb
		 * tree.  Get rid of the failed entry, but do not stop
		 * the loop.  The rest of the parsed RMRR entries are
		 * loaded and removed on the context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(ctx->dmar);
			ctx->flags |= DMAR_CTX_RMRR;
			DMAR_UNLOCK(ctx->dmar);
		} else {
			if (error1 != 0) {
				device_printf(dev,
				    "dmar%d failed to map RMRR region "
				    "(%jx, %jx) %d\n",
				    ctx->dmar->unit, start, end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(ctx, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

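/*
 * Allocate a new context and initialize the parts that do not need
 * the dmar lock: the map entry tree, the deferred unload queue and
 * task, and the context mutex.
 */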
static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	RB_INIT(&ctx->rb_root);
	TAILQ_INIT(&ctx->unload_entries);
	TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
	mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
	ctx->dmar = dmar;
	ctx->rid = rid;
	return (ctx);
}

static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

	if (gas_inited) {
		DMAR_CTX_LOCK(ctx);
		dmar_gas_fini_ctx(ctx);
		DMAR_CTX_UNLOCK(ctx);
	}
	if (pgtbl_inited) {
		if (ctx->pgtbl_obj != NULL)
			DMAR_CTX_PGLOCK(ctx);
		ctx_free_pgtbl(ctx);
	}
	mtx_destroy(&ctx->lock);
	free(ctx, M_DMAR_CTX);
}

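/*
 * Find the context for the given request id, creating it if it does
 * not exist yet.  Sleeping allocations are done with the dmar lock
 * dropped, so the context list is rechecked after relocking; if
 * another thread created the context meanwhile, the local copy is
 * destroyed and the existing one is reused.
 */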
struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
    bool rmrr_init)
{
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error, mgaw;
	bool enable;

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, rid);
	error = 0;
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if the sleep is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, bus);
		ctx1 = dmar_get_ctx_alloc(dmar, rid);

		if (id_mapped) {
			/*
			 * For now, use the maximal usable physical
			 * address of the installed memory to
			 * calculate the mgaw.  It is useful for the
			 * identity mapping, and less so for the
			 * virtualized bus address space.
			 */
			ctx1->end = ptoa(Maxmem);
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		} else {
			ctx1->end = BUS_SPACE_MAXADDR;
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Use all supported address space for remapping. */
			ctx1->end = 1ULL << (ctx1->agaw - 1);
		}

		dmar_gas_init_ctx(ctx1);
		if (id_mapped) {
			if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
				ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
				    ctx1->end);
			}
			ctx1->flags |= DMAR_CTX_IDMAP;
		} else {
			error = ctx_alloc_pgtbl(ctx1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Disable local apic region access. */
			error = dmar_gas_reserve_region(ctx1, 0xfee00000,
			    0xfeefffff + 1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			error = ctx_init_rmrr(ctx1, dev);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, rid);
		if (ctx == NULL) {
			ctx = ctx1;
			ctx->ctx_tag.owner = dev;
			ctx->domain = alloc_unrl(dmar->domids);
			if (ctx->domain == -1) {
				DMAR_UNLOCK(dmar);
				dmar_unmap_pgtbl(sf, true);
				dmar_ctx_dtr(ctx, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			ctx_tag_init(ctx, dev);

			/*
			 * This is the first activated context for the
			 * DMAR unit.  Enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->contexts))
				enable = true;
			LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
			ctx_id_entry_init(ctx, ctxp);
			device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d domain %d mgaw %d "
			    "agaw %d %s-mapped\n",
			    dmar->unit, dmar->segment, bus, slot,
			    func, ctx->domain, ctx->mgaw, ctx->agaw,
			    id_mapped ? "id" : "re");
		} else {
			dmar_ctx_dtr(ctx1, true, true);
		}
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	}
	ctx->refs++;
	if ((ctx->flags & DMAR_CTX_RMRR) != 0)
		ctx->refs++; /* XXXKIB */

	/*
	 * If dmar declares Caching Mode as Set, follow 11.5 "Caching
	 * Mode Consideration" and do the (global) invalidation of the
	 * negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
		if (dmar->qi_enabled) {
			dmar_qi_invalidate_ctx_glob_locked(dmar);
			if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				dmar_qi_invalidate_iotlb_glob_locked(dmar);
		} else {
			error = dmar_inv_ctx_glob(dmar);
			if (error == 0 &&
			    (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				error = dmar_inv_iotlb_glob(dmar);
			if (error != 0) {
				dmar_free_ctx_locked(dmar, ctx);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of
	 * GCMD_TE to avoid an unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error != 0) {
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

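/*
 * Release a reference on the context.  When the last reference is
 * dropped, the context entry is cleared, the caches are flushed, and
 * the context with its page tables and domain id is destroyed.
 * Called with the dmar lock held; the lock is dropped on return.
 */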
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not last, only the dereference should
	 * be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep, unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	LIST_REMOVE(ctx, link);
	DMAR_UNLOCK(dmar);

	/*
	 * The rest of the destruction is invisible to other users of
	 * the dmar unit.
	 */
	taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
	KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
	    ("unfinished unloads %p", ctx));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	free_unr(dmar->domids, ctx->domain);
	dmar_ctx_dtr(ctx, true, true);
	TD_PINNED_ASSERT;
}

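/*
 * Locking wrapper around dmar_free_ctx_locked().
 */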
void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(ctx, &dmar->contexts, link) {
		if (ctx->rid == rid)
			return (ctx);
	}
	return (NULL);
}

void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_ctx *ctx;

	ctx = entry->ctx;
	DMAR_CTX_LOCK(ctx);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(ctx, entry);
	else
		dmar_gas_free_space(ctx, entry);
	DMAR_CTX_UNLOCK(ctx);
	if (free)
		dmar_gas_free_entry(ctx, entry);
	else
		entry->flags = 0;
}

void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->ctx->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->ctx, entry->start,
		    entry->end - entry->start, &entry->gseq);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
		    entry->start);
		dmar_ctx_free_entry(entry, free);
	}
}

void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	struct dmar_qi_genseq gseq;
	int error;

	unit = ctx->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", ctx, entry));
		error = ctx_unmap_buf(ctx, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", ctx, error));
		if (!unit->qi_enabled) {
			ctx_flush_iotlb_sync(ctx, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_ctx_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		entry->gseq.gen = 0;
		entry->gseq.seq = 0;
		dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
		    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
		    &gseq : NULL);
	}
	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		entry->gseq = gseq;
		TAILQ_REMOVE(entries, entry, dmamap_link);
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
	}
	DMAR_UNLOCK(unit);
}

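/*
 * Taskqueue handler: repeatedly grab the batched entries from the
 * context's deferred unload queue and unload them until the queue is
 * observed empty.
 */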
static void
dmar_ctx_unload_task(void *arg, int pending)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entries_tailq entries;

	ctx = arg;
	TAILQ_INIT(&entries);

	for (;;) {
		DMAR_CTX_LOCK(ctx);
		TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
		    dmamap_link);
		DMAR_CTX_UNLOCK(ctx);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_ctx_unload(ctx, &entries, true);
	}
}