/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <dev/pci/pcireg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>

static int domain_unmap_buf_locked(struct dmar_domain *domain,
    iommu_gaddr_t base, iommu_gaddr_t size, int flags);

/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing the page tables, since usually the DMARs are identical and
 * have the same capabilities.  Still, the cache records the
 * information needed to match DMAR capabilities and page table
 * format, to correctly handle different DMARs.
 */

struct idpgtbl {
	iommu_gaddr_t maxaddr;	/* Page table covers the guest address
				   range [0..maxaddr) */
	int pglvl;		/* Total page table levels ignoring
				   superpages */
	int leaf;		/* The last materialized page table
				   level; it is non-zero if superpages
				   are supported */
	vm_object_t pgtbl_obj;	/* The page table pages */
	LIST_ENTRY(idpgtbl) link;
};

static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

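/*
 * Layout of the page table pages inside pgtbl_obj, as built by
 * domain_idmap_nextlvl() below: the root page table page is at pindex
 * 0, and the children of the page at pindex idx occupy pindexes
 * idx * IOMMU_NPTEPG + 1 .. idx * IOMMU_NPTEPG + IOMMU_NPTEPG (with
 * 4K pages and 8-byte PTEs, the root's children are pindexes 1..512).
 */
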
/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    iommu_gaddr_t addr)
{
	vm_page_t m1;
	iommu_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t f, pg_sz;
	vm_pindex_t base;
	int i;

	VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
	if (addr >= tbl->maxaddr)
		return;
	(void)iommu_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
	    IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
	base = idx * IOMMU_NPTEPG + 1; /* Index of the first child page of idx */
	pg_sz = pglvl_page_size(tbl->pglvl, lvl);
	if (lvl != tbl->leaf) {
		for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz)
			domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
	}
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	pte = iommu_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
	if (lvl == tbl->leaf) {
		for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
			    DMAR_PTE_R | DMAR_PTE_W;
		}
	} else {
		for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			m1 = iommu_pgalloc(tbl->pgtbl_obj, base + i,
			    IOMMU_PGF_NOALLOC);
			KASSERT(m1 != NULL, ("lost page table page"));
			pte[i].pte = (DMAR_PTE_ADDR_MASK &
			    VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
		}
	}
	/* domain_get_idmap_pgtbl flushes CPU cache if needed. */
	iommu_unmap_pgtbl(sf);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}

/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The maxaddr byte is allowed to be
 * left unmapped, which aligns with the definition of Maxmem as the
 * highest usable physical address + 1.  If superpages are used, the
 * maxaddr is typically mapped.
 */
vm_object_t
domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
	struct dmar_unit *unit;
	struct idpgtbl *tbl;
	vm_object_t res;
	vm_page_t m;
	int leaf, i;

	leaf = 0; /* silence gcc */

	/*
	 * First, determine where to stop the paging structures.
	 */
	for (i = 0; i < domain->pglvl; i++) {
		if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
			leaf = i;
			break;
		}
	}

	/*
	 * Search the cache for a compatible page table.  A qualified
	 * page table must map up to maxaddr, its level must be
	 * supported by the DMAR, and its leaf must be equal to the
	 * calculated value.  The latter restriction could be lifted,
	 * but deviations are believed to be impossible for existing
	 * hardware.
	 */
	sx_slock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_sunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}

	/*
	 * Not found in the cache, relock the cache into exclusive
	 * mode to be able to add an element, and recheck the cache
	 * again after the relock.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_xunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			return (res);
		}
	}

	/*
	 * Still not found, create a new page table.
	 */
	tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
	tbl->pglvl = domain->pglvl;
	tbl->leaf = leaf;
	tbl->maxaddr = maxaddr;
	tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
	domain_idmap_nextlvl(tbl, 0, 0, 0);
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	LIST_INSERT_HEAD(&idpgtbls, tbl, link);
	res = tbl->pgtbl_obj;
	vm_object_reference(res);
	sx_xunlock(&idpgtbl_lock);

end:
	/*
	 * Table was found or created.
	 *
	 * If the DMAR does not snoop paging structure accesses, flush
	 * the CPU cache to memory.  Note that the dmar_unmap_pgtbl()
	 * coherent argument was possibly invalid at the time of the
	 * identity page table creation, since the DMAR passed at
	 * creation time could be coherent while the current DMAR is
	 * not.
	 *
	 * If the DMAR cannot look into the chipset write buffer,
	 * flush it as well.
	 */
	unit = domain->dmar;
	if (!DMAR_IS_COHERENT(unit)) {
		VM_OBJECT_WLOCK(res);
		for (m = vm_page_lookup(res, 0); m != NULL;
		     m = vm_page_next(m))
			pmap_invalidate_cache_pages(&m, 1);
		VM_OBJECT_WUNLOCK(res);
	}
	if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}

	return (res);
}

/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
	struct idpgtbl *tbl, *tbl1;
	vm_object_t rmobj;

	sx_slock(&idpgtbl_lock);
	KASSERT(obj->ref_count >= 2, ("lost cache reference"));
	vm_object_deallocate(obj);

	/*
	 * The cache always owns one last reference on the page table
	 * object.  If there is an additional reference, the object
	 * must stay.
	 */
	if (obj->ref_count > 1) {
		sx_sunlock(&idpgtbl_lock);
		return;
	}

	/*
	 * The cache reference is the last one, remove the cache
	 * element and free the page table object, returning the page
	 * table pages to the system.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
		rmobj = tbl->pgtbl_obj;
		if (rmobj->ref_count == 1) {
			LIST_REMOVE(tbl, link);
			atomic_subtract_int(&iommu_tbl_pagecnt,
			    rmobj->resident_page_count);
			vm_object_deallocate(rmobj);
			free(tbl, M_DMAR_IDPGTBL);
		}
	}
	sx_xunlock(&idpgtbl_lock);
}

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */

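/*
 * Return a mapped pointer to the page table entry covering base at
 * the given level, together with the sf_buf mapping its page table
 * page.  *idxp and *sf cache the previously mapped page so that
 * consecutive calls touching the same page reuse the mapping.  A
 * missing intermediate page table page is allocated and linked into
 * the parent level through a recursive call.
 */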
static iommu_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
	vm_page_t m;
	struct sf_buf *sfp;
	iommu_pte_t *pte, *ptep;
	vm_pindex_t idx, idx1;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));

	idx = pglvl_pgtbl_get_pindex(domain->pglvl, base, lvl);
	if (*sf != NULL && idx == *idxp) {
		pte = (iommu_pte_t *)sf_buf_kva(*sf);
	} else {
		if (*sf != NULL)
			iommu_unmap_pgtbl(*sf);
		*idxp = idx;
retry:
		pte = iommu_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
		if (pte == NULL) {
			KASSERT(lvl > 0,
			    ("lost root page table page %p", domain));
			/*
			 * The page table page does not exist, allocate
			 * it and create a pte in the preceding page
			 * level to reference the allocated page table
			 * page.
			 */
			m = iommu_pgalloc(domain->pgtbl_obj, idx, flags |
			    IOMMU_PGF_ZERO);
			if (m == NULL)
				return (NULL);

			/*
			 * Prevent a potential free while pgtbl_obj is
			 * unlocked in the recursive call to
			 * domain_pgtbl_map_pte(), if another thread
			 * did a pte write and clear while the lock
			 * was dropped.
			 */
			vm_page_wire(m);

			sfp = NULL;
			ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
			    flags, &idx1, &sfp);
			if (ptep == NULL) {
				KASSERT(m->pindex != 0,
				    ("losing root page %p", domain));
				vm_page_unwire_noq(m);
				iommu_pgfree(domain->pgtbl_obj, m->pindex,
				    flags);
				return (NULL);
			}
			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
			    VM_PAGE_TO_PHYS(m));
			dmar_flush_pte_to_ram(domain->dmar, ptep);
			vm_page_wire(sf_buf_page(sfp));
			vm_page_unwire_noq(m);
			iommu_unmap_pgtbl(sfp);
			/* Only executed once. */
			goto retry;
		}
	}
	pte += pglvl_pgtbl_pte_off(domain->pglvl, base, lvl);
	return (pte);
}

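/*
 * Map the run of host pages ma[] onto the guest address range
 * [base, base + size) in the domain page table.  A superpage PTE is
 * used when the level supports superpages and the mapping size, the
 * guest and host alignment, and the physical contiguity of the host
 * pages allow it.
 */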
static int
domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
	iommu_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t pg_sz, base1;
	vm_pindex_t pi, c, idx, run_sz;
	int lvl;
	bool superpage;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);

	base1 = base;
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
	    pi += run_sz) {
		for (lvl = 0, c = 0, superpage = false;; lvl++) {
			pg_sz = domain_page_size(domain, lvl);
			run_sz = pg_sz >> IOMMU_PAGE_SHIFT;
			if (lvl == domain->pglvl - 1)
				break;
			/*
			 * Check if the current base is suitable for a
			 * superpage mapping.  First, verify the level.
			 */
			if (!domain_is_sp_lvl(domain, lvl))
				continue;
			/*
			 * Next, look at the size of the mapping and
			 * the alignment of both the guest and host
			 * addresses.
			 */
			if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
			    (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
				continue;
			/* All passed, check host pages contiguity. */
			if (c == 0) {
				for (c = 1; c < run_sz; c++) {
					if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
					    VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
					    PAGE_SIZE)
						break;
				}
			}
			if (c >= run_sz) {
				superpage = true;
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("mapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
		KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
		pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
		if (pte == NULL) {
			KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
			    ("failed waitable pte alloc %p", domain));
			if (sf != NULL)
				iommu_unmap_pgtbl(sf);
			domain_unmap_buf_locked(domain, base1, base - base1,
			    flags);
			TD_PINNED_ASSERT;
			return (ENOMEM);
		}
		dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
		    (superpage ? DMAR_PTE_SP : 0));
		dmar_flush_pte_to_ram(domain->dmar, pte);
		vm_page_wire(sf_buf_page(sf));
	}
	if (sf != NULL)
		iommu_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct dmar_domain *domain;
	struct dmar_unit *unit;
	uint64_t pflags;
	int error;

	pflags = ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);

	domain = IODOM2DOM(iodom);
	unit = domain->dmar;

	KASSERT((iodom->flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & IOMMU_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & IOMMU_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
	    ("neither read nor write %jx", (uintmax_t)pflags));
	KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
	    DMAR_PTE_TM)) == 0,
	    ("invalid pte flags %jx", (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_SC) != 0,
	    ("PTE_SNP for dmar without snoop control %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_TM) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_DI) != 0,
	    ("PTE_TM for dmar without DIOTLB %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	if (error != 0)
		return (error);

	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
		domain_flush_iotlb_sync(domain, base, size);
	else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		/* See 11.1 Write Buffer Flushing. */
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}
	return (0);
}

static void domain_unmap_clear_pte(struct dmar_domain *domain,
    iommu_gaddr_t base, int lvl, int flags, iommu_pte_t *pte,
    struct sf_buf **sf, bool free_sf);

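/*
 * Helpers for the unmap path below: domain_unmap_clear_pte() clears a
 * PTE, flushes it to RAM and unwires the page table page holding it;
 * when the page loses its last wiring, the page is freed and the
 * entry referencing it in the parent level is cleared in turn by
 * domain_free_pgtbl_pde().
 */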
static void
domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
    int lvl, int flags)
{
	struct sf_buf *sf;
	iommu_pte_t *pde;
	vm_pindex_t idx;

	sf = NULL;
	pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
	domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}

static void
domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, iommu_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
	vm_page_t m;

	dmar_pte_clear(&pte->pte);
	dmar_flush_pte_to_ram(domain->dmar, pte);
	m = sf_buf_page(*sf);
	if (free_sf) {
		iommu_unmap_pgtbl(*sf);
		*sf = NULL;
	}
	if (!vm_page_unwire_noq(m))
		return;
	KASSERT(lvl != 0,
	    ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	KASSERT(m->pindex != 0,
	    ("lost reference (idx) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	iommu_pgfree(domain->pgtbl_obj, m->pindex, flags);
	domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}

/*
 * Assumes that the unmap is never partial.
 */
static int
domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	iommu_pte_t *pte;
	struct sf_buf *sf;
	vm_pindex_t idx;
	iommu_gaddr_t pg_sz;
	int lvl;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	if (size == 0)
		return (0);

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & IOMMU_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & IOMMU_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	pg_sz = 0; /* silence gcc */
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
		for (lvl = 0; lvl < domain->pglvl; lvl++) {
			if (lvl != domain->pglvl - 1 &&
			    !domain_is_sp_lvl(domain, lvl))
				continue;
			pg_sz = domain_page_size(domain, lvl);
			if (pg_sz > size)
				continue;
			pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
			    &idx, &sf);
			KASSERT(pte != NULL,
			    ("sleeping or page missed %p %jx %d 0x%x",
			    domain, (uintmax_t)base, lvl, flags));
			if ((pte->pte & DMAR_PTE_SP) != 0 ||
			    lvl == domain->pglvl - 1) {
				domain_unmap_clear_pte(domain, base, lvl,
				    flags, pte, &sf, false);
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("unmapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
	}
	if (sf != NULL)
		iommu_unmap_pgtbl(sf);
	/*
	 * See 11.1 Write Buffer Flushing for an explanation why RWBF
	 * can be ignored here.
	 */

	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct dmar_domain *domain;
	int error;

	domain = IODOM2DOM(iodom);

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_unmap_buf_locked(domain, base, size, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	return (error);
}

int
dmar_domain_alloc_pgtbl(struct dmar_domain *domain)
{
	vm_page_t m;

	KASSERT(domain->pgtbl_obj == NULL,
	    ("already initialized %p", domain));

	domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
	DMAR_DOMAIN_PGLOCK(domain);
	m = iommu_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
	    IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
	/* No implicit free of the top level page table page. */
	vm_page_wire(m);
	DMAR_DOMAIN_PGUNLOCK(domain);
	DMAR_LOCK(domain->dmar);
	domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
	DMAR_UNLOCK(domain->dmar);
	return (0);
}

void
dmar_domain_free_pgtbl(struct dmar_domain *domain)
{
	vm_object_t obj;
	vm_page_t m;

	obj = domain->pgtbl_obj;
	if (obj == NULL) {
		KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
		    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
		    ("lost pagetable object domain %p", domain));
		return;
	}
	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	domain->pgtbl_obj = NULL;

	if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
		put_idmap_pgtbl(obj);
		domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
		return;
	}

	/* Obliterate ref_counts */
	VM_OBJECT_ASSERT_WLOCKED(obj);
	for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) {
		vm_page_clearref(m);
		vm_wire_sub(1);
	}
	VM_OBJECT_WUNLOCK(obj);
	vm_object_deallocate(obj);
}

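/*
 * Issue a register-based IOTLB invalidation request of the given type
 * and busy-wait until the hardware clears the IVT bit, draining DMA
 * read and write requests.  Returns the final IOTLB register value so
 * the caller can check the actual invalidation granularity.
 */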
static inline uint64_t
domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
	uint64_t iotlbr;

	dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
	    DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
	for (;;) {
		iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
		if ((iotlbr & DMAR_IOTLB_IVT) == 0)
			break;
		cpu_spinwait();
	}
	return (iotlbr);
}

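/*
 * Flush the IOTLB for the given range using register-based (non-QI)
 * invalidation.  A domain-selective invalidation is used when
 * page-selective invalidation is not supported or the range is larger
 * than 2MB; otherwise the range is invalidated in naturally aligned
 * chunks sized by calc_am().
 */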
void
domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size)
{
	struct dmar_unit *unit;
	iommu_gaddr_t isize;
	uint64_t iotlbr;
	int am, iro;

	unit = domain->dmar;
	KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
	    unit->iommu.unit));
	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
	DMAR_LOCK(unit);
	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
		iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
		    DMAR_IOTLB_DID(domain->domain), iro);
		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
		    DMAR_IOTLB_IAIG_INVLD,
		    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
		    (uintmax_t)iotlbr));
	} else {
		for (; size > 0; base += isize, size -= isize) {
			am = calc_am(unit, base, size, &isize);
			dmar_write8(unit, iro, base | am);
			iotlbr = domain_wait_iotlb_flush(unit,
			    DMAR_IOTLB_IIRG_PAGE |
			    DMAR_IOTLB_DID(domain->domain), iro);
			KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_INVLD,
			    ("dmar%d: PSI invalidation failed "
			    "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
			    unit->iommu.unit, (uintmax_t)iotlbr,
			    (uintmax_t)base, (uintmax_t)size, am));
			/*
			 * Any non-page granularity covers the whole
			 * guest address space for the domain.
			 */
			if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_PAGE)
				break;
		}
	}
	DMAR_UNLOCK(unit);
}

const struct iommu_domain_map_ops dmar_domain_map_ops = {
	.map = domain_map_buf,
	.unmap = domain_unmap_buf,
};