1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * This software was developed by Konstantin Belousov <kib@FreeBSD.org> 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/bus.h> 39 #include <sys/interrupt.h> 40 #include <sys/kernel.h> 41 #include <sys/ktr.h> 42 #include <sys/lock.h> 43 #include <sys/memdesc.h> 44 #include <sys/mutex.h> 45 #include <sys/proc.h> 46 #include <sys/rwlock.h> 47 #include <sys/rman.h> 48 #include <sys/sf_buf.h> 49 #include <sys/sysctl.h> 50 #include <sys/taskqueue.h> 51 #include <sys/tree.h> 52 #include <sys/uio.h> 53 #include <sys/vmem.h> 54 #include <vm/vm.h> 55 #include <vm/vm_extern.h> 56 #include <vm/vm_kern.h> 57 #include <vm/vm_object.h> 58 #include <vm/vm_page.h> 59 #include <vm/vm_pager.h> 60 #include <vm/vm_map.h> 61 #include <machine/atomic.h> 62 #include <machine/bus.h> 63 #include <machine/cpu.h> 64 #include <machine/md_var.h> 65 #include <machine/specialreg.h> 66 #include <x86/include/busdma_impl.h> 67 #include <x86/iommu/intel_reg.h> 68 #include <x86/iommu/busdma_dmar.h> 69 #include <dev/pci/pcireg.h> 70 #include <x86/iommu/intel_dmar.h> 71 72 static int domain_unmap_buf_locked(struct dmar_domain *domain, 73 dmar_gaddr_t base, dmar_gaddr_t size, int flags); 74 75 /* 76 * The cache of the identity mapping page tables for the DMARs. Using 77 * the cache saves significant amount of memory for page tables by 78 * reusing the page tables, since usually DMARs are identical and have 79 * the same capabilities. Still, cache records the information needed 80 * to match DMAR capabilities and page table format, to correctly 81 * handle different DMARs. 82 */ 83 84 struct idpgtbl { 85 dmar_gaddr_t maxaddr; /* Page table covers the guest address 86 range [0..maxaddr) */ 87 int pglvl; /* Total page table levels ignoring 88 superpages */ 89 int leaf; /* The last materialized page table 90 level, it is non-zero if superpages 91 are supported */ 92 vm_object_t pgtbl_obj; /* The page table pages */ 93 LIST_ENTRY(idpgtbl) link; 94 }; 95 96 static struct sx idpgtbl_lock; 97 SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl"); 98 static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls); 99 static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl", 100 "Intel DMAR Identity mappings cache elements"); 101 102 /* 103 * Build the next level of the page tables for the identity mapping. 104 * - lvl is the level to build; 105 * - idx is the index of the page table page in the pgtbl_obj, which is 106 * being allocated filled now; 107 * - addr is the starting address in the bus address space which is 108 * mapped by the page table page. 109 */ 110 static void 111 domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx, 112 dmar_gaddr_t addr) 113 { 114 vm_page_t m1; 115 dmar_pte_t *pte; 116 struct sf_buf *sf; 117 dmar_gaddr_t f, pg_sz; 118 vm_pindex_t base; 119 int i; 120 121 VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj); 122 if (addr >= tbl->maxaddr) 123 return; 124 (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK | 125 DMAR_PGF_ZERO); 126 base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */ 127 pg_sz = pglvl_page_size(tbl->pglvl, lvl); 128 if (lvl != tbl->leaf) { 129 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) 130 domain_idmap_nextlvl(tbl, lvl + 1, base + i, f); 131 } 132 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); 133 pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf); 134 if (lvl == tbl->leaf) { 135 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { 136 if (f >= tbl->maxaddr) 137 break; 138 pte[i].pte = (DMAR_PTE_ADDR_MASK & f) | 139 DMAR_PTE_R | DMAR_PTE_W; 140 } 141 } else { 142 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { 143 if (f >= tbl->maxaddr) 144 break; 145 m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i, 146 DMAR_PGF_NOALLOC); 147 KASSERT(m1 != NULL, ("lost page table page")); 148 pte[i].pte = (DMAR_PTE_ADDR_MASK & 149 VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W; 150 } 151 } 152 /* domain_get_idmap_pgtbl flushes CPU cache if needed. */ 153 dmar_unmap_pgtbl(sf); 154 VM_OBJECT_WLOCK(tbl->pgtbl_obj); 155 } 156 157 /* 158 * Find a ready and compatible identity-mapping page table in the 159 * cache. If not found, populate the identity-mapping page table for 160 * the context, up to the maxaddr. The maxaddr byte is allowed to be 161 * not mapped, which is aligned with the definition of Maxmem as the 162 * highest usable physical address + 1. If superpages are used, the 163 * maxaddr is typically mapped. 164 */ 165 vm_object_t 166 domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr) 167 { 168 struct dmar_unit *unit; 169 struct idpgtbl *tbl; 170 vm_object_t res; 171 vm_page_t m; 172 int leaf, i; 173 174 leaf = 0; /* silence gcc */ 175 176 /* 177 * First, determine where to stop the paging structures. 178 */ 179 for (i = 0; i < domain->pglvl; i++) { 180 if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) { 181 leaf = i; 182 break; 183 } 184 } 185 186 /* 187 * Search the cache for a compatible page table. Qualified 188 * page table must map up to maxaddr, its level must be 189 * supported by the DMAR and leaf should be equal to the 190 * calculated value. The later restriction could be lifted 191 * but I believe it is currently impossible to have any 192 * deviations for existing hardware. 193 */ 194 sx_slock(&idpgtbl_lock); 195 LIST_FOREACH(tbl, &idpgtbls, link) { 196 if (tbl->maxaddr >= maxaddr && 197 dmar_pglvl_supported(domain->dmar, tbl->pglvl) && 198 tbl->leaf == leaf) { 199 res = tbl->pgtbl_obj; 200 vm_object_reference(res); 201 sx_sunlock(&idpgtbl_lock); 202 domain->pglvl = tbl->pglvl; /* XXXKIB ? */ 203 goto end; 204 } 205 } 206 207 /* 208 * Not found in cache, relock the cache into exclusive mode to 209 * be able to add element, and recheck cache again after the 210 * relock. 211 */ 212 sx_sunlock(&idpgtbl_lock); 213 sx_xlock(&idpgtbl_lock); 214 LIST_FOREACH(tbl, &idpgtbls, link) { 215 if (tbl->maxaddr >= maxaddr && 216 dmar_pglvl_supported(domain->dmar, tbl->pglvl) && 217 tbl->leaf == leaf) { 218 res = tbl->pgtbl_obj; 219 vm_object_reference(res); 220 sx_xunlock(&idpgtbl_lock); 221 domain->pglvl = tbl->pglvl; /* XXXKIB ? */ 222 return (res); 223 } 224 } 225 226 /* 227 * Still not found, create new page table. 228 */ 229 tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK); 230 tbl->pglvl = domain->pglvl; 231 tbl->leaf = leaf; 232 tbl->maxaddr = maxaddr; 233 tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, 234 IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL); 235 VM_OBJECT_WLOCK(tbl->pgtbl_obj); 236 domain_idmap_nextlvl(tbl, 0, 0, 0); 237 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); 238 LIST_INSERT_HEAD(&idpgtbls, tbl, link); 239 res = tbl->pgtbl_obj; 240 vm_object_reference(res); 241 sx_xunlock(&idpgtbl_lock); 242 243 end: 244 /* 245 * Table was found or created. 246 * 247 * If DMAR does not snoop paging structures accesses, flush 248 * CPU cache to memory. Note that dmar_unmap_pgtbl() coherent 249 * argument was possibly invalid at the time of the identity 250 * page table creation, since DMAR which was passed at the 251 * time of creation could be coherent, while current DMAR is 252 * not. 253 * 254 * If DMAR cannot look into the chipset write buffer, flush it 255 * as well. 256 */ 257 unit = domain->dmar; 258 if (!DMAR_IS_COHERENT(unit)) { 259 VM_OBJECT_WLOCK(res); 260 for (m = vm_page_lookup(res, 0); m != NULL; 261 m = vm_page_next(m)) 262 pmap_invalidate_cache_pages(&m, 1); 263 VM_OBJECT_WUNLOCK(res); 264 } 265 if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { 266 DMAR_LOCK(unit); 267 dmar_flush_write_bufs(unit); 268 DMAR_UNLOCK(unit); 269 } 270 271 return (res); 272 } 273 274 /* 275 * Return a reference to the identity mapping page table to the cache. 276 */ 277 void 278 put_idmap_pgtbl(vm_object_t obj) 279 { 280 struct idpgtbl *tbl, *tbl1; 281 vm_object_t rmobj; 282 283 sx_slock(&idpgtbl_lock); 284 KASSERT(obj->ref_count >= 2, ("lost cache reference")); 285 vm_object_deallocate(obj); 286 287 /* 288 * Cache always owns one last reference on the page table object. 289 * If there is an additional reference, object must stay. 290 */ 291 if (obj->ref_count > 1) { 292 sx_sunlock(&idpgtbl_lock); 293 return; 294 } 295 296 /* 297 * Cache reference is the last, remove cache element and free 298 * page table object, returning the page table pages to the 299 * system. 300 */ 301 sx_sunlock(&idpgtbl_lock); 302 sx_xlock(&idpgtbl_lock); 303 LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) { 304 rmobj = tbl->pgtbl_obj; 305 if (rmobj->ref_count == 1) { 306 LIST_REMOVE(tbl, link); 307 atomic_subtract_int(&dmar_tbl_pagecnt, 308 rmobj->resident_page_count); 309 vm_object_deallocate(rmobj); 310 free(tbl, M_DMAR_IDPGTBL); 311 } 312 } 313 sx_xunlock(&idpgtbl_lock); 314 } 315 316 /* 317 * The core routines to map and unmap host pages at the given guest 318 * address. Support superpages. 319 */ 320 321 /* 322 * Index of the pte for the guest address base in the page table at 323 * the level lvl. 324 */ 325 static int 326 domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl) 327 { 328 329 base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) * 330 DMAR_NPTEPGSHIFT; 331 return (base & DMAR_PTEMASK); 332 } 333 334 /* 335 * Returns the page index of the page table page in the page table 336 * object, which maps the given address base at the page table level 337 * lvl. 338 */ 339 static vm_pindex_t 340 domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl) 341 { 342 vm_pindex_t idx, pidx; 343 int i; 344 345 KASSERT(lvl >= 0 && lvl < domain->pglvl, 346 ("wrong lvl %p %d", domain, lvl)); 347 348 for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) { 349 idx = domain_pgtbl_pte_off(domain, base, i) + 350 pidx * DMAR_NPTEPG + 1; 351 } 352 return (idx); 353 } 354 355 static dmar_pte_t * 356 domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl, 357 int flags, vm_pindex_t *idxp, struct sf_buf **sf) 358 { 359 vm_page_t m; 360 struct sf_buf *sfp; 361 dmar_pte_t *pte, *ptep; 362 vm_pindex_t idx, idx1; 363 364 DMAR_DOMAIN_ASSERT_PGLOCKED(domain); 365 KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL")); 366 367 idx = domain_pgtbl_get_pindex(domain, base, lvl); 368 if (*sf != NULL && idx == *idxp) { 369 pte = (dmar_pte_t *)sf_buf_kva(*sf); 370 } else { 371 if (*sf != NULL) 372 dmar_unmap_pgtbl(*sf); 373 *idxp = idx; 374 retry: 375 pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf); 376 if (pte == NULL) { 377 KASSERT(lvl > 0, 378 ("lost root page table page %p", domain)); 379 /* 380 * Page table page does not exist, allocate 381 * it and create a pte in the preceeding page level 382 * to reference the allocated page table page. 383 */ 384 m = dmar_pgalloc(domain->pgtbl_obj, idx, flags | 385 DMAR_PGF_ZERO); 386 if (m == NULL) 387 return (NULL); 388 389 /* 390 * Prevent potential free while pgtbl_obj is 391 * unlocked in the recursive call to 392 * domain_pgtbl_map_pte(), if other thread did 393 * pte write and clean while the lock is 394 * dropped. 395 */ 396 m->ref_count++; 397 398 sfp = NULL; 399 ptep = domain_pgtbl_map_pte(domain, base, lvl - 1, 400 flags, &idx1, &sfp); 401 if (ptep == NULL) { 402 KASSERT(m->pindex != 0, 403 ("loosing root page %p", domain)); 404 m->ref_count--; 405 dmar_pgfree(domain->pgtbl_obj, m->pindex, 406 flags); 407 return (NULL); 408 } 409 dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W | 410 VM_PAGE_TO_PHYS(m)); 411 dmar_flush_pte_to_ram(domain->dmar, ptep); 412 sf_buf_page(sfp)->ref_count += 1; 413 m->ref_count--; 414 dmar_unmap_pgtbl(sfp); 415 /* Only executed once. */ 416 goto retry; 417 } 418 } 419 pte += domain_pgtbl_pte_off(domain, base, lvl); 420 return (pte); 421 } 422 423 static int 424 domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base, 425 dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags) 426 { 427 dmar_pte_t *pte; 428 struct sf_buf *sf; 429 dmar_gaddr_t pg_sz, base1, size1; 430 vm_pindex_t pi, c, idx, run_sz; 431 int lvl; 432 bool superpage; 433 434 DMAR_DOMAIN_ASSERT_PGLOCKED(domain); 435 436 base1 = base; 437 size1 = size; 438 flags |= DMAR_PGF_OBJL; 439 TD_PREP_PINNED_ASSERT; 440 441 for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz, 442 pi += run_sz) { 443 for (lvl = 0, c = 0, superpage = false;; lvl++) { 444 pg_sz = domain_page_size(domain, lvl); 445 run_sz = pg_sz >> DMAR_PAGE_SHIFT; 446 if (lvl == domain->pglvl - 1) 447 break; 448 /* 449 * Check if the current base suitable for the 450 * superpage mapping. First, verify the level. 451 */ 452 if (!domain_is_sp_lvl(domain, lvl)) 453 continue; 454 /* 455 * Next, look at the size of the mapping and 456 * alignment of both guest and host addresses. 457 */ 458 if (size < pg_sz || (base & (pg_sz - 1)) != 0 || 459 (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0) 460 continue; 461 /* All passed, check host pages contiguouty. */ 462 if (c == 0) { 463 for (c = 1; c < run_sz; c++) { 464 if (VM_PAGE_TO_PHYS(ma[pi + c]) != 465 VM_PAGE_TO_PHYS(ma[pi + c - 1]) + 466 PAGE_SIZE) 467 break; 468 } 469 } 470 if (c >= run_sz) { 471 superpage = true; 472 break; 473 } 474 } 475 KASSERT(size >= pg_sz, 476 ("mapping loop overflow %p %jx %jx %jx", domain, 477 (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); 478 KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl)); 479 pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf); 480 if (pte == NULL) { 481 KASSERT((flags & DMAR_PGF_WAITOK) == 0, 482 ("failed waitable pte alloc %p", domain)); 483 if (sf != NULL) 484 dmar_unmap_pgtbl(sf); 485 domain_unmap_buf_locked(domain, base1, base - base1, 486 flags); 487 TD_PINNED_ASSERT; 488 return (ENOMEM); 489 } 490 dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags | 491 (superpage ? DMAR_PTE_SP : 0)); 492 dmar_flush_pte_to_ram(domain->dmar, pte); 493 sf_buf_page(sf)->ref_count += 1; 494 } 495 if (sf != NULL) 496 dmar_unmap_pgtbl(sf); 497 TD_PINNED_ASSERT; 498 return (0); 499 } 500 501 int 502 domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size, 503 vm_page_t *ma, uint64_t pflags, int flags) 504 { 505 struct dmar_unit *unit; 506 int error; 507 508 unit = domain->dmar; 509 510 KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0, 511 ("modifying idmap pagetable domain %p", domain)); 512 KASSERT((base & DMAR_PAGE_MASK) == 0, 513 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, 514 (uintmax_t)size)); 515 KASSERT((size & DMAR_PAGE_MASK) == 0, 516 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, 517 (uintmax_t)size)); 518 KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base, 519 (uintmax_t)size)); 520 KASSERT(base < (1ULL << domain->agaw), 521 ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base, 522 (uintmax_t)size, domain->agaw)); 523 KASSERT(base + size < (1ULL << domain->agaw), 524 ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base, 525 (uintmax_t)size, domain->agaw)); 526 KASSERT(base + size > base, 527 ("size overflow %p %jx %jx", domain, (uintmax_t)base, 528 (uintmax_t)size)); 529 KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0, 530 ("neither read nor write %jx", (uintmax_t)pflags)); 531 KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP | 532 DMAR_PTE_TM)) == 0, 533 ("invalid pte flags %jx", (uintmax_t)pflags)); 534 KASSERT((pflags & DMAR_PTE_SNP) == 0 || 535 (unit->hw_ecap & DMAR_ECAP_SC) != 0, 536 ("PTE_SNP for dmar without snoop control %p %jx", 537 domain, (uintmax_t)pflags)); 538 KASSERT((pflags & DMAR_PTE_TM) == 0 || 539 (unit->hw_ecap & DMAR_ECAP_DI) != 0, 540 ("PTE_TM for dmar without DIOTLB %p %jx", 541 domain, (uintmax_t)pflags)); 542 KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); 543 544 DMAR_DOMAIN_PGLOCK(domain); 545 error = domain_map_buf_locked(domain, base, size, ma, pflags, flags); 546 DMAR_DOMAIN_PGUNLOCK(domain); 547 if (error != 0) 548 return (error); 549 550 if ((unit->hw_cap & DMAR_CAP_CM) != 0) 551 domain_flush_iotlb_sync(domain, base, size); 552 else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { 553 /* See 11.1 Write Buffer Flushing. */ 554 DMAR_LOCK(unit); 555 dmar_flush_write_bufs(unit); 556 DMAR_UNLOCK(unit); 557 } 558 return (0); 559 } 560 561 static void domain_unmap_clear_pte(struct dmar_domain *domain, 562 dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte, 563 struct sf_buf **sf, bool free_fs); 564 565 static void 566 domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base, 567 int lvl, int flags) 568 { 569 struct sf_buf *sf; 570 dmar_pte_t *pde; 571 vm_pindex_t idx; 572 573 sf = NULL; 574 pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf); 575 domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true); 576 } 577 578 static void 579 domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl, 580 int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf) 581 { 582 vm_page_t m; 583 584 dmar_pte_clear(&pte->pte); 585 dmar_flush_pte_to_ram(domain->dmar, pte); 586 m = sf_buf_page(*sf); 587 if (free_sf) { 588 dmar_unmap_pgtbl(*sf); 589 *sf = NULL; 590 } 591 m->ref_count--; 592 if (m->ref_count != 0) 593 return; 594 KASSERT(lvl != 0, 595 ("lost reference (lvl) on root pg domain %p base %jx lvl %d", 596 domain, (uintmax_t)base, lvl)); 597 KASSERT(m->pindex != 0, 598 ("lost reference (idx) on root pg domain %p base %jx lvl %d", 599 domain, (uintmax_t)base, lvl)); 600 dmar_pgfree(domain->pgtbl_obj, m->pindex, flags); 601 domain_free_pgtbl_pde(domain, base, lvl - 1, flags); 602 } 603 604 /* 605 * Assumes that the unmap is never partial. 606 */ 607 static int 608 domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base, 609 dmar_gaddr_t size, int flags) 610 { 611 dmar_pte_t *pte; 612 struct sf_buf *sf; 613 vm_pindex_t idx; 614 dmar_gaddr_t pg_sz; 615 int lvl; 616 617 DMAR_DOMAIN_ASSERT_PGLOCKED(domain); 618 if (size == 0) 619 return (0); 620 621 KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0, 622 ("modifying idmap pagetable domain %p", domain)); 623 KASSERT((base & DMAR_PAGE_MASK) == 0, 624 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, 625 (uintmax_t)size)); 626 KASSERT((size & DMAR_PAGE_MASK) == 0, 627 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, 628 (uintmax_t)size)); 629 KASSERT(base < (1ULL << domain->agaw), 630 ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base, 631 (uintmax_t)size, domain->agaw)); 632 KASSERT(base + size < (1ULL << domain->agaw), 633 ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base, 634 (uintmax_t)size, domain->agaw)); 635 KASSERT(base + size > base, 636 ("size overflow %p %jx %jx", domain, (uintmax_t)base, 637 (uintmax_t)size)); 638 KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); 639 640 pg_sz = 0; /* silence gcc */ 641 flags |= DMAR_PGF_OBJL; 642 TD_PREP_PINNED_ASSERT; 643 644 for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) { 645 for (lvl = 0; lvl < domain->pglvl; lvl++) { 646 if (lvl != domain->pglvl - 1 && 647 !domain_is_sp_lvl(domain, lvl)) 648 continue; 649 pg_sz = domain_page_size(domain, lvl); 650 if (pg_sz > size) 651 continue; 652 pte = domain_pgtbl_map_pte(domain, base, lvl, flags, 653 &idx, &sf); 654 KASSERT(pte != NULL, 655 ("sleeping or page missed %p %jx %d 0x%x", 656 domain, (uintmax_t)base, lvl, flags)); 657 if ((pte->pte & DMAR_PTE_SP) != 0 || 658 lvl == domain->pglvl - 1) { 659 domain_unmap_clear_pte(domain, base, lvl, 660 flags, pte, &sf, false); 661 break; 662 } 663 } 664 KASSERT(size >= pg_sz, 665 ("unmapping loop overflow %p %jx %jx %jx", domain, 666 (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); 667 } 668 if (sf != NULL) 669 dmar_unmap_pgtbl(sf); 670 /* 671 * See 11.1 Write Buffer Flushing for an explanation why RWBF 672 * can be ignored there. 673 */ 674 675 TD_PINNED_ASSERT; 676 return (0); 677 } 678 679 int 680 domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base, 681 dmar_gaddr_t size, int flags) 682 { 683 int error; 684 685 DMAR_DOMAIN_PGLOCK(domain); 686 error = domain_unmap_buf_locked(domain, base, size, flags); 687 DMAR_DOMAIN_PGUNLOCK(domain); 688 return (error); 689 } 690 691 int 692 domain_alloc_pgtbl(struct dmar_domain *domain) 693 { 694 vm_page_t m; 695 696 KASSERT(domain->pgtbl_obj == NULL, 697 ("already initialized %p", domain)); 698 699 domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, 700 IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL); 701 DMAR_DOMAIN_PGLOCK(domain); 702 m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK | 703 DMAR_PGF_ZERO | DMAR_PGF_OBJL); 704 /* No implicit free of the top level page table page. */ 705 m->ref_count = 1; 706 DMAR_DOMAIN_PGUNLOCK(domain); 707 DMAR_LOCK(domain->dmar); 708 domain->flags |= DMAR_DOMAIN_PGTBL_INITED; 709 DMAR_UNLOCK(domain->dmar); 710 return (0); 711 } 712 713 void 714 domain_free_pgtbl(struct dmar_domain *domain) 715 { 716 vm_object_t obj; 717 vm_page_t m; 718 719 obj = domain->pgtbl_obj; 720 if (obj == NULL) { 721 KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 && 722 (domain->flags & DMAR_DOMAIN_IDMAP) != 0, 723 ("lost pagetable object domain %p", domain)); 724 return; 725 } 726 DMAR_DOMAIN_ASSERT_PGLOCKED(domain); 727 domain->pgtbl_obj = NULL; 728 729 if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0) { 730 put_idmap_pgtbl(obj); 731 domain->flags &= ~DMAR_DOMAIN_IDMAP; 732 return; 733 } 734 735 /* Obliterate ref_counts */ 736 VM_OBJECT_ASSERT_WLOCKED(obj); 737 for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) 738 m->ref_count = 0; 739 VM_OBJECT_WUNLOCK(obj); 740 vm_object_deallocate(obj); 741 } 742 743 static inline uint64_t 744 domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro) 745 { 746 uint64_t iotlbr; 747 748 dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT | 749 DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt); 750 for (;;) { 751 iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF); 752 if ((iotlbr & DMAR_IOTLB_IVT) == 0) 753 break; 754 cpu_spinwait(); 755 } 756 return (iotlbr); 757 } 758 759 void 760 domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base, 761 dmar_gaddr_t size) 762 { 763 struct dmar_unit *unit; 764 dmar_gaddr_t isize; 765 uint64_t iotlbr; 766 int am, iro; 767 768 unit = domain->dmar; 769 KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call", 770 unit->unit)); 771 iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16; 772 DMAR_LOCK(unit); 773 if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) { 774 iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM | 775 DMAR_IOTLB_DID(domain->domain), iro); 776 KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) != 777 DMAR_IOTLB_IAIG_INVLD, 778 ("dmar%d: invalidation failed %jx", unit->unit, 779 (uintmax_t)iotlbr)); 780 } else { 781 for (; size > 0; base += isize, size -= isize) { 782 am = calc_am(unit, base, size, &isize); 783 dmar_write8(unit, iro, base | am); 784 iotlbr = domain_wait_iotlb_flush(unit, 785 DMAR_IOTLB_IIRG_PAGE | 786 DMAR_IOTLB_DID(domain->domain), iro); 787 KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) != 788 DMAR_IOTLB_IAIG_INVLD, 789 ("dmar%d: PSI invalidation failed " 790 "iotlbr 0x%jx base 0x%jx size 0x%jx am %d", 791 unit->unit, (uintmax_t)iotlbr, 792 (uintmax_t)base, (uintmax_t)size, am)); 793 /* 794 * Any non-page granularity covers whole guest 795 * address space for the domain. 796 */ 797 if ((iotlbr & DMAR_IOTLB_IAIG_MASK) != 798 DMAR_IOTLB_IAIG_PAGE) 799 break; 800 } 801 } 802 DMAR_UNLOCK(unit); 803 } 804