/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

#define	iommu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	iommu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	iommu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MAX_ADDRESS);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex);
static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load(table)		(*table)
#define	pmap_clear(table)		atomic_store_64(table, 0)
#define	pmap_store(table, entry)	atomic_store_64(table, entry)
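/*
 * Illustrative only: callers are expected to go through these accessors for
 * every descriptor access instead of dereferencing the tables directly, so
 * that each 64-bit entry is published to the SMMU in a single store, e.g.:
 *
 *	tl0 = pmap_load(l0);
 *	if (tl0 == 0)
 *		pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
 *
 * This mirrors the pattern used by _pmap_alloc_l3() below.
 */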
/********************/
/* Inline functions */
/********************/

static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[iommu_l0_index(va)]);
}

static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[iommu_l1_index(va)]);
}

static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[iommu_l2_index(va)]);
}

static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[iommu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}
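/*
 * Illustrative only: the level returned through "*level" names the table
 * that holds the returned entry, so a caller that needs the L3 slot for
 * "va" checks for a valid L2 table first, as the enter routines below do:
 *
 *	pde = pmap_pde(pmap, va, &lvl);
 *	if (pde != NULL && lvl == 2)
 *		l3 = pmap_l2_to_l3(pde, va);
 */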
/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed. Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
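/*
 * Illustrative only: the expected pattern around the delayed free list is to
 * collect the pages while the pmap lock is held and free them once the lock
 * has been dropped, as iommu_pmap_release() does:
 *
 *	SLIST_INIT(&free);
 *	PMAP_LOCK(pmap);
 *	(void)pmap_unwire_l3(pmap, va, m, &free);
 *	PMAP_UNLOCK(pmap);
 *	vm_page_free_pages_toq(&free, true);
 */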
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page. If the reference count
 * drops to zero, then the page table page is unmapped. Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = pmap_l0(pmap, va);
		pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_clear(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = pmap_l1(pmap, va);
		tl1 = pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = pmap_l0(pmap, va);
		tl0 = pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}

static int
iommu_pmap_pinit_levels(pmap_t pmap, int levels)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);

	vm_radix_init(&pmap->pm_root);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	MPASS(levels == 3 || levels == 4);
	pmap->pm_levels = levels;

	/*
	 * Allocate the level 1 entry to use as the root. This will increase
	 * the refcount on the level 1 page so it won't be removed until
	 * pmap_release() is called.
	 */
	if (pmap->pm_levels == 3) {
		PMAP_LOCK(pmap);
		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E);
		PMAP_UNLOCK(pmap);
	}
	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);

	return (1);
}

int
iommu_pmap_pinit(pmap_t pmap)
{

	return (iommu_pmap_pinit_levels(pmap, 4));
}
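/*
 * Note: with four levels pm_ttbr is the physical address of the L0 table
 * allocated above; with three levels it is the pre-allocated L1 table, whose
 * reference is held until iommu_pmap_release() drops it. Illustrative
 * (hypothetical caller, the name "p" is assumed):
 *
 *	iommu_pmap_pinit(p);		builds the full 4-level tree
 *	...
 *	iommu_pmap_release(p);		tears it back down
 */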
/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine returns NULL and the
 * caller is expected to retry or bail out; unlike the CPU pmap variant it
 * does not sleep, as the page is requested without VM_ALLOC_WAITOK.
 *
 * Note: If a page allocation fails at page table level two or three, one
 * or two page table pages may be allocated, only to be released again when
 * the allocation of a containing directory page fails. This conservative
 * approach is easily argued to avoid race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry. The caller may attempt the
		 * allocation again later.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table. Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	pmap_resident_count_inc(pmap, 1);

	return (m);
}
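/*
 * Illustrative only: because _pmap_alloc_l3() never sleeps, its callers
 * allocate the missing level, retry the walk, and bail out on failure, as
 * pmap_smmu_enter() and pmap_gpu_enter() below do:
 *
 *	mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
 *	if (mpte == NULL) {
 *		rv = KERN_RESOURCE_SHORTAGE;
 *		goto out;
 *	}
 *	goto retry;
 */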
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
iommu_pmap_release(pmap_t pmap)
{
	boolean_t rv;
	struct spglist free;
	vm_page_t m;

	if (pmap->pm_levels != 4) {
		KASSERT(pmap->pm_stats.resident_count == 1,
		    ("pmap_release: pmap resident count %ld != 1",
		    pmap->pm_stats.resident_count));
		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));

		SLIST_INIT(&free);
		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
		PMAP_LOCK(pmap);
		rv = pmap_unwire_l3(pmap, 0, m, &free);
		PMAP_UNLOCK(pmap);
		MPASS(rv == TRUE);
		vm_page_free_pages_toq(&free, true);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));

	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 */
int
pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_gpu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure that the newly created l1 and l2 tables are visible
		 * to the GPU. The l0 table is already visible thanks to a
		 * similar cache writeback in the panfrost driver; the cache
		 * maintenance for the l3 entry is handled below.
		 */
		l1p = pmap_l1(pmap, va);
		l2p = pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}
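/*
 * Illustrative (hypothetical) usage: a GPU driver mapping a wired buffer one
 * page at a time; "gpu_pmap", "ma" and "npages" are assumed names, not part
 * of this file:
 *
 *	for (i = 0; i < npages; i++)
 *		if (pmap_gpu_enter(gpu_pmap, va + ptoa(i),
 *		    VM_PAGE_TO_PHYS(ma[i]), VM_PROT_READ | VM_PROT_WRITE,
 *		    0) != KERN_SUCCESS)
 *			break;
 */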
/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));

	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = pmap_l2_to_l3(pde, va);

	pmap_resident_count_dec(pmap, 1);
	pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
pmap_smmu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN;		/* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG;		/* Non global. */

	CTR2(KTR_PMAP, "pmap_smmu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_smmu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);
	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
 */
int
pmap_smmu_remove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	PMAP_LOCK(pmap);

	pte = pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		pmap_resident_count_dec(pmap, 1);
		pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	PMAP_UNLOCK(pmap);

	return (rc);
}
/*
 * Remove all the allocated L1, L2 pages from the SMMU pmap.
 * All the L3 entries must have been cleared in advance, otherwise
 * this function panics.
 */
void
iommu_pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = iommu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = iommu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = iommu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = iommu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}

		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}
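/*
 * Illustrative (hypothetical) teardown order, following the comments above:
 * every mapping is removed first, then the intermediate page table pages,
 * and finally the root; "p", "sva" and "eva" are assumed names:
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE)
 *		pmap_smmu_remove(p, va);
 *	iommu_pmap_remove_pages(p);
 *	iommu_pmap_release(p);
 */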