/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
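 * Address translations for user processes are distinguished by per-pmap
 * TLB TIDs (see the TID handling section at the end of this file).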
 *
 * 64-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : KVA
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"

/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;

/*
 * Base of the pmap_mapdev() region. On 32-bit it immediately follows the
 * userspace address range. On 64-bit it's far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA allowed. This is far larger
 * than the widest Book-E address bus; the e6500 has a 40-bit PA space.
 * This allows us to map akin to the DMAP, with addresses identical to the
 * PA, offset by the base.
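 *
 * For example, under this scheme a device register at physical address
 * 0x0000_000f_e000_0000 would be mapped by pmap_mapdev() at virtual
 * address 0x8000_000f_e000_0000.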
125 */ 126 #define VM_MAPDEV_BASE 0x8000000000000000 127 #define VM_MAPDEV_PA_MAX 0x4000000000000000 /* Don't encroach on DMAP */ 128 129 static void tid_flush(tlbtid_t tid); 130 static unsigned long ilog2(unsigned long); 131 132 /**************************************************************************/ 133 /* Page table management */ 134 /**************************************************************************/ 135 136 #define PMAP_ROOT_SIZE (sizeof(pte_t****) * PG_ROOT_NENTRIES) 137 static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va, 138 bool nosleep, bool *is_new); 139 static void ptbl_hold(pmap_t, pte_t *); 140 static int ptbl_unhold(pmap_t, vm_offset_t); 141 142 static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t); 143 static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); 144 static int pte_remove(pmap_t, vm_offset_t, uint8_t); 145 static pte_t *pte_find(pmap_t, vm_offset_t); 146 static pte_t *pte_find_next(pmap_t, vm_offset_t *); 147 static void kernel_pte_alloc(vm_offset_t, vm_offset_t); 148 149 /**************************************************************************/ 150 /* Page table related */ 151 /**************************************************************************/ 152 153 /* Allocate a page, to be used in a page table. */ 154 static vm_offset_t 155 mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep) 156 { 157 vm_page_t m; 158 int req; 159 160 req = VM_ALLOC_WIRED | VM_ALLOC_ZERO; 161 while ((m = vm_page_alloc_noobj(req)) == NULL) { 162 if (nosleep) 163 return (0); 164 165 PMAP_UNLOCK(pmap); 166 rw_wunlock(&pvh_global_lock); 167 vm_wait(NULL); 168 rw_wlock(&pvh_global_lock); 169 PMAP_LOCK(pmap); 170 } 171 m->pindex = idx; 172 173 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); 174 } 175 176 /* Initialize pool of kva ptbl buffers. */ 177 static void 178 ptbl_init(void) 179 { 180 } 181 182 /* Get a pointer to a PTE in a page table. */ 183 static __inline pte_t * 184 pte_find(pmap_t pmap, vm_offset_t va) 185 { 186 pte_t ***pdir_l1; 187 pte_t **pdir; 188 pte_t *ptbl; 189 190 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 191 192 pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)]; 193 if (pdir_l1 == NULL) 194 return (NULL); 195 pdir = pdir_l1[PDIR_L1_IDX(va)]; 196 if (pdir == NULL) 197 return (NULL); 198 ptbl = pdir[PDIR_IDX(va)]; 199 200 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL); 201 } 202 203 /* Get a pointer to a PTE in a page table, or the next closest (greater) one. */ 204 static __inline pte_t * 205 pte_find_next(pmap_t pmap, vm_offset_t *pva) 206 { 207 vm_offset_t va; 208 pte_t ****pm_root; 209 pte_t *pte; 210 unsigned long i, j, k, l; 211 212 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 213 214 va = *pva; 215 i = PG_ROOT_IDX(va); 216 j = PDIR_L1_IDX(va); 217 k = PDIR_IDX(va); 218 l = PTBL_IDX(va); 219 pm_root = pmap->pm_root; 220 221 /* truncate the VA for later. 
	 */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}

static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}

static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t page;
	vm_page_t m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	*isnew = false;

	return (page);
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int pg_root_idx = PG_ROOT_IDX(va);
	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	vm_offset_t pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
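 * Otherwise return 0. Pages are released leaf-first: the page table page
 * itself, then each parent directory page whose wire count drops to zero.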
321 */ 322 static int 323 ptbl_unhold(pmap_t pmap, vm_offset_t va) 324 { 325 pte_t *ptbl; 326 vm_page_t m; 327 u_int pg_root_idx; 328 pte_t ***pdir_l1; 329 u_int pdir_l1_idx; 330 pte_t **pdir; 331 u_int pdir_idx; 332 333 pg_root_idx = PG_ROOT_IDX(va); 334 pdir_l1_idx = PDIR_L1_IDX(va); 335 pdir_idx = PDIR_IDX(va); 336 337 KASSERT((pmap != kernel_pmap), 338 ("ptbl_unhold: unholding kernel ptbl!")); 339 340 pdir_l1 = pmap->pm_root[pg_root_idx]; 341 pdir = pdir_l1[pdir_l1_idx]; 342 ptbl = pdir[pdir_idx]; 343 344 /* decrement hold count */ 345 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl)); 346 347 if (!unhold_free_page(pmap, m)) 348 return (0); 349 350 pdir[pdir_idx] = NULL; 351 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir)); 352 353 if (!unhold_free_page(pmap, m)) 354 return (1); 355 356 pdir_l1[pdir_l1_idx] = NULL; 357 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1)); 358 359 if (!unhold_free_page(pmap, m)) 360 return (1); 361 pmap->pm_root[pg_root_idx] = NULL; 362 363 return (1); 364 } 365 366 /* 367 * Increment hold count for ptbl pages. This routine is used when new pte 368 * entry is being inserted into ptbl. 369 */ 370 static void 371 ptbl_hold(pmap_t pmap, pte_t *ptbl) 372 { 373 vm_page_t m; 374 375 KASSERT((pmap != kernel_pmap), 376 ("ptbl_hold: holding kernel ptbl!")); 377 378 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl)); 379 m->ref_count++; 380 } 381 382 /* 383 * Clean pte entry, try to free page table page if requested. 384 * 385 * Return 1 if ptbl pages were freed, otherwise return 0. 386 */ 387 static int 388 pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags) 389 { 390 vm_page_t m; 391 pte_t *pte; 392 393 pte = pte_find(pmap, va); 394 KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p", 395 __func__, (uintmax_t)va, pmap)); 396 397 if (!PTE_ISVALID(pte)) 398 return (0); 399 400 /* Get vm_page_t for mapped pte. */ 401 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 402 403 if (PTE_ISWIRED(pte)) 404 pmap->pm_stats.wired_count--; 405 406 /* Handle managed entry. */ 407 if (PTE_ISMANAGED(pte)) { 408 /* Handle modified pages. */ 409 if (PTE_ISMODIFIED(pte)) 410 vm_page_dirty(m); 411 412 /* Referenced pages. */ 413 if (PTE_ISREFERENCED(pte)) 414 vm_page_aflag_set(m, PGA_REFERENCED); 415 416 /* Remove pv_entry from pv_list. */ 417 pv_remove(pmap, va, m); 418 } else if (pmap == kernel_pmap && m && m->md.pv_tracked) { 419 pv_remove(pmap, va, m); 420 if (TAILQ_EMPTY(&m->md.pv_list)) 421 m->md.pv_tracked = false; 422 } 423 mtx_lock_spin(&tlbivax_mutex); 424 tlb_miss_lock(); 425 426 tlb0_flush_entry(va); 427 *pte = 0; 428 429 tlb_miss_unlock(); 430 mtx_unlock_spin(&tlbivax_mutex); 431 432 pmap->pm_stats.resident_count--; 433 434 if (flags & PTBL_UNHOLD) { 435 return (ptbl_unhold(pmap, va)); 436 } 437 return (0); 438 } 439 440 /* 441 * Insert PTE for a given page and virtual address. 442 */ 443 static int 444 pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, 445 boolean_t nosleep) 446 { 447 unsigned int ptbl_idx = PTBL_IDX(va); 448 pte_t *ptbl, *pte, pte_tmp; 449 bool is_new; 450 451 /* Get the page directory pointer. */ 452 ptbl = ptbl_alloc(pmap, va, nosleep, &is_new); 453 if (ptbl == NULL) { 454 KASSERT(nosleep, ("nosleep and NULL ptbl")); 455 return (ENOMEM); 456 } 457 if (is_new) { 458 pte = &ptbl[ptbl_idx]; 459 } else { 460 /* 461 * Check if there is valid mapping for requested va, if there 462 * is, remove it. 
463 */ 464 pte = &ptbl[ptbl_idx]; 465 if (PTE_ISVALID(pte)) { 466 pte_remove(pmap, va, PTBL_HOLD); 467 } else { 468 /* 469 * pte is not used, increment hold count for ptbl 470 * pages. 471 */ 472 if (pmap != kernel_pmap) 473 ptbl_hold(pmap, ptbl); 474 } 475 } 476 477 /* 478 * Insert pv_entry into pv_list for mapped page if part of managed 479 * memory. 480 */ 481 if ((m->oflags & VPO_UNMANAGED) == 0) { 482 flags |= PTE_MANAGED; 483 484 /* Create and insert pv entry. */ 485 pv_insert(pmap, va, m); 486 } 487 488 pmap->pm_stats.resident_count++; 489 490 pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m)); 491 pte_tmp |= (PTE_VALID | flags); 492 493 mtx_lock_spin(&tlbivax_mutex); 494 tlb_miss_lock(); 495 496 tlb0_flush_entry(va); 497 *pte = pte_tmp; 498 499 tlb_miss_unlock(); 500 mtx_unlock_spin(&tlbivax_mutex); 501 502 return (0); 503 } 504 505 /* Return the pa for the given pmap/va. */ 506 static vm_paddr_t 507 pte_vatopa(pmap_t pmap, vm_offset_t va) 508 { 509 vm_paddr_t pa = 0; 510 pte_t *pte; 511 512 pte = pte_find(pmap, va); 513 if ((pte != NULL) && PTE_ISVALID(pte)) 514 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 515 return (pa); 516 } 517 518 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */ 519 static void 520 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr) 521 { 522 pte_t *pte; 523 vm_size_t kva_size; 524 int kernel_pdirs, kernel_pgtbls, pdir_l1s; 525 vm_offset_t va, l1_va, pdir_va, ptbl_va; 526 int i, j, k; 527 528 kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; 529 kernel_pmap->pm_root = kernel_ptbl_root; 530 pdir_l1s = howmany(kva_size, PG_ROOT_SIZE); 531 kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE); 532 kernel_pgtbls = howmany(kva_size, PDIR_SIZE); 533 534 /* Initialize kernel pdir */ 535 l1_va = (vm_offset_t)kernel_ptbl_root + 536 round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***)); 537 pdir_va = l1_va + pdir_l1s * PAGE_SIZE; 538 ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE; 539 if (bootverbose) { 540 printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root); 541 printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s); 542 printf("pdir_va: %#lx(%d entries)\n", pdir_va, kernel_pdirs); 543 printf("ptbl_va: %#lx(%d entries)\n", ptbl_va, kernel_pgtbls); 544 } 545 546 va = VM_MIN_KERNEL_ADDRESS; 547 for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s; 548 i++, l1_va += PAGE_SIZE) { 549 kernel_pmap->pm_root[i] = (pte_t ***)l1_va; 550 for (j = 0; 551 j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS; 552 j++, pdir_va += PAGE_SIZE) { 553 kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va; 554 for (k = 0; 555 k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS; 556 k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE) 557 kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va; 558 } 559 } 560 /* 561 * Fill in PTEs covering kernel code and data. They are not required 562 * for address translation, as this area is covered by static TLB1 563 * entries, but for pte_vatopa() to work correctly with kernel area 564 * addresses. 
565 */ 566 for (va = addr; va < data_end; va += PAGE_SIZE) { 567 pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]); 568 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart)); 569 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 570 PTE_VALID | PTE_PS_4KB; 571 } 572 } 573 574 static vm_offset_t 575 mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end) 576 { 577 vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; 578 kernel_ptbl_root = (pte_t ****)data_end; 579 580 data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***)); 581 data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE; 582 data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE; 583 data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE; 584 585 return (data_end); 586 } 587 588 /* 589 * Initialize a preallocated and zeroed pmap structure, 590 * such as one in a vmspace structure. 591 */ 592 static int 593 mmu_booke_pinit(pmap_t pmap) 594 { 595 int i; 596 597 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 598 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 599 600 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 601 602 for (i = 0; i < MAXCPU; i++) 603 pmap->pm_tid[i] = TID_NONE; 604 CPU_ZERO(&kernel_pmap->pm_active); 605 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 606 pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK); 607 bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES); 608 609 return (1); 610 } 611 612 /* 613 * Release any resources held by the given physical map. 614 * Called when a pmap initialized by mmu_booke_pinit is being released. 615 * Should only be called if the map contains no valid mappings. 616 */ 617 static void 618 mmu_booke_release(pmap_t pmap) 619 { 620 621 KASSERT(pmap->pm_stats.resident_count == 0, 622 ("pmap_release: pmap resident count %ld != 0", 623 pmap->pm_stats.resident_count)); 624 #ifdef INVARIANTS 625 /* 626 * Verify that all page directories are gone. 627 * Protects against reference count leakage. 628 */ 629 for (int i = 0; i < PG_ROOT_NENTRIES; i++) 630 KASSERT(pmap->pm_root[i] == 0, 631 ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root)); 632 #endif 633 uma_zfree(ptbl_root_zone, pmap->pm_root); 634 } 635 636 static void 637 mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 638 { 639 pte_t *pte; 640 vm_paddr_t pa = 0; 641 int sync_sz, valid; 642 643 while (sz > 0) { 644 PMAP_LOCK(pm); 645 pte = pte_find(pm, va); 646 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 647 if (valid) 648 pa = PTE_PA(pte); 649 PMAP_UNLOCK(pm); 650 sync_sz = PAGE_SIZE - (va & PAGE_MASK); 651 sync_sz = min(sync_sz, sz); 652 if (valid) { 653 pa += (va & PAGE_MASK); 654 __syncicache((void *)PHYS_TO_DMAP(pa), sync_sz); 655 } 656 va += sync_sz; 657 sz -= sync_sz; 658 } 659 } 660 661 /* 662 * mmu_booke_zero_page_area zeros the specified hardware page by 663 * mapping it into virtual memory and using bzero to clear 664 * its contents. 665 * 666 * off and size must reside within a single page. 667 */ 668 static void 669 mmu_booke_zero_page_area(vm_page_t m, int off, int size) 670 { 671 vm_offset_t va; 672 673 /* XXX KASSERT off and size are within a single page? */ 674 675 va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 676 bzero((caddr_t)va + off, size); 677 } 678 679 /* 680 * mmu_booke_zero_page zeros the specified hardware page. 
681 */ 682 static void 683 mmu_booke_zero_page(vm_page_t m) 684 { 685 vm_offset_t off, va; 686 687 va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 688 689 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 690 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 691 } 692 693 /* 694 * mmu_booke_copy_page copies the specified (machine independent) page by 695 * mapping the page into virtual memory and using memcopy to copy the page, 696 * one machine dependent page at a time. 697 */ 698 static void 699 mmu_booke_copy_page(vm_page_t sm, vm_page_t dm) 700 { 701 vm_offset_t sva, dva; 702 703 sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm)); 704 dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm)); 705 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 706 } 707 708 static inline void 709 mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset, 710 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 711 { 712 void *a_cp, *b_cp; 713 vm_offset_t a_pg_offset, b_pg_offset; 714 int cnt; 715 716 vm_page_t pa, pb; 717 718 while (xfersize > 0) { 719 a_pg_offset = a_offset & PAGE_MASK; 720 pa = ma[a_offset >> PAGE_SHIFT]; 721 b_pg_offset = b_offset & PAGE_MASK; 722 pb = mb[b_offset >> PAGE_SHIFT]; 723 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 724 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 725 a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) + 726 a_pg_offset); 727 b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) + 728 b_pg_offset); 729 bcopy(a_cp, b_cp, cnt); 730 a_offset += cnt; 731 b_offset += cnt; 732 xfersize -= cnt; 733 } 734 } 735 736 static vm_offset_t 737 mmu_booke_quick_enter_page(vm_page_t m) 738 { 739 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); 740 } 741 742 static void 743 mmu_booke_quick_remove_page(vm_offset_t addr) 744 { 745 } 746 747 /**************************************************************************/ 748 /* TID handling */ 749 /**************************************************************************/ 750 751 /* 752 * Return the largest uint value log such that 2^log <= num. 753 */ 754 static unsigned long 755 ilog2(unsigned long num) 756 { 757 long lz; 758 759 __asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num)); 760 return (63 - lz); 761 } 762 763 /* 764 * Invalidate all TLB0 entries which match the given TID. Note this is 765 * dedicated for cases when invalidations should NOT be propagated to other 766 * CPUs. 767 */ 768 static void 769 tid_flush(tlbtid_t tid) 770 { 771 register_t msr; 772 773 /* Don't evict kernel translations */ 774 if (tid == TID_KERNEL) 775 return; 776 777 msr = mfmsr(); 778 __asm __volatile("wrteei 0"); 779 780 /* 781 * Newer (e500mc and later) have tlbilx, which doesn't broadcast, so use 782 * it for PID invalidation. 783 */ 784 mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT); 785 __asm __volatile("isync; .long 0x7c200024; isync; msync"); 786 787 __asm __volatile("wrtee %0" :: "r"(msr)); 788 } 789