/*-
 * Copyright (c) 2008-2015 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
44 */ 45 46 #include "opt_compat.h" 47 #include "opt_kstack_pages.h" 48 49 #include <sys/param.h> 50 #include <sys/kernel.h> 51 #include <sys/conf.h> 52 #include <sys/queue.h> 53 #include <sys/cpuset.h> 54 #include <sys/kerneldump.h> 55 #include <sys/ktr.h> 56 #include <sys/lock.h> 57 #include <sys/msgbuf.h> 58 #include <sys/malloc.h> 59 #include <sys/mutex.h> 60 #include <sys/proc.h> 61 #include <sys/rwlock.h> 62 #include <sys/sched.h> 63 #include <sys/sysctl.h> 64 #include <sys/systm.h> 65 #include <sys/vmmeter.h> 66 #include <sys/smp.h> 67 68 #include <sys/kdb.h> 69 70 #include <dev/ofw/openfirm.h> 71 72 #include <vm/vm.h> 73 #include <vm/vm_param.h> 74 #include <vm/vm_kern.h> 75 #include <vm/vm_page.h> 76 #include <vm/vm_map.h> 77 #include <vm/vm_object.h> 78 #include <vm/vm_extern.h> 79 #include <vm/vm_pageout.h> 80 #include <vm/uma.h> 81 82 #include <machine/_inttypes.h> 83 #include <machine/cpu.h> 84 #include <machine/platform.h> 85 #include <machine/frame.h> 86 #include <machine/md_var.h> 87 #include <machine/psl.h> 88 #include <machine/bat.h> 89 #include <machine/hid.h> 90 #include <machine/pte.h> 91 #include <machine/sr.h> 92 #include <machine/trap.h> 93 #include <machine/mmuvar.h> 94 95 #include "mmu_oea64.h" 96 #include "mmu_if.h" 97 #include "moea64_if.h" 98 99 void moea64_release_vsid(uint64_t vsid); 100 uintptr_t moea64_get_unique_vsid(void); 101 102 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 103 #define ENABLE_TRANS(msr) mtmsr(msr) 104 105 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 106 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 107 #define VSID_HASH_MASK 0x0000007fffffffffULL 108 109 /* 110 * Locking semantics: 111 * 112 * There are two locks of interest: the page locks and the pmap locks, which 113 * protect their individual PVO lists and are locked in that order. The contents 114 * of all PVO entries are protected by the locks of their respective pmaps. 115 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked 116 * into any list. 117 * 118 */ 119 120 #define PV_LOCK_COUNT PA_LOCK_COUNT*3 121 static struct mtx_padalign pv_lock[PV_LOCK_COUNT]; 122 123 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT])) 124 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa)) 125 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa)) 126 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED) 127 #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m)) 128 #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m)) 129 #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m)) 130 131 struct ofw_map { 132 cell_t om_va; 133 cell_t om_len; 134 uint64_t om_pa; 135 cell_t om_mode; 136 }; 137 138 extern unsigned char _etext[]; 139 extern unsigned char _end[]; 140 141 /* 142 * Map of physical memory regions. 143 */ 144 static struct mem_region *regions; 145 static struct mem_region *pregions; 146 static u_int phys_avail_count; 147 static int regions_sz, pregions_sz; 148 149 extern void bs_remap_earlyboot(void); 150 151 /* 152 * Lock for the SLB tables. 153 */ 154 struct mtx moea64_slb_mutex; 155 156 /* 157 * PTEG data. 158 */ 159 u_int moea64_pteg_count; 160 u_int moea64_pteg_mask; 161 162 /* 163 * PVO data. 
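 *
 * A PVO ("physical-to-virtual object") entry describes a single mapping.
 * Each entry is linked into its pmap's RB tree and, for managed pages, onto
 * the page's mdpg_pvoh list.  Entries are carved from the static bootstrap
 * pool below until the VM system is up, and come from moea64_pvo_zone
 * afterwards.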
164 */ 165 166 uma_zone_t moea64_pvo_zone; /* zone for pvo entries */ 167 168 static struct pvo_entry *moea64_bpvo_pool; 169 static int moea64_bpvo_pool_index = 0; 170 static int moea64_bpvo_pool_size = 327680; 171 TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size); 172 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, 173 &moea64_bpvo_pool_index, 0, ""); 174 175 #define VSID_NBPW (sizeof(u_int32_t) * 8) 176 #ifdef __powerpc64__ 177 #define NVSIDS (NPMAPS * 16) 178 #define VSID_HASHMASK 0xffffffffUL 179 #else 180 #define NVSIDS NPMAPS 181 #define VSID_HASHMASK 0xfffffUL 182 #endif 183 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 184 185 static boolean_t moea64_initialized = FALSE; 186 187 /* 188 * Statistics. 189 */ 190 u_int moea64_pte_valid = 0; 191 u_int moea64_pte_overflow = 0; 192 u_int moea64_pvo_entries = 0; 193 u_int moea64_pvo_enter_calls = 0; 194 u_int moea64_pvo_remove_calls = 0; 195 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 196 &moea64_pte_valid, 0, ""); 197 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 198 &moea64_pte_overflow, 0, ""); 199 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 200 &moea64_pvo_entries, 0, ""); 201 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 202 &moea64_pvo_enter_calls, 0, ""); 203 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 204 &moea64_pvo_remove_calls, 0, ""); 205 206 vm_offset_t moea64_scratchpage_va[2]; 207 struct pvo_entry *moea64_scratchpage_pvo[2]; 208 struct mtx moea64_scratchpage_mtx; 209 210 uint64_t moea64_large_page_mask = 0; 211 uint64_t moea64_large_page_size = 0; 212 int moea64_large_page_shift = 0; 213 214 /* 215 * PVO calls. 216 */ 217 static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, 218 struct pvo_head *pvo_head); 219 static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo); 220 static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo); 221 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 222 223 /* 224 * Utility routines. 
225 */ 226 static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t); 227 static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t); 228 static void moea64_kremove(mmu_t, vm_offset_t); 229 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 230 vm_paddr_t pa, vm_size_t sz); 231 static void moea64_pmap_init_qpages(void); 232 233 /* 234 * Kernel MMU interface 235 */ 236 void moea64_clear_modify(mmu_t, vm_page_t); 237 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 238 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 239 vm_page_t *mb, vm_offset_t b_offset, int xfersize); 240 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, 241 u_int flags, int8_t psind); 242 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 243 vm_prot_t); 244 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 245 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 246 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 247 void moea64_init(mmu_t); 248 boolean_t moea64_is_modified(mmu_t, vm_page_t); 249 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 250 boolean_t moea64_is_referenced(mmu_t, vm_page_t); 251 int moea64_ts_referenced(mmu_t, vm_page_t); 252 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 253 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 254 void moea64_page_init(mmu_t, vm_page_t); 255 int moea64_page_wired_mappings(mmu_t, vm_page_t); 256 void moea64_pinit(mmu_t, pmap_t); 257 void moea64_pinit0(mmu_t, pmap_t); 258 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 259 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 260 void moea64_qremove(mmu_t, vm_offset_t, int); 261 void moea64_release(mmu_t, pmap_t); 262 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 263 void moea64_remove_pages(mmu_t, pmap_t); 264 void moea64_remove_all(mmu_t, vm_page_t); 265 void moea64_remove_write(mmu_t, vm_page_t); 266 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 267 void moea64_zero_page(mmu_t, vm_page_t); 268 void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 269 void moea64_activate(mmu_t, struct thread *); 270 void moea64_deactivate(mmu_t, struct thread *); 271 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t); 272 void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); 273 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 274 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t); 275 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); 276 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma); 277 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t); 278 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 279 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 280 void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, 281 void **va); 282 void moea64_scan_init(mmu_t mmu); 283 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m); 284 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr); 285 286 static mmu_method_t moea64_methods[] = { 287 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 288 MMUMETHOD(mmu_copy_page, moea64_copy_page), 289 MMUMETHOD(mmu_copy_pages, moea64_copy_pages), 290 MMUMETHOD(mmu_enter, moea64_enter), 291 MMUMETHOD(mmu_enter_object, moea64_enter_object), 292 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 293 MMUMETHOD(mmu_extract, moea64_extract), 294 
MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 295 MMUMETHOD(mmu_init, moea64_init), 296 MMUMETHOD(mmu_is_modified, moea64_is_modified), 297 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), 298 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 299 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 300 MMUMETHOD(mmu_map, moea64_map), 301 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 302 MMUMETHOD(mmu_page_init, moea64_page_init), 303 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 304 MMUMETHOD(mmu_pinit, moea64_pinit), 305 MMUMETHOD(mmu_pinit0, moea64_pinit0), 306 MMUMETHOD(mmu_protect, moea64_protect), 307 MMUMETHOD(mmu_qenter, moea64_qenter), 308 MMUMETHOD(mmu_qremove, moea64_qremove), 309 MMUMETHOD(mmu_release, moea64_release), 310 MMUMETHOD(mmu_remove, moea64_remove), 311 MMUMETHOD(mmu_remove_pages, moea64_remove_pages), 312 MMUMETHOD(mmu_remove_all, moea64_remove_all), 313 MMUMETHOD(mmu_remove_write, moea64_remove_write), 314 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 315 MMUMETHOD(mmu_unwire, moea64_unwire), 316 MMUMETHOD(mmu_zero_page, moea64_zero_page), 317 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 318 MMUMETHOD(mmu_activate, moea64_activate), 319 MMUMETHOD(mmu_deactivate, moea64_deactivate), 320 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), 321 MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page), 322 MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page), 323 324 /* Internal interfaces */ 325 MMUMETHOD(mmu_mapdev, moea64_mapdev), 326 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), 327 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 328 MMUMETHOD(mmu_kextract, moea64_kextract), 329 MMUMETHOD(mmu_kenter, moea64_kenter), 330 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), 331 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 332 MMUMETHOD(mmu_scan_init, moea64_scan_init), 333 MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map), 334 335 { 0, 0 } 336 }; 337 338 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); 339 340 static struct pvo_head * 341 vm_page_to_pvoh(vm_page_t m) 342 { 343 344 mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED); 345 return (&m->md.mdpg_pvoh); 346 } 347 348 static struct pvo_entry * 349 alloc_pvo_entry(int bootstrap) 350 { 351 struct pvo_entry *pvo; 352 353 if (!moea64_initialized || bootstrap) { 354 if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) { 355 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 356 moea64_bpvo_pool_index, moea64_bpvo_pool_size, 357 moea64_bpvo_pool_size * sizeof(struct pvo_entry)); 358 } 359 pvo = &moea64_bpvo_pool[ 360 atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)]; 361 bzero(pvo, sizeof(*pvo)); 362 pvo->pvo_vaddr = PVO_BOOTSTRAP; 363 } else { 364 pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT); 365 bzero(pvo, sizeof(*pvo)); 366 } 367 368 return (pvo); 369 } 370 371 372 static void 373 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va) 374 { 375 uint64_t vsid; 376 uint64_t hash; 377 int shift; 378 379 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 380 381 pvo->pvo_pmap = pmap; 382 va &= ~ADDR_POFF; 383 pvo->pvo_vaddr |= va; 384 vsid = va_to_vsid(pmap, va); 385 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 386 | (vsid << 16); 387 388 shift = (pvo->pvo_vaddr & PVO_LARGE) ? 
moea64_large_page_shift : 389 ADDR_PIDX_SHFT; 390 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift); 391 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3; 392 } 393 394 static void 395 free_pvo_entry(struct pvo_entry *pvo) 396 { 397 398 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 399 uma_zfree(moea64_pvo_zone, pvo); 400 } 401 402 void 403 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte) 404 { 405 406 lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) & 407 LPTE_AVPN_MASK; 408 lpte->pte_hi |= LPTE_VALID; 409 410 if (pvo->pvo_vaddr & PVO_LARGE) 411 lpte->pte_hi |= LPTE_BIG; 412 if (pvo->pvo_vaddr & PVO_WIRED) 413 lpte->pte_hi |= LPTE_WIRED; 414 if (pvo->pvo_vaddr & PVO_HID) 415 lpte->pte_hi |= LPTE_HID; 416 417 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */ 418 if (pvo->pvo_pte.prot & VM_PROT_WRITE) 419 lpte->pte_lo |= LPTE_BW; 420 else 421 lpte->pte_lo |= LPTE_BR; 422 423 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE)) 424 lpte->pte_lo |= LPTE_NOEXEC; 425 } 426 427 static __inline uint64_t 428 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) 429 { 430 uint64_t pte_lo; 431 int i; 432 433 if (ma != VM_MEMATTR_DEFAULT) { 434 switch (ma) { 435 case VM_MEMATTR_UNCACHEABLE: 436 return (LPTE_I | LPTE_G); 437 case VM_MEMATTR_CACHEABLE: 438 return (LPTE_M); 439 case VM_MEMATTR_WRITE_COMBINING: 440 case VM_MEMATTR_WRITE_BACK: 441 case VM_MEMATTR_PREFETCHABLE: 442 return (LPTE_I); 443 case VM_MEMATTR_WRITE_THROUGH: 444 return (LPTE_W | LPTE_M); 445 } 446 } 447 448 /* 449 * Assume the page is cache inhibited and access is guarded unless 450 * it's in our available memory array. 451 */ 452 pte_lo = LPTE_I | LPTE_G; 453 for (i = 0; i < pregions_sz; i++) { 454 if ((pa >= pregions[i].mr_start) && 455 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 456 pte_lo &= ~(LPTE_I | LPTE_G); 457 pte_lo |= LPTE_M; 458 break; 459 } 460 } 461 462 return pte_lo; 463 } 464 465 /* 466 * Quick sort callout for comparing memory regions. 
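 * om_cmp() orders struct ofw_map entries by ascending om_pa so that
 * moea64_add_ofw_mappings() can process the firmware translations in
 * physical-address order.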
467 */ 468 static int om_cmp(const void *a, const void *b); 469 470 static int 471 om_cmp(const void *a, const void *b) 472 { 473 const struct ofw_map *mapa; 474 const struct ofw_map *mapb; 475 476 mapa = a; 477 mapb = b; 478 if (mapa->om_pa < mapb->om_pa) 479 return (-1); 480 else if (mapa->om_pa > mapb->om_pa) 481 return (1); 482 else 483 return (0); 484 } 485 486 static void 487 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 488 { 489 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */ 490 pcell_t acells, trans_cells[sz/sizeof(cell_t)]; 491 struct pvo_entry *pvo; 492 register_t msr; 493 vm_offset_t off; 494 vm_paddr_t pa_base; 495 int i, j; 496 497 bzero(translations, sz); 498 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells, 499 sizeof(acells)); 500 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1) 501 panic("moea64_bootstrap: can't get ofw translations"); 502 503 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 504 sz /= sizeof(cell_t); 505 for (i = 0, j = 0; i < sz; j++) { 506 translations[j].om_va = trans_cells[i++]; 507 translations[j].om_len = trans_cells[i++]; 508 translations[j].om_pa = trans_cells[i++]; 509 if (acells == 2) { 510 translations[j].om_pa <<= 32; 511 translations[j].om_pa |= trans_cells[i++]; 512 } 513 translations[j].om_mode = trans_cells[i++]; 514 } 515 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)", 516 i, sz)); 517 518 sz = j; 519 qsort(translations, sz, sizeof (*translations), om_cmp); 520 521 for (i = 0; i < sz; i++) { 522 pa_base = translations[i].om_pa; 523 #ifndef __powerpc64__ 524 if ((translations[i].om_pa >> 32) != 0) 525 panic("OFW translations above 32-bit boundary!"); 526 #endif 527 528 if (pa_base % PAGE_SIZE) 529 panic("OFW translation not page-aligned (phys)!"); 530 if (translations[i].om_va % PAGE_SIZE) 531 panic("OFW translation not page-aligned (virt)!"); 532 533 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x", 534 pa_base, translations[i].om_va, translations[i].om_len); 535 536 /* Now enter the pages for this mapping */ 537 538 DISABLE_TRANS(msr); 539 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 540 /* If this address is direct-mapped, skip remapping */ 541 if (hw_direct_map && translations[i].om_va == pa_base && 542 moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M) 543 continue; 544 545 PMAP_LOCK(kernel_pmap); 546 pvo = moea64_pvo_find_va(kernel_pmap, 547 translations[i].om_va + off); 548 PMAP_UNLOCK(kernel_pmap); 549 if (pvo != NULL) 550 continue; 551 552 moea64_kenter(mmup, translations[i].om_va + off, 553 pa_base + off); 554 } 555 ENABLE_TRANS(msr); 556 } 557 } 558 559 #ifdef __powerpc64__ 560 static void 561 moea64_probe_large_page(void) 562 { 563 uint16_t pvr = mfpvr() >> 16; 564 565 switch (pvr) { 566 case IBM970: 567 case IBM970FX: 568 case IBM970MP: 569 powerpc_sync(); isync(); 570 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 571 powerpc_sync(); isync(); 572 573 /* FALLTHROUGH */ 574 default: 575 if (moea64_large_page_size == 0) { 576 moea64_large_page_size = 0x1000000; /* 16 MB */ 577 moea64_large_page_shift = 24; 578 } 579 } 580 581 moea64_large_page_mask = moea64_large_page_size - 1; 582 } 583 584 static void 585 moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 586 { 587 struct slb *cache; 588 struct slb entry; 589 uint64_t esid, slbe; 590 uint64_t i; 591 592 cache = PCPU_GET(slb); 593 esid = va >> ADDR_SR_SHFT; 594 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 595 596 for (i = 0; i 
< 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	struct pvo_entry *pvo;
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			pvo = alloc_pvo_entry(1 /* bootstrap */);
			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
			init_pvo_entry(pvo, kernel_pmap, pa);

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
			    VM_PROT_EXECUTE;
			pvo->pvo_pte.pa = pa | pte_lo;
			moea64_pvo_enter(mmup, pvo, NULL);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
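	 * (For example, setting vfs.unmapped_buf_allowed=0 from the loader
	 * forces mapped buffers even when a direct map is available.)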
679 */ 680 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", 681 &unmapped_buf_allowed)) 682 unmapped_buf_allowed = hw_direct_map; 683 } 684 685 void 686 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 687 { 688 int i, j; 689 vm_size_t physsz, hwphyssz; 690 691 #ifndef __powerpc64__ 692 /* We don't have a direct map since there is no BAT */ 693 hw_direct_map = 0; 694 695 /* Make sure battable is zero, since we have no BAT */ 696 for (i = 0; i < 16; i++) { 697 battable[i].batu = 0; 698 battable[i].batl = 0; 699 } 700 #else 701 moea64_probe_large_page(); 702 703 /* Use a direct map if we have large page support */ 704 if (moea64_large_page_size > 0) 705 hw_direct_map = 1; 706 else 707 hw_direct_map = 0; 708 #endif 709 710 /* Get physical memory regions from firmware */ 711 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 712 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 713 714 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 715 panic("moea64_bootstrap: phys_avail too small"); 716 717 phys_avail_count = 0; 718 physsz = 0; 719 hwphyssz = 0; 720 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 721 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 722 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)", 723 regions[i].mr_start, regions[i].mr_start + 724 regions[i].mr_size, regions[i].mr_size); 725 if (hwphyssz != 0 && 726 (physsz + regions[i].mr_size) >= hwphyssz) { 727 if (physsz < hwphyssz) { 728 phys_avail[j] = regions[i].mr_start; 729 phys_avail[j + 1] = regions[i].mr_start + 730 hwphyssz - physsz; 731 physsz = hwphyssz; 732 phys_avail_count++; 733 } 734 break; 735 } 736 phys_avail[j] = regions[i].mr_start; 737 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 738 phys_avail_count++; 739 physsz += regions[i].mr_size; 740 } 741 742 /* Check for overlap with the kernel and exception vectors */ 743 for (j = 0; j < 2*phys_avail_count; j+=2) { 744 if (phys_avail[j] < EXC_LAST) 745 phys_avail[j] += EXC_LAST; 746 747 if (kernelstart >= phys_avail[j] && 748 kernelstart < phys_avail[j+1]) { 749 if (kernelend < phys_avail[j+1]) { 750 phys_avail[2*phys_avail_count] = 751 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 752 phys_avail[2*phys_avail_count + 1] = 753 phys_avail[j+1]; 754 phys_avail_count++; 755 } 756 757 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 758 } 759 760 if (kernelend >= phys_avail[j] && 761 kernelend < phys_avail[j+1]) { 762 if (kernelstart > phys_avail[j]) { 763 phys_avail[2*phys_avail_count] = phys_avail[j]; 764 phys_avail[2*phys_avail_count + 1] = 765 kernelstart & ~PAGE_MASK; 766 phys_avail_count++; 767 } 768 769 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 770 } 771 } 772 773 physmem = btoc(physsz); 774 775 #ifdef PTEGCOUNT 776 moea64_pteg_count = PTEGCOUNT; 777 #else 778 moea64_pteg_count = 0x1000; 779 780 while (moea64_pteg_count < physmem) 781 moea64_pteg_count <<= 1; 782 783 moea64_pteg_count >>= 1; 784 #endif /* PTEGCOUNT */ 785 } 786 787 void 788 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 789 { 790 int i; 791 792 /* 793 * Set PTEG mask 794 */ 795 moea64_pteg_mask = moea64_pteg_count - 1; 796 797 /* 798 * Initialize SLB table lock and page locks 799 */ 800 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); 801 for (i = 0; i < PV_LOCK_COUNT; i++) 802 mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF); 803 804 /* 805 * Initialise the bootstrap pvo pool. 
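	 * Its size defaults to moea64_bpvo_pool_size entries and may be
	 * overridden with the machdep.moea64_bpvo_pool_size tunable;
	 * alloc_pvo_entry() hands entries out of it until moea64_initialized
	 * is set.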
806 */ 807 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 808 moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0); 809 moea64_bpvo_pool_index = 0; 810 811 /* 812 * Make sure kernel vsid is allocated as well as VSID 0. 813 */ 814 #ifndef __powerpc64__ 815 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 816 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 817 moea64_vsid_bitmap[0] |= 1; 818 #endif 819 820 /* 821 * Initialize the kernel pmap (which is statically allocated). 822 */ 823 #ifdef __powerpc64__ 824 for (i = 0; i < 64; i++) { 825 pcpup->pc_slb[i].slbv = 0; 826 pcpup->pc_slb[i].slbe = 0; 827 } 828 #else 829 for (i = 0; i < 16; i++) 830 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 831 #endif 832 833 kernel_pmap->pmap_phys = kernel_pmap; 834 CPU_FILL(&kernel_pmap->pm_active); 835 RB_INIT(&kernel_pmap->pmap_pvo); 836 837 PMAP_LOCK_INIT(kernel_pmap); 838 839 /* 840 * Now map in all the other buffers we allocated earlier 841 */ 842 843 moea64_setup_direct_map(mmup, kernelstart, kernelend); 844 } 845 846 void 847 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 848 { 849 ihandle_t mmui; 850 phandle_t chosen; 851 phandle_t mmu; 852 ssize_t sz; 853 int i; 854 vm_offset_t pa, va; 855 void *dpcpu; 856 857 /* 858 * Set up the Open Firmware pmap and add its mappings if not in real 859 * mode. 860 */ 861 862 chosen = OF_finddevice("/chosen"); 863 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) { 864 mmu = OF_instance_to_package(mmui); 865 if (mmu == -1 || 866 (sz = OF_getproplen(mmu, "translations")) == -1) 867 sz = 0; 868 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 869 panic("moea64_bootstrap: too many ofw translations"); 870 871 if (sz > 0) 872 moea64_add_ofw_mappings(mmup, mmu, sz); 873 } 874 875 /* 876 * Calculate the last available physical address. 877 */ 878 Maxmem = 0; 879 for (i = 0; phys_avail[i + 2] != 0; i += 2) 880 Maxmem = max(Maxmem, powerpc_btop(phys_avail[i + 1])); 881 882 /* 883 * Initialize MMU and remap early physical mappings 884 */ 885 MMU_CPU_BOOTSTRAP(mmup,0); 886 mtmsr(mfmsr() | PSL_DR | PSL_IR); 887 pmap_bootstrapped++; 888 bs_remap_earlyboot(); 889 890 /* 891 * Set the start and end of kva. 892 */ 893 virtual_avail = VM_MIN_KERNEL_ADDRESS; 894 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 895 896 /* 897 * Map the entire KVA range into the SLB. We must not fault there. 898 */ 899 #ifdef __powerpc64__ 900 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 901 moea64_bootstrap_slb_prefault(va, 0); 902 #endif 903 904 /* 905 * Figure out how far we can extend virtual_end into segment 16 906 * without running into existing mappings. Segment 16 is guaranteed 907 * to contain neither RAM nor devices (at least on Apple hardware), 908 * but will generally contain some OFW mappings we should not 909 * step on. 910 */ 911 912 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 913 PMAP_LOCK(kernel_pmap); 914 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 915 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 916 virtual_end += PAGE_SIZE; 917 PMAP_UNLOCK(kernel_pmap); 918 #endif 919 920 /* 921 * Allocate a kernel stack with a guard page for thread0 and map it 922 * into the kernel page map. 
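	 * The guard page is simply left unmapped: the stack VA begins
	 * KSTACK_GUARD_PAGES beyond virtual_avail and only kstack_pages pages
	 * are entered, so an overflow faults rather than corrupting adjacent
	 * KVA.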
923 */ 924 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); 925 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 926 virtual_avail = va + kstack_pages * PAGE_SIZE; 927 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 928 thread0.td_kstack = va; 929 thread0.td_kstack_pages = kstack_pages; 930 for (i = 0; i < kstack_pages; i++) { 931 moea64_kenter(mmup, va, pa); 932 pa += PAGE_SIZE; 933 va += PAGE_SIZE; 934 } 935 936 /* 937 * Allocate virtual address space for the message buffer. 938 */ 939 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); 940 msgbufp = (struct msgbuf *)virtual_avail; 941 va = virtual_avail; 942 virtual_avail += round_page(msgbufsize); 943 while (va < virtual_avail) { 944 moea64_kenter(mmup, va, pa); 945 pa += PAGE_SIZE; 946 va += PAGE_SIZE; 947 } 948 949 /* 950 * Allocate virtual address space for the dynamic percpu area. 951 */ 952 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 953 dpcpu = (void *)virtual_avail; 954 va = virtual_avail; 955 virtual_avail += DPCPU_SIZE; 956 while (va < virtual_avail) { 957 moea64_kenter(mmup, va, pa); 958 pa += PAGE_SIZE; 959 va += PAGE_SIZE; 960 } 961 dpcpu_init(dpcpu, curcpu); 962 963 /* 964 * Allocate some things for page zeroing. We put this directly 965 * in the page table and use MOEA64_PTE_REPLACE to avoid any 966 * of the PVO book-keeping or other parts of the VM system 967 * from even knowing that this hack exists. 968 */ 969 970 if (!hw_direct_map) { 971 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 972 MTX_DEF); 973 for (i = 0; i < 2; i++) { 974 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 975 virtual_end -= PAGE_SIZE; 976 977 moea64_kenter(mmup, moea64_scratchpage_va[i], 0); 978 979 PMAP_LOCK(kernel_pmap); 980 moea64_scratchpage_pvo[i] = moea64_pvo_find_va( 981 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); 982 PMAP_UNLOCK(kernel_pmap); 983 } 984 } 985 } 986 987 static void 988 moea64_pmap_init_qpages(void) 989 { 990 struct pcpu *pc; 991 int i; 992 993 if (hw_direct_map) 994 return; 995 996 CPU_FOREACH(i) { 997 pc = pcpu_find(i); 998 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); 999 if (pc->pc_qmap_addr == 0) 1000 panic("pmap_init_qpages: unable to allocate KVA"); 1001 PMAP_LOCK(kernel_pmap); 1002 pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr); 1003 PMAP_UNLOCK(kernel_pmap); 1004 mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF); 1005 } 1006 } 1007 1008 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL); 1009 1010 /* 1011 * Activate a user pmap. This mostly involves setting some non-CPU 1012 * state. 
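 * On powerpc64 the per-CPU user SLB pointer is updated and the user
 * segment's SLB entry is loaded with slbmte; on 32-bit AIM, curpmap is set
 * and the user segment register is loaded with mtsrin.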
1013 */ 1014 void 1015 moea64_activate(mmu_t mmu, struct thread *td) 1016 { 1017 pmap_t pm; 1018 1019 pm = &td->td_proc->p_vmspace->vm_pmap; 1020 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 1021 1022 #ifdef __powerpc64__ 1023 PCPU_SET(userslb, pm->pm_slb); 1024 __asm __volatile("slbmte %0, %1; isync" :: 1025 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); 1026 #else 1027 PCPU_SET(curpmap, pm->pmap_phys); 1028 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); 1029 #endif 1030 } 1031 1032 void 1033 moea64_deactivate(mmu_t mmu, struct thread *td) 1034 { 1035 pmap_t pm; 1036 1037 __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR)); 1038 1039 pm = &td->td_proc->p_vmspace->vm_pmap; 1040 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); 1041 #ifdef __powerpc64__ 1042 PCPU_SET(userslb, NULL); 1043 #else 1044 PCPU_SET(curpmap, NULL); 1045 #endif 1046 } 1047 1048 void 1049 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1050 { 1051 struct pvo_entry key, *pvo; 1052 vm_page_t m; 1053 int64_t refchg; 1054 1055 key.pvo_vaddr = sva; 1056 PMAP_LOCK(pm); 1057 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1058 pvo != NULL && PVO_VADDR(pvo) < eva; 1059 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { 1060 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1061 panic("moea64_unwire: pvo %p is missing PVO_WIRED", 1062 pvo); 1063 pvo->pvo_vaddr &= ~PVO_WIRED; 1064 refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */); 1065 if ((pvo->pvo_vaddr & PVO_MANAGED) && 1066 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 1067 if (refchg < 0) 1068 refchg = LPTE_CHG; 1069 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 1070 1071 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); 1072 if (refchg & LPTE_CHG) 1073 vm_page_dirty(m); 1074 if (refchg & LPTE_REF) 1075 vm_page_aflag_set(m, PGA_REFERENCED); 1076 } 1077 pm->pm_stats.wired_count--; 1078 } 1079 PMAP_UNLOCK(pm); 1080 } 1081 1082 /* 1083 * This goes through and sets the physical address of our 1084 * special scratch PTE to the PA we want to zero or copy. 
Because 1085 * of locking issues (this can get called in pvo_enter() by 1086 * the UMA allocator), we can't use most other utility functions here 1087 */ 1088 1089 static __inline 1090 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) { 1091 1092 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1093 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1094 1095 moea64_scratchpage_pvo[which]->pvo_pte.pa = 1096 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1097 MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which], 1098 MOEA64_PTE_INVALIDATE); 1099 isync(); 1100 } 1101 1102 void 1103 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1104 { 1105 vm_offset_t dst; 1106 vm_offset_t src; 1107 1108 dst = VM_PAGE_TO_PHYS(mdst); 1109 src = VM_PAGE_TO_PHYS(msrc); 1110 1111 if (hw_direct_map) { 1112 bcopy((void *)src, (void *)dst, PAGE_SIZE); 1113 } else { 1114 mtx_lock(&moea64_scratchpage_mtx); 1115 1116 moea64_set_scratchpage_pa(mmu, 0, src); 1117 moea64_set_scratchpage_pa(mmu, 1, dst); 1118 1119 bcopy((void *)moea64_scratchpage_va[0], 1120 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1121 1122 mtx_unlock(&moea64_scratchpage_mtx); 1123 } 1124 } 1125 1126 static inline void 1127 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1128 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1129 { 1130 void *a_cp, *b_cp; 1131 vm_offset_t a_pg_offset, b_pg_offset; 1132 int cnt; 1133 1134 while (xfersize > 0) { 1135 a_pg_offset = a_offset & PAGE_MASK; 1136 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 1137 a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + 1138 a_pg_offset; 1139 b_pg_offset = b_offset & PAGE_MASK; 1140 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 1141 b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + 1142 b_pg_offset; 1143 bcopy(a_cp, b_cp, cnt); 1144 a_offset += cnt; 1145 b_offset += cnt; 1146 xfersize -= cnt; 1147 } 1148 } 1149 1150 static inline void 1151 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1152 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1153 { 1154 void *a_cp, *b_cp; 1155 vm_offset_t a_pg_offset, b_pg_offset; 1156 int cnt; 1157 1158 mtx_lock(&moea64_scratchpage_mtx); 1159 while (xfersize > 0) { 1160 a_pg_offset = a_offset & PAGE_MASK; 1161 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 1162 moea64_set_scratchpage_pa(mmu, 0, 1163 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 1164 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; 1165 b_pg_offset = b_offset & PAGE_MASK; 1166 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 1167 moea64_set_scratchpage_pa(mmu, 1, 1168 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 1169 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; 1170 bcopy(a_cp, b_cp, cnt); 1171 a_offset += cnt; 1172 b_offset += cnt; 1173 xfersize -= cnt; 1174 } 1175 mtx_unlock(&moea64_scratchpage_mtx); 1176 } 1177 1178 void 1179 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1180 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1181 { 1182 1183 if (hw_direct_map) { 1184 moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset, 1185 xfersize); 1186 } else { 1187 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset, 1188 xfersize); 1189 } 1190 } 1191 1192 void 1193 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1194 { 1195 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); 1196 1197 if (size + off > PAGE_SIZE) 1198 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1199 1200 if (hw_direct_map) { 1201 bzero((caddr_t)pa + 
off, size); 1202 } else { 1203 mtx_lock(&moea64_scratchpage_mtx); 1204 moea64_set_scratchpage_pa(mmu, 0, pa); 1205 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1206 mtx_unlock(&moea64_scratchpage_mtx); 1207 } 1208 } 1209 1210 /* 1211 * Zero a page of physical memory by temporarily mapping it 1212 */ 1213 void 1214 moea64_zero_page(mmu_t mmu, vm_page_t m) 1215 { 1216 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); 1217 vm_offset_t va, off; 1218 1219 if (!hw_direct_map) { 1220 mtx_lock(&moea64_scratchpage_mtx); 1221 1222 moea64_set_scratchpage_pa(mmu, 0, pa); 1223 va = moea64_scratchpage_va[0]; 1224 } else { 1225 va = pa; 1226 } 1227 1228 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1229 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1230 1231 if (!hw_direct_map) 1232 mtx_unlock(&moea64_scratchpage_mtx); 1233 } 1234 1235 vm_offset_t 1236 moea64_quick_enter_page(mmu_t mmu, vm_page_t m) 1237 { 1238 struct pvo_entry *pvo; 1239 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); 1240 1241 if (hw_direct_map) 1242 return (pa); 1243 1244 /* 1245 * MOEA64_PTE_REPLACE does some locking, so we can't just grab 1246 * a critical section and access the PCPU data like on i386. 1247 * Instead, pin the thread and grab the PCPU lock to prevent 1248 * a preempting thread from using the same PCPU data. 1249 */ 1250 sched_pin(); 1251 1252 mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED); 1253 pvo = PCPU_GET(qmap_pvo); 1254 1255 mtx_lock(PCPU_PTR(qmap_lock)); 1256 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | 1257 (uint64_t)pa; 1258 MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE); 1259 isync(); 1260 1261 return (PCPU_GET(qmap_addr)); 1262 } 1263 1264 void 1265 moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr) 1266 { 1267 if (hw_direct_map) 1268 return; 1269 1270 mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED); 1271 KASSERT(PCPU_GET(qmap_addr) == addr, 1272 ("moea64_quick_remove_page: invalid address")); 1273 mtx_unlock(PCPU_PTR(qmap_lock)); 1274 sched_unpin(); 1275 } 1276 1277 /* 1278 * Map the given physical page at the specified virtual address in the 1279 * target pmap with the protection requested. If specified the page 1280 * will be wired down. 
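 *
 * If moea64_pvo_enter() fails with ENOMEM, the routine returns
 * KERN_RESOURCE_SHORTAGE when PMAP_ENTER_NOSLEEP was requested, and
 * otherwise waits for free pages (VM_WAIT) and retries.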
1281 */ 1282 1283 int 1284 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1285 vm_prot_t prot, u_int flags, int8_t psind) 1286 { 1287 struct pvo_entry *pvo, *oldpvo; 1288 struct pvo_head *pvo_head; 1289 uint64_t pte_lo; 1290 int error; 1291 1292 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1293 VM_OBJECT_ASSERT_LOCKED(m->object); 1294 1295 pvo = alloc_pvo_entry(0); 1296 pvo->pvo_pmap = NULL; /* to be filled in later */ 1297 pvo->pvo_pte.prot = prot; 1298 1299 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1300 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo; 1301 1302 if ((flags & PMAP_ENTER_WIRED) != 0) 1303 pvo->pvo_vaddr |= PVO_WIRED; 1304 1305 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { 1306 pvo_head = NULL; 1307 } else { 1308 pvo_head = &m->md.mdpg_pvoh; 1309 pvo->pvo_vaddr |= PVO_MANAGED; 1310 } 1311 1312 for (;;) { 1313 PV_PAGE_LOCK(m); 1314 PMAP_LOCK(pmap); 1315 if (pvo->pvo_pmap == NULL) 1316 init_pvo_entry(pvo, pmap, va); 1317 if (prot & VM_PROT_WRITE) 1318 if (pmap_bootstrapped && 1319 (m->oflags & VPO_UNMANAGED) == 0) 1320 vm_page_aflag_set(m, PGA_WRITEABLE); 1321 1322 oldpvo = moea64_pvo_find_va(pmap, va); 1323 if (oldpvo != NULL) { 1324 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && 1325 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && 1326 oldpvo->pvo_pte.prot == prot) { 1327 /* Identical mapping already exists */ 1328 error = 0; 1329 1330 /* If not in page table, reinsert it */ 1331 if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) { 1332 moea64_pte_overflow--; 1333 MOEA64_PTE_INSERT(mmu, oldpvo); 1334 } 1335 1336 /* Then just clean up and go home */ 1337 PV_PAGE_UNLOCK(m); 1338 PMAP_UNLOCK(pmap); 1339 free_pvo_entry(pvo); 1340 break; 1341 } 1342 1343 /* Otherwise, need to kill it first */ 1344 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " 1345 "mapping does not match new mapping")); 1346 moea64_pvo_remove_from_pmap(mmu, oldpvo); 1347 } 1348 error = moea64_pvo_enter(mmu, pvo, pvo_head); 1349 PV_PAGE_UNLOCK(m); 1350 PMAP_UNLOCK(pmap); 1351 1352 /* Free any dead pages */ 1353 if (oldpvo != NULL) { 1354 PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1355 moea64_pvo_remove_from_page(mmu, oldpvo); 1356 PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1357 free_pvo_entry(oldpvo); 1358 } 1359 1360 if (error != ENOMEM) 1361 break; 1362 if ((flags & PMAP_ENTER_NOSLEEP) != 0) 1363 return (KERN_RESOURCE_SHORTAGE); 1364 VM_OBJECT_ASSERT_UNLOCKED(m->object); 1365 VM_WAIT; 1366 } 1367 1368 /* 1369 * Flush the page from the instruction cache if this page is 1370 * mapped executable and cacheable. 1371 */ 1372 if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && 1373 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1374 vm_page_aflag_set(m, PGA_EXECUTABLE); 1375 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1376 } 1377 return (KERN_SUCCESS); 1378 } 1379 1380 static void 1381 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 1382 vm_size_t sz) 1383 { 1384 1385 /* 1386 * This is much trickier than on older systems because 1387 * we can't sync the icache on physical addresses directly 1388 * without a direct map. Instead we check a couple of cases 1389 * where the memory is already mapped in and, failing that, 1390 * use the same trick we use for page zeroing to create 1391 * a temporary mapping for this physical address. 1392 */ 1393 1394 if (!pmap_bootstrapped) { 1395 /* 1396 * If PMAP is not bootstrapped, we are likely to be 1397 * in real mode. 
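	 * With no translation active, the physical address can be handed to
	 * __syncicache() directly.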
1398 */ 1399 __syncicache((void *)pa, sz); 1400 } else if (pmap == kernel_pmap) { 1401 __syncicache((void *)va, sz); 1402 } else if (hw_direct_map) { 1403 __syncicache((void *)pa, sz); 1404 } else { 1405 /* Use the scratch page to set up a temp mapping */ 1406 1407 mtx_lock(&moea64_scratchpage_mtx); 1408 1409 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1410 __syncicache((void *)(moea64_scratchpage_va[1] + 1411 (va & ADDR_POFF)), sz); 1412 1413 mtx_unlock(&moea64_scratchpage_mtx); 1414 } 1415 } 1416 1417 /* 1418 * Maps a sequence of resident pages belonging to the same object. 1419 * The sequence begins with the given page m_start. This page is 1420 * mapped at the given virtual address start. Each subsequent page is 1421 * mapped at a virtual address that is offset from start by the same 1422 * amount as the page is offset from m_start within the object. The 1423 * last page in the sequence is the page with the largest offset from 1424 * m_start that can be mapped at a virtual address less than the given 1425 * virtual address end. Not every virtual page between start and end 1426 * is mapped; only those for which a resident page exists with the 1427 * corresponding offset from m_start are mapped. 1428 */ 1429 void 1430 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1431 vm_page_t m_start, vm_prot_t prot) 1432 { 1433 vm_page_t m; 1434 vm_pindex_t diff, psize; 1435 1436 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1437 1438 psize = atop(end - start); 1439 m = m_start; 1440 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1441 moea64_enter(mmu, pm, start + ptoa(diff), m, prot & 1442 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); 1443 m = TAILQ_NEXT(m, listq); 1444 } 1445 } 1446 1447 void 1448 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1449 vm_prot_t prot) 1450 { 1451 1452 moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1453 PMAP_ENTER_NOSLEEP, 0); 1454 } 1455 1456 vm_paddr_t 1457 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1458 { 1459 struct pvo_entry *pvo; 1460 vm_paddr_t pa; 1461 1462 PMAP_LOCK(pm); 1463 pvo = moea64_pvo_find_va(pm, va); 1464 if (pvo == NULL) 1465 pa = 0; 1466 else 1467 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); 1468 PMAP_UNLOCK(pm); 1469 1470 return (pa); 1471 } 1472 1473 /* 1474 * Atomically extract and hold the physical page with the given 1475 * pmap and virtual address pair if that mapping permits the given 1476 * protection. 1477 */ 1478 vm_page_t 1479 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1480 { 1481 struct pvo_entry *pvo; 1482 vm_page_t m; 1483 vm_paddr_t pa; 1484 1485 m = NULL; 1486 pa = 0; 1487 PMAP_LOCK(pmap); 1488 retry: 1489 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1490 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { 1491 if (vm_page_pa_tryrelock(pmap, 1492 pvo->pvo_pte.pa & LPTE_RPGN, &pa)) 1493 goto retry; 1494 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 1495 vm_page_hold(m); 1496 } 1497 PA_UNLOCK_COND(pa); 1498 PMAP_UNLOCK(pmap); 1499 return (m); 1500 } 1501 1502 static mmu_t installed_mmu; 1503 1504 static void * 1505 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, 1506 int wait) 1507 { 1508 struct pvo_entry *pvo; 1509 vm_offset_t va; 1510 vm_page_t m; 1511 int needed_lock; 1512 1513 /* 1514 * This entire routine is a horrible hack to avoid bothering kmem 1515 * for new KVA addresses. 
Because this can get called from inside 1516 * kmem allocation routines, calling kmem for a new address here 1517 * can lead to multiply locking non-recursive mutexes. 1518 */ 1519 1520 *flags = UMA_SLAB_PRIV; 1521 needed_lock = !PMAP_LOCKED(kernel_pmap); 1522 1523 m = vm_page_alloc(NULL, 0, 1524 malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); 1525 if (m == NULL) 1526 return (NULL); 1527 1528 va = VM_PAGE_TO_PHYS(m); 1529 1530 pvo = alloc_pvo_entry(1 /* bootstrap */); 1531 1532 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; 1533 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; 1534 1535 if (needed_lock) 1536 PMAP_LOCK(kernel_pmap); 1537 1538 init_pvo_entry(pvo, kernel_pmap, va); 1539 pvo->pvo_vaddr |= PVO_WIRED; 1540 1541 moea64_pvo_enter(installed_mmu, pvo, NULL); 1542 1543 if (needed_lock) 1544 PMAP_UNLOCK(kernel_pmap); 1545 1546 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1547 bzero((void *)va, PAGE_SIZE); 1548 1549 return (void *)va; 1550 } 1551 1552 extern int elf32_nxstack; 1553 1554 void 1555 moea64_init(mmu_t mmu) 1556 { 1557 1558 CTR0(KTR_PMAP, "moea64_init"); 1559 1560 moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1561 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1562 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1563 1564 if (!hw_direct_map) { 1565 installed_mmu = mmu; 1566 uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc); 1567 } 1568 1569 #ifdef COMPAT_FREEBSD32 1570 elf32_nxstack = 1; 1571 #endif 1572 1573 moea64_initialized = TRUE; 1574 } 1575 1576 boolean_t 1577 moea64_is_referenced(mmu_t mmu, vm_page_t m) 1578 { 1579 1580 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1581 ("moea64_is_referenced: page %p is not managed", m)); 1582 1583 return (moea64_query_bit(mmu, m, LPTE_REF)); 1584 } 1585 1586 boolean_t 1587 moea64_is_modified(mmu_t mmu, vm_page_t m) 1588 { 1589 1590 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1591 ("moea64_is_modified: page %p is not managed", m)); 1592 1593 /* 1594 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1595 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1596 * is clear, no PTEs can have LPTE_CHG set. 1597 */ 1598 VM_OBJECT_ASSERT_LOCKED(m->object); 1599 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1600 return (FALSE); 1601 return (moea64_query_bit(mmu, m, LPTE_CHG)); 1602 } 1603 1604 boolean_t 1605 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1606 { 1607 struct pvo_entry *pvo; 1608 boolean_t rv = TRUE; 1609 1610 PMAP_LOCK(pmap); 1611 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1612 if (pvo != NULL) 1613 rv = FALSE; 1614 PMAP_UNLOCK(pmap); 1615 return (rv); 1616 } 1617 1618 void 1619 moea64_clear_modify(mmu_t mmu, vm_page_t m) 1620 { 1621 1622 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1623 ("moea64_clear_modify: page %p is not managed", m)); 1624 VM_OBJECT_ASSERT_WLOCKED(m->object); 1625 KASSERT(!vm_page_xbusied(m), 1626 ("moea64_clear_modify: page %p is exclusive busied", m)); 1627 1628 /* 1629 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG 1630 * set. If the object containing the page is locked and the page is 1631 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 1632 */ 1633 if ((m->aflags & PGA_WRITEABLE) == 0) 1634 return; 1635 moea64_clear_bit(mmu, m, LPTE_CHG); 1636 } 1637 1638 /* 1639 * Clear the write and modified bits in each of the given page's mappings. 
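 * Any change bit collected while downgrading the mappings is folded into
 * the vm_page via vm_page_dirty() before PGA_WRITEABLE is cleared.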
1640 */ 1641 void 1642 moea64_remove_write(mmu_t mmu, vm_page_t m) 1643 { 1644 struct pvo_entry *pvo; 1645 int64_t refchg, ret; 1646 pmap_t pmap; 1647 1648 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1649 ("moea64_remove_write: page %p is not managed", m)); 1650 1651 /* 1652 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1653 * set by another thread while the object is locked. Thus, 1654 * if PGA_WRITEABLE is clear, no page table entries need updating. 1655 */ 1656 VM_OBJECT_ASSERT_WLOCKED(m->object); 1657 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1658 return; 1659 powerpc_sync(); 1660 PV_PAGE_LOCK(m); 1661 refchg = 0; 1662 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1663 pmap = pvo->pvo_pmap; 1664 PMAP_LOCK(pmap); 1665 if (!(pvo->pvo_vaddr & PVO_DEAD) && 1666 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 1667 pvo->pvo_pte.prot &= ~VM_PROT_WRITE; 1668 ret = MOEA64_PTE_REPLACE(mmu, pvo, 1669 MOEA64_PTE_PROT_UPDATE); 1670 if (ret < 0) 1671 ret = LPTE_CHG; 1672 refchg |= ret; 1673 if (pvo->pvo_pmap == kernel_pmap) 1674 isync(); 1675 } 1676 PMAP_UNLOCK(pmap); 1677 } 1678 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG) 1679 vm_page_dirty(m); 1680 vm_page_aflag_clear(m, PGA_WRITEABLE); 1681 PV_PAGE_UNLOCK(m); 1682 } 1683 1684 /* 1685 * moea64_ts_referenced: 1686 * 1687 * Return a count of reference bits for a page, clearing those bits. 1688 * It is not necessary for every reference bit to be cleared, but it 1689 * is necessary that 0 only be returned when there are truly no 1690 * reference bits set. 1691 * 1692 * XXX: The exact number of bits to check and clear is a matter that 1693 * should be tested and standardized at some point in the future for 1694 * optimal aging of shared pages. 1695 */ 1696 int 1697 moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1698 { 1699 1700 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1701 ("moea64_ts_referenced: page %p is not managed", m)); 1702 return (moea64_clear_bit(mmu, m, LPTE_REF)); 1703 } 1704 1705 /* 1706 * Modify the WIMG settings of all mappings for a page. 1707 */ 1708 void 1709 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1710 { 1711 struct pvo_entry *pvo; 1712 int64_t refchg; 1713 pmap_t pmap; 1714 uint64_t lo; 1715 1716 if ((m->oflags & VPO_UNMANAGED) != 0) { 1717 m->md.mdpg_cache_attrs = ma; 1718 return; 1719 } 1720 1721 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1722 1723 PV_PAGE_LOCK(m); 1724 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1725 pmap = pvo->pvo_pmap; 1726 PMAP_LOCK(pmap); 1727 if (!(pvo->pvo_vaddr & PVO_DEAD)) { 1728 pvo->pvo_pte.pa &= ~LPTE_WIMG; 1729 pvo->pvo_pte.pa |= lo; 1730 refchg = MOEA64_PTE_REPLACE(mmu, pvo, 1731 MOEA64_PTE_INVALIDATE); 1732 if (refchg < 0) 1733 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? 1734 LPTE_CHG : 0; 1735 if ((pvo->pvo_vaddr & PVO_MANAGED) && 1736 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 1737 refchg |= 1738 atomic_readandclear_32(&m->md.mdpg_attrs); 1739 if (refchg & LPTE_CHG) 1740 vm_page_dirty(m); 1741 if (refchg & LPTE_REF) 1742 vm_page_aflag_set(m, PGA_REFERENCED); 1743 } 1744 if (pvo->pvo_pmap == kernel_pmap) 1745 isync(); 1746 } 1747 PMAP_UNLOCK(pmap); 1748 } 1749 m->md.mdpg_cache_attrs = ma; 1750 PV_PAGE_UNLOCK(m); 1751 } 1752 1753 /* 1754 * Map a wired page into kernel virtual address space. 
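 * Any existing mapping at the address is removed first.  As an illustrative
 * example (not a call site in this file), device memory could be entered
 * uncached with:
 *
 *	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_UNCACHEABLE);
 *
 * which moea64_calc_wimg() turns into a cache-inhibited, guarded mapping.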
1755 */ 1756 void 1757 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1758 { 1759 int error; 1760 struct pvo_entry *pvo, *oldpvo; 1761 1762 pvo = alloc_pvo_entry(0); 1763 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 1764 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); 1765 pvo->pvo_vaddr |= PVO_WIRED; 1766 1767 PMAP_LOCK(kernel_pmap); 1768 oldpvo = moea64_pvo_find_va(kernel_pmap, va); 1769 if (oldpvo != NULL) 1770 moea64_pvo_remove_from_pmap(mmu, oldpvo); 1771 init_pvo_entry(pvo, kernel_pmap, va); 1772 error = moea64_pvo_enter(mmu, pvo, NULL); 1773 PMAP_UNLOCK(kernel_pmap); 1774 1775 /* Free any dead pages */ 1776 if (oldpvo != NULL) { 1777 PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1778 moea64_pvo_remove_from_page(mmu, oldpvo); 1779 PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1780 free_pvo_entry(oldpvo); 1781 } 1782 1783 if (error != 0 && error != ENOENT) 1784 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1785 pa, error); 1786 } 1787 1788 void 1789 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1790 { 1791 1792 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1793 } 1794 1795 /* 1796 * Extract the physical page address associated with the given kernel virtual 1797 * address. 1798 */ 1799 vm_paddr_t 1800 moea64_kextract(mmu_t mmu, vm_offset_t va) 1801 { 1802 struct pvo_entry *pvo; 1803 vm_paddr_t pa; 1804 1805 /* 1806 * Shortcut the direct-mapped case when applicable. We never put 1807 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1808 */ 1809 if (va < VM_MIN_KERNEL_ADDRESS) 1810 return (va); 1811 1812 PMAP_LOCK(kernel_pmap); 1813 pvo = moea64_pvo_find_va(kernel_pmap, va); 1814 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1815 va)); 1816 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); 1817 PMAP_UNLOCK(kernel_pmap); 1818 return (pa); 1819 } 1820 1821 /* 1822 * Remove a wired page from kernel virtual address space. 1823 */ 1824 void 1825 moea64_kremove(mmu_t mmu, vm_offset_t va) 1826 { 1827 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1828 } 1829 1830 /* 1831 * Map a range of physical addresses into kernel virtual address space. 1832 * 1833 * The value passed in *virt is a suggested virtual address for the mapping. 1834 * Architectures which can support a direct-mapped physical to virtual region 1835 * can return the appropriate address within that region, leaving '*virt' 1836 * unchanged. Other architectures should map the pages starting at '*virt' and 1837 * update '*virt' with the first usable address after the mapped region. 1838 */ 1839 vm_offset_t 1840 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1841 vm_paddr_t pa_end, int prot) 1842 { 1843 vm_offset_t sva, va; 1844 1845 if (hw_direct_map) { 1846 /* 1847 * Check if every page in the region is covered by the direct 1848 * map. The direct map covers all of physical memory. Use 1849 * moea64_calc_wimg() as a shortcut to see if the page is in 1850 * physical memory as a way to see if the direct map covers it. 
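		 * (With VM_MEMATTR_DEFAULT, moea64_calc_wimg() returns LPTE_M
		 * only for addresses that fall inside one of the pregions[]
		 * entries, i.e. real memory.)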
		 */
		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
				break;
		if (va == pa_end)
			return (pa_start);
	}
	sva = *virt;
	va = sva;
	/* XXX respect prot argument */
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	PV_PAGE_UNLOCK(m);
	return (rv);
}

void
moea64_page_init(mmu_t mmu __unused, vm_page_t m)
{

	m->md.mdpg_attrs = 0;
	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
	LIST_INIT(&m->md.mdpg_pvoh);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
			count++;
	PV_PAGE_UNLOCK(m);
	return (count);
}

static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket?

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{

	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;
	uint32_t hash;

	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	struct vm_page *pg;
	vm_prot_t oldprot;
	int32_t refchg;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/*
	 * Change the protection of the page.
	 */
	oldprot = pvo->pvo_pte.prot;
	pvo->pvo_pte.prot = prot;
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

	/*
	 * If the PVO is in the page table, update mapping
	 */
	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
	if (refchg < 0)
		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;

	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((pg->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(pg, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed and we have
	 * removed write access.
	 */
	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
	    (oldprot & VM_PROT_WRITE)) {
		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
		if (refchg & LPTE_CHG)
			vm_page_dirty(pg);
		if (refchg & LPTE_REF)
			vm_page_aflag_set(pg, PGA_REFERENCED);
	}
}

void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo, *tpvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_protect(mmu, pm, pvo, prot);
	}
	PMAP_UNLOCK(pm);
}
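
/*
 * moea64_protect() above and moea64_remove() below share the same
 * red-black-tree range walk: RB_NFIND() finds the first PVO with
 * PVO_VADDR() >= sva, and the successor is fetched with RB_NEXT() before
 * the current entry is touched, so the walk stays valid even when the
 * entry is later unlinked from the tree.  Minimal sketch of the idiom
 * (visit() is a hypothetical placeholder for the per-PVO work):
 *
 *	key.pvo_vaddr = sva;
 *	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 *	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 *		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 *		visit(pvo);
 *	}
 */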

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif
}

/*
 * Remove all pages mapped by the specified pmap.
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct pvo_entry *pvo, *tpvo;
	struct pvo_tree tofree;

	RB_INIT(&tofree);

	PMAP_LOCK(pm);
	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
		if (pvo->pvo_vaddr & PVO_WIRED)
			continue;

		/*
		 * For locking reasons, remove this from the page table and
		 * pmap, but save delinking from the vm_page for a second
		 * pass
		 */
		moea64_pvo_remove_from_pmap(mmu, pvo);
		RB_INSERT(pvo_tree, &tofree, pvo);
	}
	PMAP_UNLOCK(pm);

	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
		moea64_pvo_remove_from_page(mmu, pvo);
		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
		RB_REMOVE(pvo_tree, &tofree, pvo);
		free_pvo_entry(pvo);
	}
}
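
/*
 * Note on the two-pass structure used by moea64_remove_pages() above and
 * moea64_remove() below: pass one runs with only the pmap lock held, pulls
 * each PVO out of the page table and the pmap's tree, and marks it
 * PVO_DEAD; pass two takes the PV lock for each page, delinks the PVO from
 * its vm_page, and frees it.  Walkers of a page's PVO list (for example
 * moea64_page_exists_quick() and moea64_query_bit()) skip PVO_DEAD
 * entries, so the window between the two passes is benign.
 */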

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo, *tpvo, key;
	struct pvo_tree tofree;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	key.pvo_vaddr = sva;

	RB_INIT(&tofree);

	PMAP_LOCK(pm);
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);

		/*
		 * For locking reasons, remove this from the page table and
		 * pmap, but save delinking from the vm_page for a second
		 * pass
		 */
		moea64_pvo_remove_from_pmap(mmu, pvo);
		RB_INSERT(pvo_tree, &tofree, pvo);
	}
	PMAP_UNLOCK(pm);

	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
		moea64_pvo_remove_from_page(mmu, pvo);
		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
		RB_REMOVE(pvo_tree, &tofree, pvo);
		free_pvo_entry(pvo);
	}
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove_from_pmap()/moea64_pvo_remove_from_page() will reflect
 * changes in the PTEs back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo, *next_pvo;
	struct pvo_head freequeue;
	int wasdead;
	pmap_t pmap;

	LIST_INIT(&freequeue);

	PV_PAGE_LOCK(m);
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
		if (!wasdead)
			moea64_pvo_remove_from_pmap(mmu, pvo);
		moea64_pvo_remove_from_page(mmu, pvo);
		if (!wasdead)
			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
		PMAP_UNLOCK(pmap);
	}
	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
	PV_PAGE_UNLOCK(m);

	/* Clean up UMA allocations */
	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
		free_pvo_entry(pvo);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = roundup2(phys_avail[i], align);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}
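
/*
 * Worked example for the phys_avail bookkeeping above (hypothetical
 * numbers, 4 KB pages): with an available range [0x100000, 0x200000) at
 * slot i, a request for size = 0x4000 and align = 0x10000 gives
 * s = 0x100000, so the allocation is shaved off the front and the slot
 * becomes [0x104000, 0x200000).  If instead the range were
 * [0x10c000, 0x200000), alignment would give s = 0x110000, the allocation
 * would land in the middle, and the slot would be split into
 * [0x10c000, 0x110000) and [0x114000, 0x200000), bumping phys_avail_count.
 */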
2419 */ 2420 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); 2421 2422 /* 2423 * Mark this for the next sweep 2424 */ 2425 pvo->pvo_vaddr |= PVO_DEAD; 2426 2427 /* Send RC bits to VM */ 2428 if ((pvo->pvo_vaddr & PVO_MANAGED) && 2429 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 2430 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 2431 if (pg != NULL) { 2432 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); 2433 if (refchg & LPTE_CHG) 2434 vm_page_dirty(pg); 2435 if (refchg & LPTE_REF) 2436 vm_page_aflag_set(pg, PGA_REFERENCED); 2437 } 2438 } 2439 } 2440 2441 static void 2442 moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo) 2443 { 2444 struct vm_page *pg; 2445 2446 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); 2447 2448 /* Use NULL pmaps as a sentinel for races in page deletion */ 2449 if (pvo->pvo_pmap == NULL) 2450 return; 2451 pvo->pvo_pmap = NULL; 2452 2453 /* 2454 * Update vm about page writeability/executability if managed 2455 */ 2456 PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN); 2457 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 2458 2459 if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) { 2460 LIST_REMOVE(pvo, pvo_vlink); 2461 if (LIST_EMPTY(vm_page_to_pvoh(pg))) 2462 vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE); 2463 } 2464 2465 moea64_pvo_entries--; 2466 moea64_pvo_remove_calls++; 2467 } 2468 2469 static struct pvo_entry * 2470 moea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2471 { 2472 struct pvo_entry key; 2473 2474 PMAP_LOCK_ASSERT(pm, MA_OWNED); 2475 2476 key.pvo_vaddr = va & ~ADDR_POFF; 2477 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); 2478 } 2479 2480 static boolean_t 2481 moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit) 2482 { 2483 struct pvo_entry *pvo; 2484 int64_t ret; 2485 boolean_t rv; 2486 2487 /* 2488 * See if this bit is stored in the page already. 2489 */ 2490 if (m->md.mdpg_attrs & ptebit) 2491 return (TRUE); 2492 2493 /* 2494 * Examine each PTE. Sync so that any pending REF/CHG bits are 2495 * flushed to the PTEs. 2496 */ 2497 rv = FALSE; 2498 powerpc_sync(); 2499 PV_PAGE_LOCK(m); 2500 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2501 ret = 0; 2502 2503 /* 2504 * See if this pvo has a valid PTE. if so, fetch the 2505 * REF/CHG bits from the valid PTE. If the appropriate 2506 * ptebit is set, return success. 2507 */ 2508 PMAP_LOCK(pvo->pvo_pmap); 2509 if (!(pvo->pvo_vaddr & PVO_DEAD)) 2510 ret = MOEA64_PTE_SYNCH(mmu, pvo); 2511 PMAP_UNLOCK(pvo->pvo_pmap); 2512 2513 if (ret > 0) { 2514 atomic_set_32(&m->md.mdpg_attrs, 2515 ret & (LPTE_CHG | LPTE_REF)); 2516 if (ret & ptebit) { 2517 rv = TRUE; 2518 break; 2519 } 2520 } 2521 } 2522 PV_PAGE_UNLOCK(m); 2523 2524 return (rv); 2525 } 2526 2527 static u_int 2528 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2529 { 2530 u_int count; 2531 struct pvo_entry *pvo; 2532 int64_t ret; 2533 2534 /* 2535 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2536 * we can reset the right ones). 2537 */ 2538 powerpc_sync(); 2539 2540 /* 2541 * For each pvo entry, clear the pte's ptebit. 

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
{
	struct pvo_entry *pvo;
	int64_t ret;
	boolean_t rv;

	/*
	 * See if this bit is stored in the page already.
	 */
	if (m->md.mdpg_attrs & ptebit)
		return (TRUE);

	/*
	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
	 * flushed to the PTEs.
	 */
	rv = FALSE;
	powerpc_sync();
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_SYNCH(mmu, pvo);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0) {
			atomic_set_32(&m->md.mdpg_attrs,
			    ret & (LPTE_CHG | LPTE_REF));
			if (ret & ptebit) {
				rv = TRUE;
				break;
			}
		}
	}
	PV_PAGE_UNLOCK(m);

	return (rv);
}

static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	int64_t ret;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pte's ptebit.
	 */
	count = 0;
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0 && (ret & ptebit))
			count++;
	}
	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
	PV_PAGE_UNLOCK(m);

	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}
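
/*
 * Offset handling in moea64_mapdev_attr()/moea64_unmapdev() above, with
 * hypothetical numbers and 4 KB pages (PAGE_SIZE = 0x1000): for
 * pa = 0xf1000a04 and size = 0x30, ppa = trunc_page(pa) = 0xf1000000,
 * offset = 0xa04, and the mapped size rounds up to 0x1000, so exactly one
 * page is entered and the returned pointer is va + 0xa04.
 * moea64_unmapdev() repeats the same trunc_page()/roundup2() arithmetic,
 * so the whole page is released even when the caller passes back the
 * offset pointer.
 */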

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* Process at most up to the end of the current page. */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{

	*va = (void *)pa;
}

extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

void
moea64_scan_init(mmu_t mmu)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	int i;

	if (!do_minidump) {
		/* Initialize phys. segments for dumpsys(). */
		memset(&dump_map, 0, sizeof(dump_map));
		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
		for (i = 0; i < pregions_sz; i++) {
			dump_map[i].pa_start = pregions[i].mr_start;
			dump_map[i].pa_size = pregions[i].mr_size;
		}
		return;
	}

	/* Virtual segments for minidumps: */
	memset(&dump_map, 0, sizeof(dump_map));

	/* 1st: kernel .data and .bss. */
	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
	dump_map[0].pa_size = round_page((uintptr_t)_end) -
	    dump_map[0].pa_start;

	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
	dump_map[1].pa_size = round_page(msgbufp->msg_size);

	/* 3rd: kernel VM. */
	va = dump_map[1].pa_start + dump_map[1].pa_size;
	/* Find start of next chunk (from va). */
	while (va < virtual_end) {
		/* Don't dump the buffer cache. */
		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
			va = kmi.buffer_eva;
			continue;
		}
		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
			break;
		va += PAGE_SIZE;
	}
	if (va < virtual_end) {
		dump_map[2].pa_start = va;
		va += PAGE_SIZE;
		/* Find last page in chunk. */
		while (va < virtual_end) {
			/* Don't run into the buffer cache. */
			if (va == kmi.buffer_sva)
				break;
			pvo = moea64_pvo_find_va(kernel_pmap,
			    va & ~ADDR_POFF);
			if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
				break;
			va += PAGE_SIZE;
		}
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}
}