/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
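 *
 * (This module implements the hashed page table used by 64-bit PowerPC
 * AIM/OEA processors: each mapping is tracked as a PVO entry and hashed
 * into a PTEG-indexed table that mirrors the hardware page table.)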
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 * -- Read lock: if no modifications are being made to either the PVO lists
 *    or page table or if any modifications being made result in internal
 *    changes (e.g. wiring, protection) such that the existence of the PVOs
 *    is unchanged and they remain associated with the same pmap (in which
 *    case the changes should be protected by the pmap lock)
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD()		rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD()	rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR()		rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR()	rw_wunlock(&moea64_table_lock)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern int dumpsys_minidump;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock	moea64_table_lock;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;	/* pvo entries by pteg index */

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define	VSID_HASHMASK	0xffffffffUL
#else
#define	NVSIDS		NPMAPS
#define	VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz);
struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_md,		moea64_scan_md),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
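	 * (WIMG attribute bits: LPTE_W = write-through, LPTE_I = cache
	 * inhibited, LPTE_M = memory coherence required, LPTE_G = guarded;
	 * pages found in the physical region list get plain LPTE_M below.)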
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
#ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
#endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
			    NULL, pa, pa, pte_lo,
			    PVO_WIRED | PVO_LARGE, 0);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
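	 * One PVO list head is allocated per PTEG, so PVO lookups use the
	 * same index that va_to_pteg() computes for the hardware hash.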
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE_RD();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE_RD();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
#else
	PCPU_SET(curpmap, pm->pmap_phys);
#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
#else
	PCPU_SET(curpmap, NULL);
#endif
}

void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
			    &pvo->pvo_pte.lpte);
		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		if (pt != -1) {
			/*
			 * The PTE's wired attribute is not a hardware
			 * feature, so there is no need to invalidate any TLB
			 * entries.
			 */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		}
		pm->pm_stats.wired_count--;
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy.  Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

int
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
		pvo_head = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo_flags |= PVO_WIRED;

	for (;;) {
		LOCK_TABLE_WR();
		PMAP_LOCK(pmap);
		error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
		    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
		PMAP_UNLOCK(pmap);
		UNLOCK_TABLE_WR();
		if (error != ENOMEM)
			break;
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (KERN_RESOURCE_SHORTAGE);
		VM_OBJECT_ASSERT_UNLOCKED(m->object);
		VM_WAIT;
	}

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
	return (KERN_SUCCESS);
}

static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map.
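	 * (__syncicache() flushes with cache-block instructions that take
	 * effective addresses, so the data must be mapped somewhere first.)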
	 * Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
		m = TAILQ_NEXT(m, listq);
	}
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    PMAP_ENTER_NOSLEEP, 0);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	LOCK_TABLE_WR();
	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
	    0);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, PTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have LPTE_CHG set.
	 */
	VM_OBJECT_ASSERT_LOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("moea64_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	powerpc_sync();
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != -1) {
				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				MOEA64_PTE_CHANGE(mmu, pt,
				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		if ((lo & LPTE_CHG) != 0)
			vm_page_dirty(m);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 *	moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(mmu, m, LPTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
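 * (The new WIMG bits are recomputed with moea64_calc_wimg() and written
 * back into any PTE currently resident in the page table.)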
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != -1) {
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	m->md.mdpg_cache_attrs = ma;
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	LOCK_TABLE_WR();
	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, pa, pte_lo, PVO_WIRED, 0);
	PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
		    pa, error);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.
 * This count may be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE_RD();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	UNLOCK_TABLE_RD();
	return (count);
}

static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments",__func__);
}

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{

	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
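	 * Each of the 16 segments gets VSID_MAKE(i, hash): the segment
	 * number occupies the low 4 bits and the allocated hash sits above
	 * it, giving every pmap its own block of VSIDs.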
1889 */ 1890 hash = moea64_get_unique_vsid(); 1891 1892 for (i = 0; i < 16; i++) 1893 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1894 1895 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1896 } 1897 #endif 1898 1899 /* 1900 * Initialize the pmap associated with process 0. 1901 */ 1902 void 1903 moea64_pinit0(mmu_t mmu, pmap_t pm) 1904 { 1905 1906 PMAP_LOCK_INIT(pm); 1907 moea64_pinit(mmu, pm); 1908 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1909 } 1910 1911 /* 1912 * Set the physical protection on the specified range of this map as requested. 1913 */ 1914 static void 1915 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) 1916 { 1917 uintptr_t pt; 1918 struct vm_page *pg; 1919 uint64_t oldlo; 1920 1921 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1922 1923 /* 1924 * Grab the PTE pointer before we diddle with the cached PTE 1925 * copy. 1926 */ 1927 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1928 1929 /* 1930 * Change the protection of the page. 1931 */ 1932 oldlo = pvo->pvo_pte.lpte.pte_lo; 1933 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1934 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1935 if ((prot & VM_PROT_EXECUTE) == 0) 1936 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1937 if (prot & VM_PROT_WRITE) 1938 pvo->pvo_pte.lpte.pte_lo |= LPTE_BW; 1939 else 1940 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1941 1942 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1943 1944 /* 1945 * If the PVO is in the page table, update that pte as well. 1946 */ 1947 if (pt != -1) 1948 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1949 pvo->pvo_vpn); 1950 if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) && 1951 (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1952 if ((pg->oflags & VPO_UNMANAGED) == 0) 1953 vm_page_aflag_set(pg, PGA_EXECUTABLE); 1954 moea64_syncicache(mmu, pm, PVO_VADDR(pvo), 1955 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE); 1956 } 1957 1958 /* 1959 * Update vm about the REF/CHG bits if the page is managed and we have 1960 * removed write access. 1961 */ 1962 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 1963 (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) { 1964 if (pg != NULL) { 1965 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG) 1966 vm_page_dirty(pg); 1967 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF) 1968 vm_page_aflag_set(pg, PGA_REFERENCED); 1969 } 1970 } 1971 } 1972 1973 void 1974 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1975 vm_prot_t prot) 1976 { 1977 struct pvo_entry *pvo, *tpvo, key; 1978 1979 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, 1980 sva, eva, prot); 1981 1982 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1983 ("moea64_protect: non current pmap")); 1984 1985 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1986 moea64_remove(mmu, pm, sva, eva); 1987 return; 1988 } 1989 1990 LOCK_TABLE_RD(); 1991 PMAP_LOCK(pm); 1992 key.pvo_vaddr = sva; 1993 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1994 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 1995 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 1996 moea64_pvo_protect(mmu, pm, pvo, prot); 1997 } 1998 UNLOCK_TABLE_RD(); 1999 PMAP_UNLOCK(pm); 2000 } 2001 2002 /* 2003 * Map a list of wired pages into kernel virtual address space. This is 2004 * intended for temporary mappings which do not need page modification or 2005 * references recorded. Existing mappings in the region are overwritten. 
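 *
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical user maps a batch of wired pages, accesses them through the
 * kernel VA window, and then tears the window down with moea64_qremove():
 *
 *	moea64_qenter(mmu, sva, m, npages);
 *	... access the pages through [sva, sva + npages * PAGE_SIZE) ...
 *	moea64_qremove(mmu, sva, npages);
 *
 * where 'sva' is a previously reserved kernel VA range and 'm' is an
 * array of npages vm_page_t pointers.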
2006 */ 2007 void 2008 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2009 { 2010 while (count-- > 0) { 2011 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2012 va += PAGE_SIZE; 2013 m++; 2014 } 2015 } 2016 2017 /* 2018 * Remove page mappings from kernel virtual address space. Intended for 2019 * temporary mappings entered by moea64_qenter. 2020 */ 2021 void 2022 moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2023 { 2024 while (count-- > 0) { 2025 moea64_kremove(mmu, va); 2026 va += PAGE_SIZE; 2027 } 2028 } 2029 2030 void 2031 moea64_release_vsid(uint64_t vsid) 2032 { 2033 int idx, mask; 2034 2035 mtx_lock(&moea64_slb_mutex); 2036 idx = vsid & (NVSIDS-1); 2037 mask = 1 << (idx % VSID_NBPW); 2038 idx /= VSID_NBPW; 2039 KASSERT(moea64_vsid_bitmap[idx] & mask, 2040 ("Freeing unallocated VSID %#jx", vsid)); 2041 moea64_vsid_bitmap[idx] &= ~mask; 2042 mtx_unlock(&moea64_slb_mutex); 2043 } 2044 2045 2046 void 2047 moea64_release(mmu_t mmu, pmap_t pmap) 2048 { 2049 2050 /* 2051 * Free segment registers' VSIDs 2052 */ 2053 #ifdef __powerpc64__ 2054 slb_free_tree(pmap); 2055 slb_free_user_cache(pmap->pm_slb); 2056 #else 2057 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2058 2059 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2060 #endif 2061 } 2062 2063 /* 2064 * Remove all pages mapped by the specified pmap 2065 */ 2066 void 2067 moea64_remove_pages(mmu_t mmu, pmap_t pm) 2068 { 2069 struct pvo_entry *pvo, *tpvo; 2070 2071 LOCK_TABLE_WR(); 2072 PMAP_LOCK(pm); 2073 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { 2074 if (!(pvo->pvo_vaddr & PVO_WIRED)) 2075 moea64_pvo_remove(mmu, pvo); 2076 } 2077 UNLOCK_TABLE_WR(); 2078 PMAP_UNLOCK(pm); 2079 } 2080 2081 /* 2082 * Remove the given range of addresses from the specified map. 2083 */ 2084 void 2085 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2086 { 2087 struct pvo_entry *pvo, *tpvo, key; 2088 2089 /* 2090 * Perform an unsynchronized read. This is, however, safe. 2091 */ 2092 if (pm->pm_stats.resident_count == 0) 2093 return; 2094 2095 LOCK_TABLE_WR(); 2096 PMAP_LOCK(pm); 2097 key.pvo_vaddr = sva; 2098 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 2099 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 2100 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 2101 moea64_pvo_remove(mmu, pvo); 2102 } 2103 UNLOCK_TABLE_WR(); 2104 PMAP_UNLOCK(pm); 2105 } 2106 2107 /* 2108 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2109 * will reflect changes in pte's back to the vm_page. 2110 */ 2111 void 2112 moea64_remove_all(mmu_t mmu, vm_page_t m) 2113 { 2114 struct pvo_entry *pvo, *next_pvo; 2115 pmap_t pmap; 2116 2117 LOCK_TABLE_WR(); 2118 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { 2119 pmap = pvo->pvo_pmap; 2120 PMAP_LOCK(pmap); 2121 moea64_pvo_remove(mmu, pvo); 2122 PMAP_UNLOCK(pmap); 2123 } 2124 UNLOCK_TABLE_WR(); 2125 if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) 2126 vm_page_dirty(m); 2127 vm_page_aflag_clear(m, PGA_WRITEABLE); 2128 vm_page_aflag_clear(m, PGA_EXECUTABLE); 2129 } 2130 2131 /* 2132 * Allocate a physical page of memory directly from the phys_avail map. 2133 * Can only be called from moea64_bootstrap before avail start and end are 2134 * calculated. 
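 *
 * For illustration only (hypothetical call, not taken from this file):
 * early in moea64_bootstrap() one might carve out a page-aligned 64KB
 * scratch region like this, which shrinks or splits the matching
 * phys_avail[] entry as a side effect:
 *
 *	vm_offset_t scratch;
 *
 *	scratch = moea64_bootstrap_alloc(64 * 1024, PAGE_SIZE);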
2135 */ 2136 vm_offset_t 2137 moea64_bootstrap_alloc(vm_size_t size, u_int align) 2138 { 2139 vm_offset_t s, e; 2140 int i, j; 2141 2142 size = round_page(size); 2143 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2144 if (align != 0) 2145 s = (phys_avail[i] + align - 1) & ~(align - 1); 2146 else 2147 s = phys_avail[i]; 2148 e = s + size; 2149 2150 if (s < phys_avail[i] || e > phys_avail[i + 1]) 2151 continue; 2152 2153 if (s + size > platform_real_maxaddr()) 2154 continue; 2155 2156 if (s == phys_avail[i]) { 2157 phys_avail[i] += size; 2158 } else if (e == phys_avail[i + 1]) { 2159 phys_avail[i + 1] -= size; 2160 } else { 2161 for (j = phys_avail_count * 2; j > i; j -= 2) { 2162 phys_avail[j] = phys_avail[j - 2]; 2163 phys_avail[j + 1] = phys_avail[j - 1]; 2164 } 2165 2166 phys_avail[i + 3] = phys_avail[i + 1]; 2167 phys_avail[i + 1] = s; 2168 phys_avail[i + 2] = e; 2169 phys_avail_count++; 2170 } 2171 2172 return (s); 2173 } 2174 panic("moea64_bootstrap_alloc: could not allocate memory"); 2175 } 2176 2177 static int 2178 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone, 2179 struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, 2180 uint64_t pte_lo, int flags, int8_t psind __unused) 2181 { 2182 struct pvo_entry *pvo; 2183 uintptr_t pt; 2184 uint64_t vsid; 2185 int first; 2186 u_int ptegidx; 2187 int i; 2188 int bootstrap; 2189 2190 /* 2191 * One nasty thing that can happen here is that the UMA calls to 2192 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2193 * which calls UMA... 2194 * 2195 * We break the loop by detecting recursion and allocating out of 2196 * the bootstrap pool. 2197 */ 2198 2199 first = 0; 2200 bootstrap = (flags & PVO_BOOTSTRAP); 2201 2202 if (!moea64_initialized) 2203 bootstrap = 1; 2204 2205 PMAP_LOCK_ASSERT(pm, MA_OWNED); 2206 rw_assert(&moea64_table_lock, RA_WLOCKED); 2207 2208 /* 2209 * Compute the PTE Group index. 2210 */ 2211 va &= ~ADDR_POFF; 2212 vsid = va_to_vsid(pm, va); 2213 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2214 2215 /* 2216 * Remove any existing mapping for this page. Reuse the pvo entry if 2217 * there is a mapping. 2218 */ 2219 moea64_pvo_enter_calls++; 2220 2221 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2222 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2223 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2224 (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP)) 2225 == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) { 2226 /* 2227 * The physical page and protection are not 2228 * changing. Instead, this may be a request 2229 * to change the mapping's wired attribute. 
2230 */ 2231 pt = -1; 2232 if ((flags & PVO_WIRED) != 0 && 2233 (pvo->pvo_vaddr & PVO_WIRED) == 0) { 2234 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2235 pvo->pvo_vaddr |= PVO_WIRED; 2236 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2237 pm->pm_stats.wired_count++; 2238 } else if ((flags & PVO_WIRED) == 0 && 2239 (pvo->pvo_vaddr & PVO_WIRED) != 0) { 2240 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2241 pvo->pvo_vaddr &= ~PVO_WIRED; 2242 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 2243 pm->pm_stats.wired_count--; 2244 } 2245 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) { 2246 KASSERT(pt == -1, 2247 ("moea64_pvo_enter: valid pt")); 2248 /* Re-insert if spilled */ 2249 i = MOEA64_PTE_INSERT(mmu, ptegidx, 2250 &pvo->pvo_pte.lpte); 2251 if (i >= 0) 2252 PVO_PTEGIDX_SET(pvo, i); 2253 moea64_pte_overflow--; 2254 } else if (pt != -1) { 2255 /* 2256 * The PTE's wired attribute is not a 2257 * hardware feature, so there is no 2258 * need to invalidate any TLB entries. 2259 */ 2260 MOEA64_PTE_CHANGE(mmu, pt, 2261 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2262 } 2263 return (0); 2264 } 2265 moea64_pvo_remove(mmu, pvo); 2266 break; 2267 } 2268 } 2269 2270 /* 2271 * If we aren't overwriting a mapping, try to allocate. 2272 */ 2273 if (bootstrap) { 2274 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2275 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 2276 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2277 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2278 } 2279 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2280 moea64_bpvo_pool_index++; 2281 bootstrap = 1; 2282 } else { 2283 pvo = uma_zalloc(zone, M_NOWAIT); 2284 } 2285 2286 if (pvo == NULL) 2287 return (ENOMEM); 2288 2289 moea64_pvo_entries++; 2290 pvo->pvo_vaddr = va; 2291 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2292 | (vsid << 16); 2293 pvo->pvo_pmap = pm; 2294 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2295 pvo->pvo_vaddr &= ~ADDR_POFF; 2296 2297 if (flags & PVO_WIRED) 2298 pvo->pvo_vaddr |= PVO_WIRED; 2299 if (pvo_head != NULL) 2300 pvo->pvo_vaddr |= PVO_MANAGED; 2301 if (bootstrap) 2302 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2303 if (flags & PVO_LARGE) 2304 pvo->pvo_vaddr |= PVO_LARGE; 2305 2306 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2307 (uint64_t)(pa) | pte_lo, flags); 2308 2309 /* 2310 * Add to pmap list 2311 */ 2312 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo); 2313 2314 /* 2315 * Remember if the list was empty and therefore will be the first 2316 * item. 2317 */ 2318 if (pvo_head != NULL) { 2319 if (LIST_FIRST(pvo_head) == NULL) 2320 first = 1; 2321 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2322 } 2323 2324 if (pvo->pvo_vaddr & PVO_WIRED) { 2325 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2326 pm->pm_stats.wired_count++; 2327 } 2328 pm->pm_stats.resident_count++; 2329 2330 /* 2331 * We hope this succeeds but it isn't required. 2332 */ 2333 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 2334 if (i >= 0) { 2335 PVO_PTEGIDX_SET(pvo, i); 2336 } else { 2337 panic("moea64_pvo_enter: overflow"); 2338 moea64_pte_overflow++; 2339 } 2340 2341 if (pm == kernel_pmap) 2342 isync(); 2343 2344 #ifdef __powerpc64__ 2345 /* 2346 * Make sure all our bootstrap mappings are in the SLB as soon 2347 * as virtual memory is switched on. 2348 */ 2349 if (!pmap_bootstrapped) 2350 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2351 #endif 2352 2353 return (first ? 
	    ENOENT : 0);
}

static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	struct vm_page *pg;
	uintptr_t pt;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
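		 *
		 * (Illustrative note, not from the original code: callers
		 * elsewhere in this file pass bits such as LPTE_CHG or
		 * LPTE_REF as 'ptebit', so a hypothetical dirtiness check
		 * looks like
		 *
		 *	if (moea64_query_bit(mmu, m, LPTE_CHG))
		 *		vm_page_dirty(m);
		 *
		 * with the table lock and per-pmap locks taken internally.)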
2460 */ 2461 PMAP_LOCK(pvo->pvo_pmap); 2462 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2463 if (pt != -1) { 2464 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2465 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2466 PMAP_UNLOCK(pvo->pvo_pmap); 2467 UNLOCK_TABLE_RD(); 2468 return (TRUE); 2469 } 2470 } 2471 PMAP_UNLOCK(pvo->pvo_pmap); 2472 } 2473 2474 UNLOCK_TABLE_RD(); 2475 return (FALSE); 2476 } 2477 2478 static u_int 2479 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2480 { 2481 u_int count; 2482 struct pvo_entry *pvo; 2483 uintptr_t pt; 2484 2485 /* 2486 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2487 * we can reset the right ones). note that since the pvo entries and 2488 * list heads are accessed via BAT0 and are never placed in the page 2489 * table, we don't have to worry about further accesses setting the 2490 * REF/CHG bits. 2491 */ 2492 powerpc_sync(); 2493 2494 /* 2495 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2496 * valid pte clear the ptebit from the valid pte. 2497 */ 2498 count = 0; 2499 LOCK_TABLE_RD(); 2500 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2501 PMAP_LOCK(pvo->pvo_pmap); 2502 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2503 if (pt != -1) { 2504 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2505 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2506 count++; 2507 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte, 2508 pvo->pvo_vpn, ptebit); 2509 } 2510 } 2511 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2512 PMAP_UNLOCK(pvo->pvo_pmap); 2513 } 2514 2515 UNLOCK_TABLE_RD(); 2516 return (count); 2517 } 2518 2519 boolean_t 2520 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2521 { 2522 struct pvo_entry *pvo, key; 2523 vm_offset_t ppa; 2524 int error = 0; 2525 2526 PMAP_LOCK(kernel_pmap); 2527 key.pvo_vaddr = ppa = pa & ~ADDR_POFF; 2528 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); 2529 ppa < pa + size; ppa += PAGE_SIZE, 2530 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { 2531 if (pvo == NULL || 2532 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2533 error = EFAULT; 2534 break; 2535 } 2536 } 2537 PMAP_UNLOCK(kernel_pmap); 2538 2539 return (error); 2540 } 2541 2542 /* 2543 * Map a set of physical memory pages into the kernel virtual 2544 * address space. Return a pointer to where it is mapped. This 2545 * routine is intended to be used for mapping device memory, 2546 * NOT real memory. 
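 *
 * Illustrative use (hypothetical addresses, not part of this file): a
 * driver-style mapping of a 4KB register window at physical 0xf1000000
 * and its later teardown would look like
 *
 *	void *regs;
 *
 *	regs = moea64_mapdev(mmu, 0xf1000000, PAGE_SIZE);
 *	... access the registers through 'regs' ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
 *
 * moea64_mapdev_attr() additionally lets the caller choose the caching
 * attributes via the vm_memattr_t argument.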
2547 */ 2548 void * 2549 moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2550 { 2551 vm_offset_t va, tmpva, ppa, offset; 2552 2553 ppa = trunc_page(pa); 2554 offset = pa & PAGE_MASK; 2555 size = roundup2(offset + size, PAGE_SIZE); 2556 2557 va = kva_alloc(size); 2558 2559 if (!va) 2560 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2561 2562 for (tmpva = va; size > 0;) { 2563 moea64_kenter_attr(mmu, tmpva, ppa, ma); 2564 size -= PAGE_SIZE; 2565 tmpva += PAGE_SIZE; 2566 ppa += PAGE_SIZE; 2567 } 2568 2569 return ((void *)(va + offset)); 2570 } 2571 2572 void * 2573 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2574 { 2575 2576 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); 2577 } 2578 2579 void 2580 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2581 { 2582 vm_offset_t base, offset; 2583 2584 base = trunc_page(va); 2585 offset = va & PAGE_MASK; 2586 size = roundup2(offset + size, PAGE_SIZE); 2587 2588 kva_free(base, size); 2589 } 2590 2591 void 2592 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2593 { 2594 struct pvo_entry *pvo; 2595 vm_offset_t lim; 2596 vm_paddr_t pa; 2597 vm_size_t len; 2598 2599 PMAP_LOCK(pm); 2600 while (sz > 0) { 2601 lim = round_page(va); 2602 len = MIN(lim - va, sz); 2603 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 2604 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) { 2605 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 2606 (va & ADDR_POFF); 2607 moea64_syncicache(mmu, pm, va, pa, len); 2608 } 2609 va += len; 2610 sz -= len; 2611 } 2612 PMAP_UNLOCK(pm); 2613 } 2614 2615 vm_offset_t 2616 moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2617 vm_size_t *sz) 2618 { 2619 if (md->md_vaddr == ~0UL) 2620 return (md->md_paddr + ofs); 2621 else 2622 return (md->md_vaddr + ofs); 2623 } 2624 2625 struct pmap_md * 2626 moea64_scan_md(mmu_t mmu, struct pmap_md *prev) 2627 { 2628 static struct pmap_md md; 2629 struct pvo_entry *pvo; 2630 vm_offset_t va; 2631 2632 if (dumpsys_minidump) { 2633 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2634 if (prev == NULL) { 2635 /* 1st: kernel .data and .bss. */ 2636 md.md_index = 1; 2637 md.md_vaddr = trunc_page((uintptr_t)_etext); 2638 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2639 return (&md); 2640 } 2641 switch (prev->md_index) { 2642 case 1: 2643 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2644 md.md_index = 2; 2645 md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr; 2646 md.md_size = round_page(msgbufp->msg_size); 2647 break; 2648 case 2: 2649 /* 3rd: kernel VM. */ 2650 va = prev->md_vaddr + prev->md_size; 2651 /* Find start of next chunk (from va). */ 2652 while (va < virtual_end) { 2653 /* Don't dump the buffer cache. */ 2654 if (va >= kmi.buffer_sva && 2655 va < kmi.buffer_eva) { 2656 va = kmi.buffer_eva; 2657 continue; 2658 } 2659 pvo = moea64_pvo_find_va(kernel_pmap, 2660 va & ~ADDR_POFF); 2661 if (pvo != NULL && 2662 (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) 2663 break; 2664 va += PAGE_SIZE; 2665 } 2666 if (va < virtual_end) { 2667 md.md_vaddr = va; 2668 va += PAGE_SIZE; 2669 /* Find last page in chunk. */ 2670 while (va < virtual_end) { 2671 /* Don't run into the buffer cache. 
					 */
					if (va == kmi.buffer_sva)
						break;
					pvo = moea64_pvo_find_va(kernel_pmap,
					    va & ~ADDR_POFF);
					if (pvo == NULL ||
					    !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* Full dump: walk the physical memory regions. */
		if (prev == NULL) {
			/* First physical chunk. */
			md.md_paddr = pregions[0].mr_start;
			md.md_size = pregions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < pregions_sz) {
			md.md_paddr = pregions[md.md_index].mr_start;
			md.md_size = pregions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}
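/*
 * Illustrative only (hypothetical consumer, not part of this file): the
 * intended calling protocol for moea64_scan_md() is to start with a NULL
 * 'prev' and feed each returned chunk back in until NULL comes back, e.g.
 *
 *	struct pmap_md *md;
 *
 *	for (md = moea64_scan_md(mmu, NULL); md != NULL;
 *	    md = moea64_scan_md(mmu, md))
 *		dump_chunk(md->md_paddr, md->md_vaddr, md->md_size);
 *
 * where dump_chunk() stands in for whatever the dump code does with each
 * physical or virtual chunk.
 */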