1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2018 Matthew Macy 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/param.h> 32 #include <sys/kernel.h> 33 #include <sys/systm.h> 34 #include <sys/conf.h> 35 #include <sys/bitstring.h> 36 #include <sys/queue.h> 37 #include <sys/cpuset.h> 38 #include <sys/endian.h> 39 #include <sys/kerneldump.h> 40 #include <sys/ktr.h> 41 #include <sys/lock.h> 42 #include <sys/syslog.h> 43 #include <sys/msgbuf.h> 44 #include <sys/malloc.h> 45 #include <sys/mman.h> 46 #include <sys/mutex.h> 47 #include <sys/proc.h> 48 #include <sys/rwlock.h> 49 #include <sys/sched.h> 50 #include <sys/sysctl.h> 51 #include <sys/systm.h> 52 #include <sys/vmem.h> 53 #include <sys/vmmeter.h> 54 #include <sys/smp.h> 55 56 #include <sys/kdb.h> 57 58 #include <dev/ofw/openfirm.h> 59 60 #include <vm/vm.h> 61 #include <vm/pmap.h> 62 #include <vm/vm_param.h> 63 #include <vm/vm_kern.h> 64 #include <vm/vm_page.h> 65 #include <vm/vm_map.h> 66 #include <vm/vm_object.h> 67 #include <vm/vm_extern.h> 68 #include <vm/vm_pageout.h> 69 #include <vm/vm_phys.h> 70 #include <vm/vm_reserv.h> 71 #include <vm/vm_dumpset.h> 72 #include <vm/uma.h> 73 74 #include <machine/_inttypes.h> 75 #include <machine/cpu.h> 76 #include <machine/platform.h> 77 #include <machine/frame.h> 78 #include <machine/md_var.h> 79 #include <machine/psl.h> 80 #include <machine/bat.h> 81 #include <machine/hid.h> 82 #include <machine/pte.h> 83 #include <machine/sr.h> 84 #include <machine/trap.h> 85 #include <machine/mmuvar.h> 86 87 #ifdef INVARIANTS 88 #include <vm/uma_dbg.h> 89 #endif 90 91 #define PPC_BITLSHIFT(bit) (sizeof(long)*NBBY - 1 - (bit)) 92 #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) 93 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit)) 94 95 #include "opt_ddb.h" 96 #ifdef DDB 97 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va); 98 #endif 99 100 #define PG_W RPTE_WIRED 101 #define PG_V RPTE_VALID 102 #define PG_MANAGED RPTE_MANAGED 103 #define PG_PROMOTED RPTE_PROMOTED 104 #define PG_M RPTE_C 105 #define PG_A RPTE_R 106 #define PG_X RPTE_EAA_X 107 #define PG_RW RPTE_EAA_W 108 #define PG_PTE_CACHE RPTE_ATTR_MASK 109 110 #define RPTE_SHIFT 9 111 #define NLS_MASK ((1UL<<5)-1) 
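/*
 * Page-table geometry, for reference: each level below the top-level
 * directory is a 4KB page of 512 (1 << RPTE_SHIFT) 8-byte entries, so the
 * RPTE_MASK defined below extracts a 9-bit index from a virtual address,
 * and NLS_MASK covers the low bits of a directory entry that encode the
 * size of the next level.  Assuming the usual radix shifts (12/21/30/39
 * for the PTE/L3/L2/L1 levels), a 52-bit VA decomposes as used by the
 * pmap_*_index() helpers further down:
 *
 *	pte index = (va >> 12) & RPTE_MASK		(9 bits)
 *	l3e index = (va >> 21) & RPTE_MASK		(9 bits)
 *	l2e index = (va >> 30) & RPTE_MASK		(9 bits)
 *	l1e index = (va & PG_FRAME) >> 39		(13 bits, 8192 entries)
 */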
112 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT) 113 #define RPTE_MASK (RPTE_ENTRIES-1) 114 115 #define NLB_SHIFT 0 116 #define NLB_MASK (((1UL<<52)-1) << 8) 117 118 extern int nkpt; 119 extern caddr_t crashdumpmap; 120 121 #define RIC_FLUSH_TLB 0 122 #define RIC_FLUSH_PWC 1 123 #define RIC_FLUSH_ALL 2 124 125 #define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */ 126 127 #define PPC_INST_TLBIE 0x7c000264 128 #define PPC_INST_TLBIEL 0x7c000224 129 #define PPC_INST_SLBIA 0x7c0003e4 130 131 #define ___PPC_RA(a) (((a) & 0x1f) << 16) 132 #define ___PPC_RB(b) (((b) & 0x1f) << 11) 133 #define ___PPC_RS(s) (((s) & 0x1f) << 21) 134 #define ___PPC_RT(t) ___PPC_RS(t) 135 #define ___PPC_R(r) (((r) & 0x1) << 16) 136 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17) 137 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18) 138 139 #define PPC_SLBIA(IH) __XSTRING(.long PPC_INST_SLBIA | \ 140 ((IH & 0x7) << 21)) 141 #define PPC_TLBIE_5(rb,rs,ric,prs,r) \ 142 __XSTRING(.long PPC_INST_TLBIE | \ 143 ___PPC_RB(rb) | ___PPC_RS(rs) | \ 144 ___PPC_RIC(ric) | ___PPC_PRS(prs) | \ 145 ___PPC_R(r)) 146 147 #define PPC_TLBIEL(rb,rs,ric,prs,r) \ 148 __XSTRING(.long PPC_INST_TLBIEL | \ 149 ___PPC_RB(rb) | ___PPC_RS(rs) | \ 150 ___PPC_RIC(ric) | ___PPC_PRS(prs) | \ 151 ___PPC_R(r)) 152 153 #define PPC_INVALIDATE_ERAT PPC_SLBIA(7) 154 155 static __inline void 156 ttusync(void) 157 { 158 __asm __volatile("eieio; tlbsync; ptesync" ::: "memory"); 159 } 160 161 #define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */ 162 #define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */ 163 #define TLBIEL_INVAL_SET_PID 0x400 /* invalidate a set for the current PID */ 164 #define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */ 165 #define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */ 166 167 #define TLBIE_ACTUAL_PAGE_MASK 0xe0 168 #define TLBIE_ACTUAL_PAGE_4K 0x00 169 #define TLBIE_ACTUAL_PAGE_64K 0xa0 170 #define TLBIE_ACTUAL_PAGE_2M 0x20 171 #define TLBIE_ACTUAL_PAGE_1G 0x40 172 173 #define TLBIE_PRS_PARTITION_SCOPE 0x0 174 #define TLBIE_PRS_PROCESS_SCOPE 0x1 175 176 #define TLBIE_RIC_INVALIDATE_TLB 0x0 /* Invalidate just TLB */ 177 #define TLBIE_RIC_INVALIDATE_PWC 0x1 /* Invalidate just PWC */ 178 #define TLBIE_RIC_INVALIDATE_ALL 0x2 /* Invalidate TLB, PWC, 179 * cached {proc, part}tab entries 180 */ 181 #define TLBIE_RIC_INVALIDATE_SEQ 0x3 /* HPT - only: 182 * Invalidate a range of translations 183 */ 184 185 static __always_inline void 186 radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid, 187 vm_offset_t va, uint16_t ap) 188 { 189 uint64_t rb, rs; 190 191 MPASS((va & PAGE_MASK) == 0); 192 193 rs = ((uint64_t)pid << 32) | lpid; 194 rb = va | is | ap; 195 __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : : 196 "r" (rb), "r" (rs), "i" (ric), "i" (prs)); 197 } 198 199 static __inline void 200 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va) 201 { 202 203 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE, 204 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K); 205 } 206 207 static __inline void 208 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va) 209 { 210 211 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE, 212 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M); 213 } 214 215 static __inline void 216 radix_tlbie_invlpwc_user(uint32_t pid) 217 { 218 219 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE, 220 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0); 221 } 222 223 static __inline void 224 
radix_tlbie_flush_user(uint32_t pid) 225 { 226 227 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE, 228 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0); 229 } 230 231 static __inline void 232 radix_tlbie_invlpg_kernel_4k(vm_offset_t va) 233 { 234 235 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE, 236 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K); 237 } 238 239 static __inline void 240 radix_tlbie_invlpg_kernel_2m(vm_offset_t va) 241 { 242 243 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE, 244 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M); 245 } 246 247 /* 1GB pages aren't currently supported. */ 248 static __inline __unused void 249 radix_tlbie_invlpg_kernel_1g(vm_offset_t va) 250 { 251 252 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE, 253 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G); 254 } 255 256 static __inline void 257 radix_tlbie_invlpwc_kernel(void) 258 { 259 260 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE, 261 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0); 262 } 263 264 static __inline void 265 radix_tlbie_flush_kernel(void) 266 { 267 268 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE, 269 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0); 270 } 271 272 static __inline vm_pindex_t 273 pmap_l3e_pindex(vm_offset_t va) 274 { 275 return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT); 276 } 277 278 static __inline vm_pindex_t 279 pmap_pml3e_index(vm_offset_t va) 280 { 281 282 return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK); 283 } 284 285 static __inline vm_pindex_t 286 pmap_pml2e_index(vm_offset_t va) 287 { 288 return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK); 289 } 290 291 static __inline vm_pindex_t 292 pmap_pml1e_index(vm_offset_t va) 293 { 294 return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT); 295 } 296 297 /* Return various clipped indexes for a given VA */ 298 static __inline vm_pindex_t 299 pmap_pte_index(vm_offset_t va) 300 { 301 302 return ((va >> PAGE_SHIFT) & RPTE_MASK); 303 } 304 305 /* Return a pointer to the PT slot that corresponds to a VA */ 306 static __inline pt_entry_t * 307 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va) 308 { 309 pt_entry_t *pte; 310 vm_paddr_t ptepa; 311 312 ptepa = (*l3e & NLB_MASK); 313 pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa); 314 return (&pte[pmap_pte_index(va)]); 315 } 316 317 /* Return a pointer to the PD slot that corresponds to a VA */ 318 static __inline pt_entry_t * 319 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va) 320 { 321 pt_entry_t *l3e; 322 vm_paddr_t l3pa; 323 324 l3pa = (*l2e & NLB_MASK); 325 l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa); 326 return (&l3e[pmap_pml3e_index(va)]); 327 } 328 329 /* Return a pointer to the PD slot that corresponds to a VA */ 330 static __inline pt_entry_t * 331 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va) 332 { 333 pt_entry_t *l2e; 334 vm_paddr_t l2pa; 335 336 l2pa = (*l1e & NLB_MASK); 337 338 l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa); 339 return (&l2e[pmap_pml2e_index(va)]); 340 } 341 342 static __inline pml1_entry_t * 343 pmap_pml1e(pmap_t pmap, vm_offset_t va) 344 { 345 346 return (&pmap->pm_pml1[pmap_pml1e_index(va)]); 347 } 348 349 static pt_entry_t * 350 pmap_pml2e(pmap_t pmap, vm_offset_t va) 351 { 352 pt_entry_t *l1e; 353 354 l1e = pmap_pml1e(pmap, va); 355 if (l1e == NULL || (*l1e & RPTE_VALID) == 0) 356 return (NULL); 357 return (pmap_l1e_to_l2e(l1e, va)); 358 } 359 360 static __inline pt_entry_t * 361 pmap_pml3e(pmap_t pmap, vm_offset_t va) 362 { 363 pt_entry_t *l2e; 364 365 l2e = pmap_pml2e(pmap, va); 366 if (l2e == NULL || 
(*l2e & RPTE_VALID) == 0) 367 return (NULL); 368 return (pmap_l2e_to_l3e(l2e, va)); 369 } 370 371 static __inline pt_entry_t * 372 pmap_pte(pmap_t pmap, vm_offset_t va) 373 { 374 pt_entry_t *l3e; 375 376 l3e = pmap_pml3e(pmap, va); 377 if (l3e == NULL || (*l3e & RPTE_VALID) == 0) 378 return (NULL); 379 return (pmap_l3e_to_pte(l3e, va)); 380 } 381 382 int nkpt = 64; 383 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0, 384 "Number of kernel page table pages allocated on bootup"); 385 386 vm_paddr_t dmaplimit; 387 388 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 389 390 static int pg_ps_enabled = 1; 391 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 392 &pg_ps_enabled, 0, "Are large page mappings enabled?"); 393 #ifdef INVARIANTS 394 #define VERBOSE_PMAP 0 395 #define VERBOSE_PROTECT 0 396 static int pmap_logging; 397 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN, 398 &pmap_logging, 0, "verbose debug logging"); 399 #endif 400 401 static u_int64_t KPTphys; /* phys addr of kernel level 1 */ 402 403 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ 404 405 static vm_offset_t qframe = 0; 406 static struct mtx qframe_mtx; 407 408 void mmu_radix_activate(struct thread *); 409 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int); 410 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *, 411 vm_size_t); 412 void mmu_radix_clear_modify(vm_page_t); 413 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); 414 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *); 415 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t); 416 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 417 vm_prot_t); 418 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 419 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va); 420 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); 421 void mmu_radix_kenter(vm_offset_t, vm_paddr_t); 422 vm_paddr_t mmu_radix_kextract(vm_offset_t); 423 void mmu_radix_kremove(vm_offset_t); 424 boolean_t mmu_radix_is_modified(vm_page_t); 425 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t); 426 boolean_t mmu_radix_is_referenced(vm_page_t); 427 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t, 428 vm_pindex_t, vm_size_t); 429 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t); 430 void mmu_radix_page_init(vm_page_t); 431 boolean_t mmu_radix_page_is_mapped(vm_page_t m); 432 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t); 433 int mmu_radix_page_wired_mappings(vm_page_t); 434 int mmu_radix_pinit(pmap_t); 435 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 436 bool mmu_radix_ps_enabled(pmap_t); 437 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int); 438 void mmu_radix_qremove(vm_offset_t, int); 439 vm_offset_t mmu_radix_quick_enter_page(vm_page_t); 440 void mmu_radix_quick_remove_page(vm_offset_t); 441 boolean_t mmu_radix_ts_referenced(vm_page_t); 442 void mmu_radix_release(pmap_t); 443 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t); 444 void mmu_radix_remove_all(vm_page_t); 445 void mmu_radix_remove_pages(pmap_t); 446 void mmu_radix_remove_write(vm_page_t); 447 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t); 448 void mmu_radix_zero_page(vm_page_t); 449 void mmu_radix_zero_page_area(vm_page_t, int, int); 450 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t); 451 void 
mmu_radix_page_array_startup(long pages); 452 453 #include "mmu_oea64.h" 454 455 /* 456 * Kernel MMU interface 457 */ 458 459 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t); 460 461 static void mmu_radix_copy_page(vm_page_t, vm_page_t); 462 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset, 463 vm_page_t *mb, vm_offset_t b_offset, int xfersize); 464 static void mmu_radix_growkernel(vm_offset_t); 465 static void mmu_radix_init(void); 466 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *); 467 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 468 static void mmu_radix_pinit0(pmap_t); 469 470 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t); 471 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); 472 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t); 473 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma); 474 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t); 475 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va); 476 static void mmu_radix_scan_init(void); 477 static void mmu_radix_cpu_bootstrap(int ap); 478 static void mmu_radix_tlbie_all(void); 479 480 static struct pmap_funcs mmu_radix_methods = { 481 .bootstrap = mmu_radix_bootstrap, 482 .copy_page = mmu_radix_copy_page, 483 .copy_pages = mmu_radix_copy_pages, 484 .cpu_bootstrap = mmu_radix_cpu_bootstrap, 485 .growkernel = mmu_radix_growkernel, 486 .init = mmu_radix_init, 487 .map = mmu_radix_map, 488 .mincore = mmu_radix_mincore, 489 .pinit = mmu_radix_pinit, 490 .pinit0 = mmu_radix_pinit0, 491 492 .mapdev = mmu_radix_mapdev, 493 .mapdev_attr = mmu_radix_mapdev_attr, 494 .unmapdev = mmu_radix_unmapdev, 495 .kenter_attr = mmu_radix_kenter_attr, 496 .dev_direct_mapped = mmu_radix_dev_direct_mapped, 497 .dumpsys_pa_init = mmu_radix_scan_init, 498 .dumpsys_map_chunk = mmu_radix_dumpsys_map, 499 .page_is_mapped = mmu_radix_page_is_mapped, 500 .ps_enabled = mmu_radix_ps_enabled, 501 .object_init_pt = mmu_radix_object_init_pt, 502 .protect = mmu_radix_protect, 503 /* pmap dispatcher interface */ 504 .clear_modify = mmu_radix_clear_modify, 505 .copy = mmu_radix_copy, 506 .enter = mmu_radix_enter, 507 .enter_object = mmu_radix_enter_object, 508 .enter_quick = mmu_radix_enter_quick, 509 .extract = mmu_radix_extract, 510 .extract_and_hold = mmu_radix_extract_and_hold, 511 .is_modified = mmu_radix_is_modified, 512 .is_prefaultable = mmu_radix_is_prefaultable, 513 .is_referenced = mmu_radix_is_referenced, 514 .ts_referenced = mmu_radix_ts_referenced, 515 .page_exists_quick = mmu_radix_page_exists_quick, 516 .page_init = mmu_radix_page_init, 517 .page_wired_mappings = mmu_radix_page_wired_mappings, 518 .qenter = mmu_radix_qenter, 519 .qremove = mmu_radix_qremove, 520 .release = mmu_radix_release, 521 .remove = mmu_radix_remove, 522 .remove_all = mmu_radix_remove_all, 523 .remove_write = mmu_radix_remove_write, 524 .unwire = mmu_radix_unwire, 525 .zero_page = mmu_radix_zero_page, 526 .zero_page_area = mmu_radix_zero_page_area, 527 .activate = mmu_radix_activate, 528 .quick_enter_page = mmu_radix_quick_enter_page, 529 .quick_remove_page = mmu_radix_quick_remove_page, 530 .page_set_memattr = mmu_radix_page_set_memattr, 531 .page_array_startup = mmu_radix_page_array_startup, 532 533 /* Internal interfaces */ 534 .kenter = mmu_radix_kenter, 535 .kextract = mmu_radix_kextract, 536 .kremove = mmu_radix_kremove, 537 .change_attr = mmu_radix_change_attr, 538 .decode_kernel_ptr = 
mmu_radix_decode_kernel_ptr, 539 540 .tlbie_all = mmu_radix_tlbie_all, 541 }; 542 543 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods); 544 545 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, 546 struct rwlock **lockp); 547 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); 548 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *); 549 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, 550 struct spglist *free, struct rwlock **lockp); 551 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 552 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp); 553 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 554 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde, 555 struct spglist *free); 556 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 557 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp); 558 559 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e, 560 u_int flags, struct rwlock **lockp); 561 #if VM_NRESERVLEVEL > 0 562 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 563 struct rwlock **lockp); 564 #endif 565 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 566 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); 567 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 568 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate); 569 570 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 571 vm_prot_t prot, struct rwlock **lockp); 572 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, 573 u_int flags, vm_page_t m, struct rwlock **lockp); 574 575 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); 576 static void free_pv_chunk(struct pv_chunk *pc); 577 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp); 578 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va, 579 struct rwlock **lockp); 580 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, 581 struct rwlock **lockp); 582 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, 583 struct spglist *free); 584 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); 585 586 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start); 587 static void pmap_invalidate_all(pmap_t pmap); 588 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush); 589 590 /* 591 * Internal flags for pmap_enter()'s helper functions. 592 */ 593 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 594 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 595 596 #define UNIMPLEMENTED() panic("%s not implemented", __func__) 597 #define UNTESTED() panic("%s not yet tested", __func__) 598 599 /* Number of supported PID bits */ 600 static unsigned int isa3_pid_bits; 601 602 /* PID to start allocating from */ 603 static unsigned int isa3_base_pid; 604 605 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4) 606 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits) 607 608 /* 609 * Map of physical memory regions. 
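 * "pregions" holds the installed physical memory as reported by the
 * firmware and "regions" the portion available for allocation; both are
 * filled in by mem_regions() from mmu_radix_early_bootstrap() below.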
610 */ 611 static struct mem_region *regions, *pregions; 612 static struct numa_mem_region *numa_pregions; 613 static u_int phys_avail_count; 614 static int regions_sz, pregions_sz, numa_pregions_sz; 615 static struct pate *isa3_parttab; 616 static struct prte *isa3_proctab; 617 static vmem_t *asid_arena; 618 619 extern void bs_remap_earlyboot(void); 620 621 #define RADIX_PGD_SIZE_SHIFT 16 622 #define RADIX_PGD_SIZE (1UL << RADIX_PGD_SIZE_SHIFT) 623 624 #define RADIX_PGD_INDEX_SHIFT (RADIX_PGD_SIZE_SHIFT-3) 625 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t)) 626 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t)) 627 628 #define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t)) /* number of userland PML1 pages */ 629 #define NUPDPE (NUPML1E * NL2EPG)/* number of userland PDP pages */ 630 #define NUPDE (NUPDPE * NL3EPG) /* number of userland PD entries */ 631 632 /* POWER9 only permits a 64k partition table size. */ 633 #define PARTTAB_SIZE_SHIFT 16 634 #define PARTTAB_SIZE (1UL << PARTTAB_SIZE_SHIFT) 635 636 #define PARTTAB_HR (1UL << 63) /* host uses radix */ 637 #define PARTTAB_GR (1UL << 63) /* guest uses radix must match host */ 638 639 /* TLB flush actions. Used as argument to tlbiel_all() */ 640 enum { 641 TLB_INVAL_SCOPE_LPID = 0, /* invalidate TLBs for current LPID */ 642 TLB_INVAL_SCOPE_GLOBAL = 1, /* invalidate all TLBs */ 643 }; 644 645 #define NPV_LIST_LOCKS MAXCPU 646 static int pmap_initialized; 647 static vm_paddr_t proctab0pa; 648 static vm_paddr_t parttab_phys; 649 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 650 651 /* 652 * Data for the pv entry allocation mechanism. 653 * Updates to pv_invl_gen are protected by the pv_list_locks[] 654 * elements, but reads are not. 655 */ 656 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 657 static struct mtx __exclusive_cache_line pv_chunks_mutex; 658 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS]; 659 static struct md_page *pv_table; 660 static struct md_page pv_dummy; 661 662 #ifdef PV_STATS 663 #define PV_STAT(x) do { x ; } while (0) 664 #else 665 #define PV_STAT(x) do { } while (0) 666 #endif 667 668 #define pa_radix_index(pa) ((pa) >> L3_PAGE_SIZE_SHIFT) 669 #define pa_to_pvh(pa) (&pv_table[pa_radix_index(pa)]) 670 671 #define PHYS_TO_PV_LIST_LOCK(pa) \ 672 (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS]) 673 674 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ 675 struct rwlock **_lockp = (lockp); \ 676 struct rwlock *_new_lock; \ 677 \ 678 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \ 679 if (_new_lock != *_lockp) { \ 680 if (*_lockp != NULL) \ 681 rw_wunlock(*_lockp); \ 682 *_lockp = _new_lock; \ 683 rw_wlock(*_lockp); \ 684 } \ 685 } while (0) 686 687 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ 688 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) 689 690 #define RELEASE_PV_LIST_LOCK(lockp) do { \ 691 struct rwlock **_lockp = (lockp); \ 692 \ 693 if (*_lockp != NULL) { \ 694 rw_wunlock(*_lockp); \ 695 *_lockp = NULL; \ 696 } \ 697 } while (0) 698 699 #define VM_PAGE_TO_PV_LIST_LOCK(m) \ 700 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)) 701 702 /* 703 * We support 52 bits, hence: 704 * bits 52 - 31 = 21, 0b10101 705 * RTS encoding details 706 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long 707 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long 708 */ 709 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5)) 710 711 static int powernv_enabled = 1; 712 713 static __always_inline void 714 tlbiel_radix_set_isa300(uint32_t set, uint32_t is, 715 uint32_t pid, uint32_t ric, 
uint32_t prs) 716 { 717 uint64_t rb; 718 uint64_t rs; 719 720 rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53); 721 rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31); 722 723 __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1) 724 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs) 725 : "memory"); 726 } 727 728 static void 729 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is) 730 { 731 uint32_t set; 732 733 __asm __volatile("ptesync": : :"memory"); 734 735 /* 736 * Flush the first set of the TLB, and the entire Page Walk Cache 737 * and partition table entries. Then flush the remaining sets of the 738 * TLB. 739 */ 740 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0); 741 for (set = 1; set < num_sets; set++) 742 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0); 743 744 /* Do the same for process scoped entries. */ 745 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1); 746 for (set = 1; set < num_sets; set++) 747 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1); 748 749 __asm __volatile("ptesync": : :"memory"); 750 } 751 752 static void 753 mmu_radix_tlbiel_flush(int scope) 754 { 755 int is; 756 757 MPASS(scope == TLB_INVAL_SCOPE_LPID || 758 scope == TLB_INVAL_SCOPE_GLOBAL); 759 is = scope + 2; 760 761 tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, is); 762 __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); 763 } 764 765 static void 766 mmu_radix_tlbie_all() 767 { 768 /* TODO: LPID invalidate */ 769 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); 770 } 771 772 static void 773 mmu_radix_init_amor(void) 774 { 775 /* 776 * In HV mode, we init AMOR (Authority Mask Override Register) so that 777 * the hypervisor and guest can setup IAMR (Instruction Authority Mask 778 * Register), enable key 0 and set it to 1. 779 * 780 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11) 781 */ 782 mtspr(SPR_AMOR, (3ul << 62)); 783 } 784 785 static void 786 mmu_radix_init_iamr(void) 787 { 788 /* 789 * Radix always uses key0 of the IAMR to determine if an access is 790 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction 791 * fetch. 792 */ 793 mtspr(SPR_IAMR, (1ul << 62)); 794 } 795 796 static void 797 mmu_radix_pid_set(pmap_t pmap) 798 { 799 800 mtspr(SPR_PID, pmap->pm_pid); 801 isync(); 802 } 803 804 /* Quick sort callout for comparing physical addresses. 
*/ 805 static int 806 pa_cmp(const void *a, const void *b) 807 { 808 const vm_paddr_t *pa = a, *pb = b; 809 810 if (*pa < *pb) 811 return (-1); 812 else if (*pa > *pb) 813 return (1); 814 else 815 return (0); 816 } 817 818 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte) 819 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0) 820 #define pte_store(ptep, pte) do { \ 821 MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X)); \ 822 *(u_long *)(ptep) = (u_long)((pte) | PG_V | RPTE_LEAF); \ 823 } while (0) 824 /* 825 * NB: should only be used for adding directories - not for direct mappings 826 */ 827 #define pde_store(ptep, pa) do { \ 828 *(u_long *)(ptep) = (u_long)(pa|RPTE_VALID|RPTE_SHIFT); \ 829 } while (0) 830 831 #define pte_clear(ptep) do { \ 832 *(u_long *)(ptep) = (u_long)(0); \ 833 } while (0) 834 835 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */ 836 837 /* 838 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB 839 * (PTE) page mappings have identical settings for the following fields: 840 */ 841 #define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \ 842 PG_M | PG_A | RPTE_EAA_MASK | PG_V) 843 844 static __inline void 845 pmap_resident_count_inc(pmap_t pmap, int count) 846 { 847 848 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 849 pmap->pm_stats.resident_count += count; 850 } 851 852 static __inline void 853 pmap_resident_count_dec(pmap_t pmap, int count) 854 { 855 856 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 857 KASSERT(pmap->pm_stats.resident_count >= count, 858 ("pmap %p resident count underflow %ld %d", pmap, 859 pmap->pm_stats.resident_count, count)); 860 pmap->pm_stats.resident_count -= count; 861 } 862 863 static void 864 pagezero(vm_offset_t va) 865 { 866 va = trunc_page(va); 867 868 bzero((void *)va, PAGE_SIZE); 869 } 870 871 static uint64_t 872 allocpages(int n) 873 { 874 u_int64_t ret; 875 876 ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE); 877 for (int i = 0; i < n; i++) 878 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE)); 879 return (ret); 880 } 881 882 static pt_entry_t * 883 kvtopte(vm_offset_t va) 884 { 885 pt_entry_t *l3e; 886 887 l3e = pmap_pml3e(kernel_pmap, va); 888 if ((*l3e & RPTE_VALID) == 0) 889 return (NULL); 890 return (pmap_l3e_to_pte(l3e, va)); 891 } 892 893 void 894 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa) 895 { 896 pt_entry_t *pte; 897 898 pte = kvtopte(va); 899 MPASS(pte != NULL); 900 *pte = pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | \ 901 RPTE_EAA_P | PG_M | PG_A; 902 } 903 904 bool 905 mmu_radix_ps_enabled(pmap_t pmap) 906 { 907 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); 908 } 909 910 static pt_entry_t * 911 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e) 912 { 913 pml3_entry_t *l3e; 914 pt_entry_t *pte; 915 916 va &= PG_PS_FRAME; 917 l3e = pmap_pml3e(pmap, va); 918 if (l3e == NULL || (*l3e & PG_V) == 0) 919 return (NULL); 920 921 if (*l3e & RPTE_LEAF) { 922 *is_l3e = 1; 923 return (l3e); 924 } 925 *is_l3e = 0; 926 va &= PG_FRAME; 927 pte = pmap_l3e_to_pte(l3e, va); 928 if (pte == NULL || (*pte & PG_V) == 0) 929 return (NULL); 930 return (pte); 931 } 932 933 int 934 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags) 935 { 936 pt_entry_t *pte; 937 pt_entry_t startpte, origpte, newpte; 938 vm_page_t m; 939 int is_l3e; 940 941 startpte = 0; 942 retry: 943 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL) 944 return (KERN_INVALID_ADDRESS); 945 origpte = newpte = *pte; 946 if (startpte == 0) { 947 startpte = origpte; 948 if 
(((flags & VM_PROT_WRITE) && (startpte & PG_M)) || 949 ((flags & VM_PROT_READ) && (startpte & PG_A))) { 950 pmap_invalidate_all(pmap); 951 #ifdef INVARIANTS 952 if (VERBOSE_PMAP || pmap_logging) 953 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n", 954 __func__, pmap, va, flags, origpte); 955 #endif 956 return (KERN_FAILURE); 957 } 958 } 959 #ifdef INVARIANTS 960 if (VERBOSE_PMAP || pmap_logging) 961 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va, 962 flags, origpte); 963 #endif 964 PMAP_LOCK(pmap); 965 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL || 966 *pte != origpte) { 967 PMAP_UNLOCK(pmap); 968 return (KERN_FAILURE); 969 } 970 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME); 971 MPASS(m != NULL); 972 switch (flags) { 973 case VM_PROT_READ: 974 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0) 975 goto protfail; 976 newpte |= PG_A; 977 vm_page_aflag_set(m, PGA_REFERENCED); 978 break; 979 case VM_PROT_WRITE: 980 if ((newpte & RPTE_EAA_W) == 0) 981 goto protfail; 982 if (is_l3e) 983 goto protfail; 984 newpte |= PG_M; 985 vm_page_dirty(m); 986 break; 987 case VM_PROT_EXECUTE: 988 if ((newpte & RPTE_EAA_X) == 0) 989 goto protfail; 990 newpte |= PG_A; 991 vm_page_aflag_set(m, PGA_REFERENCED); 992 break; 993 } 994 995 if (!atomic_cmpset_long(pte, origpte, newpte)) 996 goto retry; 997 ptesync(); 998 PMAP_UNLOCK(pmap); 999 if (startpte == newpte) 1000 return (KERN_FAILURE); 1001 return (0); 1002 protfail: 1003 PMAP_UNLOCK(pmap); 1004 return (KERN_PROTECTION_FAILURE); 1005 } 1006 1007 /* 1008 * Returns TRUE if the given page is mapped individually or as part of 1009 * a 2mpage. Otherwise, returns FALSE. 1010 */ 1011 boolean_t 1012 mmu_radix_page_is_mapped(vm_page_t m) 1013 { 1014 struct rwlock *lock; 1015 boolean_t rv; 1016 1017 if ((m->oflags & VPO_UNMANAGED) != 0) 1018 return (FALSE); 1019 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 1020 rw_rlock(lock); 1021 rv = !TAILQ_EMPTY(&m->md.pv_list) || 1022 ((m->flags & PG_FICTITIOUS) == 0 && 1023 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 1024 rw_runlock(lock); 1025 return (rv); 1026 } 1027 1028 /* 1029 * Determine the appropriate bits to set in a PTE or PDE for a specified 1030 * caching mode. 
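 * For example, pmap_cache_bits(VM_MEMATTR_UNCACHEABLE) returns
 * RPTE_ATTR_GUARDEDIO and VM_MEMATTR_WRITE_COMBINING maps to
 * RPTE_ATTR_UNGUARDEDIO, while VM_MEMATTR_DEFAULT (and anything
 * unrecognized) yields 0, i.e. ordinary cacheable memory.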
1031 */ 1032 static int 1033 pmap_cache_bits(vm_memattr_t ma) 1034 { 1035 if (ma != VM_MEMATTR_DEFAULT) { 1036 switch (ma) { 1037 case VM_MEMATTR_UNCACHEABLE: 1038 return (RPTE_ATTR_GUARDEDIO); 1039 case VM_MEMATTR_CACHEABLE: 1040 return (RPTE_ATTR_MEM); 1041 case VM_MEMATTR_WRITE_BACK: 1042 case VM_MEMATTR_PREFETCHABLE: 1043 case VM_MEMATTR_WRITE_COMBINING: 1044 return (RPTE_ATTR_UNGUARDEDIO); 1045 } 1046 } 1047 return (0); 1048 } 1049 1050 static void 1051 pmap_invalidate_page(pmap_t pmap, vm_offset_t start) 1052 { 1053 ptesync(); 1054 if (pmap == kernel_pmap) 1055 radix_tlbie_invlpg_kernel_4k(start); 1056 else 1057 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start); 1058 ttusync(); 1059 } 1060 1061 static void 1062 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start) 1063 { 1064 ptesync(); 1065 if (pmap == kernel_pmap) 1066 radix_tlbie_invlpg_kernel_2m(start); 1067 else 1068 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start); 1069 ttusync(); 1070 } 1071 1072 static void 1073 pmap_invalidate_pwc(pmap_t pmap) 1074 { 1075 ptesync(); 1076 if (pmap == kernel_pmap) 1077 radix_tlbie_invlpwc_kernel(); 1078 else 1079 radix_tlbie_invlpwc_user(pmap->pm_pid); 1080 ttusync(); 1081 } 1082 1083 static void 1084 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end) 1085 { 1086 if (((end - start) >> PAGE_SHIFT) > 8) { 1087 pmap_invalidate_all(pmap); 1088 return; 1089 } 1090 ptesync(); 1091 if (pmap == kernel_pmap) { 1092 while (start < end) { 1093 radix_tlbie_invlpg_kernel_4k(start); 1094 start += PAGE_SIZE; 1095 } 1096 } else { 1097 while (start < end) { 1098 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start); 1099 start += PAGE_SIZE; 1100 } 1101 } 1102 ttusync(); 1103 } 1104 1105 static void 1106 pmap_invalidate_all(pmap_t pmap) 1107 { 1108 ptesync(); 1109 if (pmap == kernel_pmap) 1110 radix_tlbie_flush_kernel(); 1111 else 1112 radix_tlbie_flush_user(pmap->pm_pid); 1113 ttusync(); 1114 } 1115 1116 static void 1117 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e) 1118 { 1119 1120 /* 1121 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created 1122 * by a promotion that did not invalidate the 512 4KB page mappings 1123 * that might exist in the TLB. Consequently, at this point, the TLB 1124 * may hold both 4KB and 2MB page mappings for the address range [va, 1125 * va + L3_PAGE_SIZE). Therefore, the entire range must be invalidated here. 1126 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any 1127 * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a 1128 * single page invalidation suffices to invalidate the 2MB page mapping from the 1129 * TLB. 1130 */ 1131 ptesync(); 1132 if ((l3e & PG_PROMOTED) != 0) 1133 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1); 1134 else 1135 pmap_invalidate_page_2m(pmap, va); 1136 1137 pmap_invalidate_pwc(pmap); 1138 } 1139 1140 static __inline struct pv_chunk * 1141 pv_to_chunk(pv_entry_t pv) 1142 { 1143 1144 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1145 } 1146 1147 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1148 1149 #define PC_FREE0 0xfffffffffffffffful 1150 #define PC_FREE1 0x3ffffffffffffffful 1151 1152 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 }; 1153 1154 /* 1155 * Ensure that the number of spare PV entries in the specified pmap meets or 1156 * exceeds the given count, "needed". 1157 * 1158 * The given PV list lock may be released.
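 * Spare entries are counted by scanning the pc_map bitmaps of the chunks
 * already on the pmap's pm_pvchunk list; new chunks (each providing
 * _NPCPV entries) are then allocated, or reclaimed with
 * reclaim_pv_chunk(), until "needed" is satisfied.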
1159 */ 1160 static void 1161 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) 1162 { 1163 struct pch new_tail; 1164 struct pv_chunk *pc; 1165 vm_page_t m; 1166 int avail, free; 1167 bool reclaimed; 1168 1169 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1170 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); 1171 1172 /* 1173 * Newly allocated PV chunks must be stored in a private list until 1174 * the required number of PV chunks have been allocated. Otherwise, 1175 * reclaim_pv_chunk() could recycle one of these chunks. In 1176 * contrast, these chunks must be added to the pmap upon allocation. 1177 */ 1178 TAILQ_INIT(&new_tail); 1179 retry: 1180 avail = 0; 1181 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { 1182 // if ((cpu_feature2 & CPUID2_POPCNT) == 0) 1183 bit_count((bitstr_t *)pc->pc_map, 0, 1184 sizeof(pc->pc_map) * NBBY, &free); 1185 #if 0 1186 free = popcnt_pc_map_pq(pc->pc_map); 1187 #endif 1188 if (free == 0) 1189 break; 1190 avail += free; 1191 if (avail >= needed) 1192 break; 1193 } 1194 for (reclaimed = false; avail < needed; avail += _NPCPV) { 1195 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1196 VM_ALLOC_WIRED); 1197 if (m == NULL) { 1198 m = reclaim_pv_chunk(pmap, lockp); 1199 if (m == NULL) 1200 goto retry; 1201 reclaimed = true; 1202 } 1203 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 1204 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 1205 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 1206 pc->pc_pmap = pmap; 1207 pc->pc_map[0] = PC_FREE0; 1208 pc->pc_map[1] = PC_FREE1; 1209 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1210 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 1211 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); 1212 1213 /* 1214 * The reclaim might have freed a chunk from the current pmap. 1215 * If that chunk contained available entries, we need to 1216 * re-count the number of available entries. 1217 */ 1218 if (reclaimed) 1219 goto retry; 1220 } 1221 if (!TAILQ_EMPTY(&new_tail)) { 1222 mtx_lock(&pv_chunks_mutex); 1223 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); 1224 mtx_unlock(&pv_chunks_mutex); 1225 } 1226 } 1227 1228 /* 1229 * First find and then remove the pv entry for the specified pmap and virtual 1230 * address from the specified pv list. Returns the pv entry if found and NULL 1231 * otherwise. This operation can be performed on pv lists for either 4KB or 1232 * 2MB page mappings. 1233 */ 1234 static __inline pv_entry_t 1235 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1236 { 1237 pv_entry_t pv; 1238 1239 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 1240 #ifdef INVARIANTS 1241 if (PV_PMAP(pv) == NULL) { 1242 printf("corrupted pv_chunk/pv %p\n", pv); 1243 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":"); 1244 } 1245 MPASS(PV_PMAP(pv) != NULL); 1246 MPASS(pv->pv_va != 0); 1247 #endif 1248 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 1249 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 1250 pvh->pv_gen++; 1251 break; 1252 } 1253 } 1254 return (pv); 1255 } 1256 1257 /* 1258 * After demotion from a 2MB page mapping to 512 4KB page mappings, 1259 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv 1260 * entries for each of the 4KB page mappings. 
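 * The 2MB mapping's pv entry is reused for the first 4KB page; the other
 * 511 entries are taken from the pmap's spare pv chunks, which the caller
 * is expected to have reserved beforehand (see reserve_pv_entries()).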
1261 */ 1262 static void 1263 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 1264 struct rwlock **lockp) 1265 { 1266 struct md_page *pvh; 1267 struct pv_chunk *pc; 1268 pv_entry_t pv; 1269 vm_offset_t va_last; 1270 vm_page_t m; 1271 int bit, field; 1272 1273 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1274 KASSERT((pa & L3_PAGE_MASK) == 0, 1275 ("pmap_pv_demote_pde: pa is not 2mpage aligned")); 1276 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 1277 1278 /* 1279 * Transfer the 2mpage's pv entry for this mapping to the first 1280 * page's pv list. Once this transfer begins, the pv list lock 1281 * must not be released until the last pv entry is reinstantiated. 1282 */ 1283 pvh = pa_to_pvh(pa); 1284 va = trunc_2mpage(va); 1285 pv = pmap_pvh_remove(pvh, pmap, va); 1286 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 1287 m = PHYS_TO_VM_PAGE(pa); 1288 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1289 1290 m->md.pv_gen++; 1291 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 1292 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1)); 1293 va_last = va + L3_PAGE_SIZE - PAGE_SIZE; 1294 for (;;) { 1295 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1296 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 1297 , ("pmap_pv_demote_pde: missing spare")); 1298 for (field = 0; field < _NPCM; field++) { 1299 while (pc->pc_map[field]) { 1300 bit = cnttzd(pc->pc_map[field]); 1301 pc->pc_map[field] &= ~(1ul << bit); 1302 pv = &pc->pc_pventry[field * 64 + bit]; 1303 va += PAGE_SIZE; 1304 pv->pv_va = va; 1305 m++; 1306 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1307 ("pmap_pv_demote_pde: page %p is not managed", m)); 1308 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1309 1310 m->md.pv_gen++; 1311 if (va == va_last) 1312 goto out; 1313 } 1314 } 1315 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1316 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1317 } 1318 out: 1319 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) { 1320 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1321 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1322 } 1323 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1)); 1324 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1)); 1325 } 1326 1327 static void 1328 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap) 1329 { 1330 1331 if (pmap == NULL) 1332 return; 1333 pmap_invalidate_all(pmap); 1334 if (pmap != locked_pmap) 1335 PMAP_UNLOCK(pmap); 1336 } 1337 1338 /* 1339 * We are in a serious low memory condition. Resort to 1340 * drastic measures to free some pages so we can allocate 1341 * another pv entry chunk. 1342 * 1343 * Returns NULL if PV entries were reclaimed from the specified pmap. 1344 * 1345 * We do not, however, unmap 2mpages because subsequent accesses will 1346 * allocate per-page pv entries until repromotion occurs, thereby 1347 * exacerbating the shortage of free pv entries. 
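 * The pc_lru scan is bracketed by two marker entries so that the iterator
 * keeps its place while pv_chunks_mutex is dropped, and so the list can be
 * rotated without revisiting chunks that could not be freed.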
1348 */ 1349 static int active_reclaims = 0; 1350 static vm_page_t 1351 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) 1352 { 1353 struct pv_chunk *pc, *pc_marker, *pc_marker_end; 1354 struct pv_chunk_header pc_marker_b, pc_marker_end_b; 1355 struct md_page *pvh; 1356 pml3_entry_t *l3e; 1357 pmap_t next_pmap, pmap; 1358 pt_entry_t *pte, tpte; 1359 pv_entry_t pv; 1360 vm_offset_t va; 1361 vm_page_t m, m_pc; 1362 struct spglist free; 1363 uint64_t inuse; 1364 int bit, field, freed; 1365 1366 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1367 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); 1368 pmap = NULL; 1369 m_pc = NULL; 1370 SLIST_INIT(&free); 1371 bzero(&pc_marker_b, sizeof(pc_marker_b)); 1372 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b)); 1373 pc_marker = (struct pv_chunk *)&pc_marker_b; 1374 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b; 1375 1376 mtx_lock(&pv_chunks_mutex); 1377 active_reclaims++; 1378 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru); 1379 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru); 1380 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end && 1381 SLIST_EMPTY(&free)) { 1382 next_pmap = pc->pc_pmap; 1383 if (next_pmap == NULL) { 1384 /* 1385 * The next chunk is a marker. However, it is 1386 * not our marker, so active_reclaims must be 1387 * > 1. Consequently, the next_chunk code 1388 * will not rotate the pv_chunks list. 1389 */ 1390 goto next_chunk; 1391 } 1392 mtx_unlock(&pv_chunks_mutex); 1393 1394 /* 1395 * A pv_chunk can only be removed from the pc_lru list 1396 * when both pc_chunks_mutex is owned and the 1397 * corresponding pmap is locked. 1398 */ 1399 if (pmap != next_pmap) { 1400 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap); 1401 pmap = next_pmap; 1402 /* Avoid deadlock and lock recursion. */ 1403 if (pmap > locked_pmap) { 1404 RELEASE_PV_LIST_LOCK(lockp); 1405 PMAP_LOCK(pmap); 1406 mtx_lock(&pv_chunks_mutex); 1407 continue; 1408 } else if (pmap != locked_pmap) { 1409 if (PMAP_TRYLOCK(pmap)) { 1410 mtx_lock(&pv_chunks_mutex); 1411 continue; 1412 } else { 1413 pmap = NULL; /* pmap is not locked */ 1414 mtx_lock(&pv_chunks_mutex); 1415 pc = TAILQ_NEXT(pc_marker, pc_lru); 1416 if (pc == NULL || 1417 pc->pc_pmap != next_pmap) 1418 continue; 1419 goto next_chunk; 1420 } 1421 } 1422 } 1423 1424 /* 1425 * Destroy every non-wired, 4 KB page mapping in the chunk. 
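 * Wired (PG_W) pages and 2MB leaf mappings are skipped; each slot that
 * is freed is marked back into pc_map so the pv entry can be reused.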
1426 */ 1427 freed = 0; 1428 for (field = 0; field < _NPCM; field++) { 1429 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 1430 inuse != 0; inuse &= ~(1UL << bit)) { 1431 bit = cnttzd(inuse); 1432 pv = &pc->pc_pventry[field * 64 + bit]; 1433 va = pv->pv_va; 1434 l3e = pmap_pml3e(pmap, va); 1435 if ((*l3e & RPTE_LEAF) != 0) 1436 continue; 1437 pte = pmap_l3e_to_pte(l3e, va); 1438 if ((*pte & PG_W) != 0) 1439 continue; 1440 tpte = pte_load_clear(pte); 1441 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 1442 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 1443 vm_page_dirty(m); 1444 if ((tpte & PG_A) != 0) 1445 vm_page_aflag_set(m, PGA_REFERENCED); 1446 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 1447 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 1448 1449 m->md.pv_gen++; 1450 if (TAILQ_EMPTY(&m->md.pv_list) && 1451 (m->flags & PG_FICTITIOUS) == 0) { 1452 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 1453 if (TAILQ_EMPTY(&pvh->pv_list)) { 1454 vm_page_aflag_clear(m, 1455 PGA_WRITEABLE); 1456 } 1457 } 1458 pc->pc_map[field] |= 1UL << bit; 1459 pmap_unuse_pt(pmap, va, *l3e, &free); 1460 freed++; 1461 } 1462 } 1463 if (freed == 0) { 1464 mtx_lock(&pv_chunks_mutex); 1465 goto next_chunk; 1466 } 1467 /* Every freed mapping is for a 4 KB page. */ 1468 pmap_resident_count_dec(pmap, freed); 1469 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 1470 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 1471 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 1472 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1473 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) { 1474 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 1475 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 1476 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 1477 /* Entire chunk is free; return it. */ 1478 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 1479 mtx_lock(&pv_chunks_mutex); 1480 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1481 break; 1482 } 1483 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1484 mtx_lock(&pv_chunks_mutex); 1485 /* One freed pv entry in locked_pmap is sufficient. */ 1486 if (pmap == locked_pmap) 1487 break; 1488 next_chunk: 1489 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru); 1490 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru); 1491 if (active_reclaims == 1 && pmap != NULL) { 1492 /* 1493 * Rotate the pv chunks list so that we do not 1494 * scan the same pv chunks that could not be 1495 * freed (because they contained a wired 1496 * and/or superpage mapping) on every 1497 * invocation of reclaim_pv_chunk(). 1498 */ 1499 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) { 1500 MPASS(pc->pc_pmap != NULL); 1501 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1502 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1503 } 1504 } 1505 } 1506 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru); 1507 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru); 1508 active_reclaims--; 1509 mtx_unlock(&pv_chunks_mutex); 1510 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap); 1511 if (m_pc == NULL && !SLIST_EMPTY(&free)) { 1512 m_pc = SLIST_FIRST(&free); 1513 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 1514 /* Recycle a freed page table page. 
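 * The caller can then use it for a new pv chunk without having to call
 * vm_page_alloc().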
*/ 1515 m_pc->ref_count = 1; 1516 } 1517 vm_page_free_pages_toq(&free, true); 1518 return (m_pc); 1519 } 1520 1521 /* 1522 * free the pv_entry back to the free list 1523 */ 1524 static void 1525 free_pv_entry(pmap_t pmap, pv_entry_t pv) 1526 { 1527 struct pv_chunk *pc; 1528 int idx, field, bit; 1529 1530 #ifdef VERBOSE_PV 1531 if (pmap != kernel_pmap) 1532 printf("%s(%p, %p)\n", __func__, pmap, pv); 1533 #endif 1534 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1535 PV_STAT(atomic_add_long(&pv_entry_frees, 1)); 1536 PV_STAT(atomic_add_int(&pv_entry_spare, 1)); 1537 PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); 1538 pc = pv_to_chunk(pv); 1539 idx = pv - &pc->pc_pventry[0]; 1540 field = idx / 64; 1541 bit = idx % 64; 1542 pc->pc_map[field] |= 1ul << bit; 1543 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) { 1544 /* 98% of the time, pc is already at the head of the list. */ 1545 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { 1546 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1547 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1548 } 1549 return; 1550 } 1551 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1552 free_pv_chunk(pc); 1553 } 1554 1555 static void 1556 free_pv_chunk(struct pv_chunk *pc) 1557 { 1558 vm_page_t m; 1559 1560 mtx_lock(&pv_chunks_mutex); 1561 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1562 mtx_unlock(&pv_chunks_mutex); 1563 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 1564 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 1565 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 1566 /* entire chunk is free, return it */ 1567 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 1568 vm_page_unwire_noq(m); 1569 vm_page_free(m); 1570 } 1571 1572 /* 1573 * Returns a new PV entry, allocating a new PV chunk from the system when 1574 * needed. If this PV chunk allocation fails and a PV list lock pointer was 1575 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is 1576 * returned. 1577 * 1578 * The given PV list lock may be released. 
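 * Passing a NULL "lockp" therefore makes a failed chunk allocation return
 * NULL instead of reclaiming, which is how pmap_try_insert_pv_entry()
 * disables reclamation.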
1579 */ 1580 static pv_entry_t 1581 get_pv_entry(pmap_t pmap, struct rwlock **lockp) 1582 { 1583 int bit, field; 1584 pv_entry_t pv; 1585 struct pv_chunk *pc; 1586 vm_page_t m; 1587 1588 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1589 PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); 1590 retry: 1591 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1592 if (pc != NULL) { 1593 for (field = 0; field < _NPCM; field++) { 1594 if (pc->pc_map[field]) { 1595 bit = cnttzd(pc->pc_map[field]); 1596 break; 1597 } 1598 } 1599 if (field < _NPCM) { 1600 pv = &pc->pc_pventry[field * 64 + bit]; 1601 pc->pc_map[field] &= ~(1ul << bit); 1602 /* If this was the last item, move it to tail */ 1603 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) { 1604 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1605 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, 1606 pc_list); 1607 } 1608 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 1609 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); 1610 MPASS(PV_PMAP(pv) != NULL); 1611 return (pv); 1612 } 1613 } 1614 /* No free items, allocate another chunk */ 1615 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1616 VM_ALLOC_WIRED); 1617 if (m == NULL) { 1618 if (lockp == NULL) { 1619 PV_STAT(pc_chunk_tryfail++); 1620 return (NULL); 1621 } 1622 m = reclaim_pv_chunk(pmap, lockp); 1623 if (m == NULL) 1624 goto retry; 1625 } 1626 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 1627 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 1628 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 1629 pc->pc_pmap = pmap; 1630 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ 1631 pc->pc_map[1] = PC_FREE1; 1632 mtx_lock(&pv_chunks_mutex); 1633 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1634 mtx_unlock(&pv_chunks_mutex); 1635 pv = &pc->pc_pventry[0]; 1636 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1637 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 1638 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); 1639 MPASS(PV_PMAP(pv) != NULL); 1640 return (pv); 1641 } 1642 1643 #if VM_NRESERVLEVEL > 0 1644 /* 1645 * After promotion from 512 4KB page mappings to a single 2MB page mapping, 1646 * replace the many pv entries for the 4KB page mappings by a single pv entry 1647 * for the 2MB page mapping. 1648 */ 1649 static void 1650 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 1651 struct rwlock **lockp) 1652 { 1653 struct md_page *pvh; 1654 pv_entry_t pv; 1655 vm_offset_t va_last; 1656 vm_page_t m; 1657 1658 KASSERT((pa & L3_PAGE_MASK) == 0, 1659 ("pmap_pv_promote_pde: pa is not 2mpage aligned")); 1660 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 1661 1662 /* 1663 * Transfer the first page's pv entry for this mapping to the 2mpage's 1664 * pv list. Aside from avoiding the cost of a call to get_pv_entry(), 1665 * a transfer avoids the possibility that get_pv_entry() calls 1666 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the 1667 * mappings that is being promoted. 1668 */ 1669 m = PHYS_TO_VM_PAGE(pa); 1670 va = trunc_2mpage(va); 1671 pv = pmap_pvh_remove(&m->md, pmap, va); 1672 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 1673 pvh = pa_to_pvh(pa); 1674 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 1675 pvh->pv_gen++; 1676 /* Free the remaining NPTEPG - 1 pv entries. */ 1677 va_last = va + L3_PAGE_SIZE - PAGE_SIZE; 1678 do { 1679 m++; 1680 va += PAGE_SIZE; 1681 pmap_pvh_free(&m->md, pmap, va); 1682 } while (va < va_last); 1683 } 1684 #endif /* VM_NRESERVLEVEL > 0 */ 1685 1686 /* 1687 * First find and then destroy the pv entry for the specified pmap and virtual 1688 * address. 
This operation can be performed on pv lists for either 4KB or 2MB 1689 * page mappings. 1690 */ 1691 static void 1692 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1693 { 1694 pv_entry_t pv; 1695 1696 pv = pmap_pvh_remove(pvh, pmap, va); 1697 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 1698 free_pv_entry(pmap, pv); 1699 } 1700 1701 /* 1702 * Conditionally create the PV entry for a 4KB page mapping if the required 1703 * memory can be allocated without resorting to reclamation. 1704 */ 1705 static boolean_t 1706 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, 1707 struct rwlock **lockp) 1708 { 1709 pv_entry_t pv; 1710 1711 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1712 /* Pass NULL instead of the lock pointer to disable reclamation. */ 1713 if ((pv = get_pv_entry(pmap, NULL)) != NULL) { 1714 pv->pv_va = va; 1715 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 1716 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1717 m->md.pv_gen++; 1718 return (TRUE); 1719 } else 1720 return (FALSE); 1721 } 1722 1723 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX]; 1724 #ifdef INVARIANTS 1725 static void 1726 validate_addr(vm_paddr_t addr, vm_size_t size) 1727 { 1728 vm_paddr_t end = addr + size; 1729 bool found = false; 1730 1731 for (int i = 0; i < 2 * phys_avail_count; i += 2) { 1732 if (addr >= phys_avail_debug[i] && 1733 end <= phys_avail_debug[i + 1]) { 1734 found = true; 1735 break; 1736 } 1737 } 1738 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array", 1739 addr, end)); 1740 } 1741 #else 1742 static void validate_addr(vm_paddr_t addr, vm_size_t size) {} 1743 #endif 1744 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A) 1745 1746 static vm_paddr_t 1747 alloc_pt_page(void) 1748 { 1749 vm_paddr_t page; 1750 1751 page = allocpages(1); 1752 pagezero(PHYS_TO_DMAP(page)); 1753 return (page); 1754 } 1755 1756 static void 1757 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end) 1758 { 1759 pt_entry_t *pte, pteval; 1760 vm_paddr_t page; 1761 1762 if (bootverbose) 1763 printf("%s %lx -> %lx\n", __func__, start, end); 1764 while (start < end) { 1765 pteval = start | DMAP_PAGE_BITS; 1766 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start)); 1767 if ((*pte & RPTE_VALID) == 0) { 1768 page = alloc_pt_page(); 1769 pde_store(pte, page); 1770 } 1771 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start)); 1772 if ((start & L2_PAGE_MASK) == 0 && 1773 end - start >= L2_PAGE_SIZE) { 1774 start += L2_PAGE_SIZE; 1775 goto done; 1776 } else if ((*pte & RPTE_VALID) == 0) { 1777 page = alloc_pt_page(); 1778 pde_store(pte, page); 1779 } 1780 1781 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start)); 1782 if ((start & L3_PAGE_MASK) == 0 && 1783 end - start >= L3_PAGE_SIZE) { 1784 start += L3_PAGE_SIZE; 1785 goto done; 1786 } else if ((*pte & RPTE_VALID) == 0) { 1787 page = alloc_pt_page(); 1788 pde_store(pte, page); 1789 } 1790 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start)); 1791 start += PAGE_SIZE; 1792 done: 1793 pte_store(pte, pteval); 1794 } 1795 } 1796 1797 static void 1798 mmu_radix_dmap_populate(vm_size_t hwphyssz) 1799 { 1800 vm_paddr_t start, end; 1801 1802 for (int i = 0; i < pregions_sz; i++) { 1803 start = pregions[i].mr_start; 1804 end = start + pregions[i].mr_size; 1805 if (hwphyssz && start >= hwphyssz) 1806 break; 1807 if (hwphyssz && hwphyssz < end) 1808 end = hwphyssz; 1809 mmu_radix_dmap_range(start, end); 1810 } 1811 } 1812 1813 static void 1814 mmu_radix_setup_pagetables(vm_size_t hwphyssz) 1815 { 1816 vm_paddr_t ptpages, pages; 1817 pt_entry_t 
*pte; 1818 vm_paddr_t l1phys; 1819 1820 bzero(kernel_pmap, sizeof(struct pmap)); 1821 PMAP_LOCK_INIT(kernel_pmap); 1822 1823 ptpages = allocpages(2); 1824 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE); 1825 validate_addr(l1phys, RADIX_PGD_SIZE); 1826 if (bootverbose) 1827 printf("l1phys=%lx\n", l1phys); 1828 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0); 1829 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++) 1830 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE)); 1831 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys); 1832 1833 mmu_radix_dmap_populate(hwphyssz); 1834 1835 /* 1836 * Create page tables for first 128MB of KVA 1837 */ 1838 pages = ptpages; 1839 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS); 1840 *pte = (pages | RPTE_VALID | RPTE_SHIFT); 1841 pages += PAGE_SIZE; 1842 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS); 1843 *pte = (pages | RPTE_VALID | RPTE_SHIFT); 1844 pages += PAGE_SIZE; 1845 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS); 1846 /* 1847 * the kernel page table pages need to be preserved in 1848 * phys_avail and not overlap with previous allocations 1849 */ 1850 pages = allocpages(nkpt); 1851 if (bootverbose) { 1852 printf("phys_avail after dmap populate and nkpt allocation\n"); 1853 for (int j = 0; j < 2 * phys_avail_count; j+=2) 1854 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n", 1855 j, phys_avail[j], j + 1, phys_avail[j + 1]); 1856 } 1857 KPTphys = pages; 1858 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE) 1859 *pte = (pages | RPTE_VALID | RPTE_SHIFT); 1860 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE; 1861 if (bootverbose) 1862 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1); 1863 /* 1864 * Add a physical memory segment (vm_phys_seg) corresponding to the 1865 * preallocated kernel page table pages so that vm_page structures 1866 * representing these pages will be created. The vm_page structures 1867 * are required for promotion of the corresponding kernel virtual 1868 * addresses to superpage mappings. 
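 * Each of the nkpt preallocated page table pages maps L3_PAGE_SIZE (2MB)
 * of KVA, so the "first 128MB of KVA" comment above presumably corresponds
 * to nkpt == 64.  The segment registered below spans the ptoa(nkpt) bytes
 * of physical memory starting at KPTphys that back those page table pages.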
1869 */ 1870 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 1871 } 1872 1873 static void 1874 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end) 1875 { 1876 vm_paddr_t kpstart, kpend; 1877 vm_size_t physsz, hwphyssz; 1878 //uint64_t l2virt; 1879 int rm_pavail, proctab_size; 1880 int i, j; 1881 1882 kpstart = start & ~DMAP_BASE_ADDRESS; 1883 kpend = end & ~DMAP_BASE_ADDRESS; 1884 1885 /* Get physical memory regions from firmware */ 1886 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz); 1887 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory"); 1888 1889 if (2 * VM_PHYSSEG_MAX < regions_sz) 1890 panic("mmu_radix_early_bootstrap: phys_avail too small"); 1891 1892 if (bootverbose) 1893 for (int i = 0; i < regions_sz; i++) 1894 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n", 1895 i, regions[i].mr_start, i, regions[i].mr_size); 1896 /* 1897 * XXX workaround a simulator bug 1898 */ 1899 for (int i = 0; i < regions_sz; i++) 1900 if (regions[i].mr_start & PAGE_MASK) { 1901 regions[i].mr_start += PAGE_MASK; 1902 regions[i].mr_start &= ~PAGE_MASK; 1903 regions[i].mr_size &= ~PAGE_MASK; 1904 } 1905 if (bootverbose) 1906 for (int i = 0; i < pregions_sz; i++) 1907 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n", 1908 i, pregions[i].mr_start, i, pregions[i].mr_size); 1909 1910 phys_avail_count = 0; 1911 physsz = 0; 1912 hwphyssz = 0; 1913 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1914 for (i = 0, j = 0; i < regions_sz; i++) { 1915 if (bootverbose) 1916 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n", 1917 i, regions[i].mr_start, i, regions[i].mr_size); 1918 1919 if (regions[i].mr_size < PAGE_SIZE) 1920 continue; 1921 1922 if (hwphyssz != 0 && 1923 (physsz + regions[i].mr_size) >= hwphyssz) { 1924 if (physsz < hwphyssz) { 1925 phys_avail[j] = regions[i].mr_start; 1926 phys_avail[j + 1] = regions[i].mr_start + 1927 (hwphyssz - physsz); 1928 physsz = hwphyssz; 1929 phys_avail_count++; 1930 dump_avail[j] = phys_avail[j]; 1931 dump_avail[j + 1] = phys_avail[j + 1]; 1932 } 1933 break; 1934 } 1935 phys_avail[j] = regions[i].mr_start; 1936 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 1937 dump_avail[j] = phys_avail[j]; 1938 dump_avail[j + 1] = phys_avail[j + 1]; 1939 1940 phys_avail_count++; 1941 physsz += regions[i].mr_size; 1942 j += 2; 1943 } 1944 1945 /* Check for overlap with the kernel and exception vectors */ 1946 rm_pavail = 0; 1947 for (j = 0; j < 2 * phys_avail_count; j+=2) { 1948 if (phys_avail[j] < EXC_LAST) 1949 phys_avail[j] += EXC_LAST; 1950 1951 if (phys_avail[j] >= kpstart && 1952 phys_avail[j + 1] <= kpend) { 1953 phys_avail[j] = phys_avail[j + 1] = ~0; 1954 rm_pavail++; 1955 continue; 1956 } 1957 1958 if (kpstart >= phys_avail[j] && 1959 kpstart < phys_avail[j + 1]) { 1960 if (kpend < phys_avail[j + 1]) { 1961 phys_avail[2 * phys_avail_count] = 1962 (kpend & ~PAGE_MASK) + PAGE_SIZE; 1963 phys_avail[2 * phys_avail_count + 1] = 1964 phys_avail[j + 1]; 1965 phys_avail_count++; 1966 } 1967 1968 phys_avail[j + 1] = kpstart & ~PAGE_MASK; 1969 } 1970 1971 if (kpend >= phys_avail[j] && 1972 kpend < phys_avail[j + 1]) { 1973 if (kpstart > phys_avail[j]) { 1974 phys_avail[2 * phys_avail_count] = phys_avail[j]; 1975 phys_avail[2 * phys_avail_count + 1] = 1976 kpstart & ~PAGE_MASK; 1977 phys_avail_count++; 1978 } 1979 1980 phys_avail[j] = (kpend & ~PAGE_MASK) + 1981 PAGE_SIZE; 1982 } 1983 } 1984 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp); 1985 for (i = 0; i < 2 *
phys_avail_count; i++) 1986 phys_avail_debug[i] = phys_avail[i]; 1987 1988 /* Remove physical available regions marked for removal (~0) */ 1989 if (rm_pavail) { 1990 phys_avail_count -= rm_pavail; 1991 for (i = 2 * phys_avail_count; 1992 i < 2*(phys_avail_count + rm_pavail); i+=2) 1993 phys_avail[i] = phys_avail[i + 1] = 0; 1994 } 1995 if (bootverbose) { 1996 printf("phys_avail ranges after filtering:\n"); 1997 for (j = 0; j < 2 * phys_avail_count; j+=2) 1998 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n", 1999 j, phys_avail[j], j + 1, phys_avail[j + 1]); 2000 } 2001 physmem = btoc(physsz); 2002 2003 /* XXX assume we're running non-virtualized and 2004 * we don't support BHYVE 2005 */ 2006 if (isa3_pid_bits == 0) 2007 isa3_pid_bits = 20; 2008 parttab_phys = moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE); 2009 validate_addr(parttab_phys, PARTTAB_SIZE); 2010 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++) 2011 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE)); 2012 2013 proctab_size = 1UL << PROCTAB_SIZE_SHIFT; 2014 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size); 2015 validate_addr(proctab0pa, proctab_size); 2016 for (int i = 0; i < proctab_size/PAGE_SIZE; i++) 2017 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE)); 2018 2019 mmu_radix_setup_pagetables(hwphyssz); 2020 } 2021 2022 static void 2023 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end) 2024 { 2025 int i; 2026 vm_paddr_t pa; 2027 void *dpcpu; 2028 vm_offset_t va; 2029 2030 /* 2031 * Set up the Open Firmware pmap and add its mappings if not in real 2032 * mode. 2033 */ 2034 if (bootverbose) 2035 printf("%s enter\n", __func__); 2036 2037 /* 2038 * Calculate the last available physical address, and reserve the 2039 * vm_page_array (upper bound). 2040 */ 2041 Maxmem = 0; 2042 for (i = 0; phys_avail[i + 2] != 0; i += 2) 2043 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1])); 2044 2045 /* 2046 * Set the start and end of kva. 2047 */ 2048 virtual_avail = VM_MIN_KERNEL_ADDRESS; 2049 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 2050 2051 /* 2052 * Remap any early IO mappings (console framebuffer, etc.) 2053 */ 2054 bs_remap_earlyboot(); 2055 2056 /* 2057 * Allocate a kernel stack with a guard page for thread0 and map it 2058 * into the kernel page map. 2059 */ 2060 pa = allocpages(kstack_pages); 2061 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 2062 virtual_avail = va + kstack_pages * PAGE_SIZE; 2063 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 2064 thread0.td_kstack = va; 2065 for (i = 0; i < kstack_pages; i++) { 2066 mmu_radix_kenter(va, pa); 2067 pa += PAGE_SIZE; 2068 va += PAGE_SIZE; 2069 } 2070 thread0.td_kstack_pages = kstack_pages; 2071 2072 /* 2073 * Allocate virtual address space for the message buffer. 2074 */ 2075 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT); 2076 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa); 2077 2078 /* 2079 * Allocate virtual address space for the dynamic percpu area. 2080 */ 2081 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT); 2082 dpcpu = (void *)PHYS_TO_DMAP(pa); 2083 dpcpu_init(dpcpu, curcpu); 2084 /* 2085 * Reserve some special page table entries/VA space for temporary 2086 * mapping of pages. 
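 * The single-page "qframe" VA and its qframe_mtx spin lock used for such
 * temporary mappings are set up later, in mmu_radix_init().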
2087 */ 2088 } 2089 2090 static void 2091 mmu_parttab_init(void) 2092 { 2093 uint64_t ptcr; 2094 2095 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys); 2096 2097 if (bootverbose) 2098 printf("%s parttab: %p\n", __func__, isa3_parttab); 2099 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12); 2100 if (bootverbose) 2101 printf("setting ptcr %lx\n", ptcr); 2102 mtspr(SPR_PTCR, ptcr); 2103 } 2104 2105 static void 2106 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab) 2107 { 2108 uint64_t prev; 2109 2110 if (bootverbose) 2111 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab, 2112 lpid, pagetab, proctab); 2113 prev = be64toh(isa3_parttab[lpid].pagetab); 2114 isa3_parttab[lpid].pagetab = htobe64(pagetab); 2115 isa3_parttab[lpid].proctab = htobe64(proctab); 2116 2117 if (prev & PARTTAB_HR) { 2118 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : 2119 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2120 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : 2121 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2122 } else { 2123 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : 2124 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2125 } 2126 ttusync(); 2127 } 2128 2129 static void 2130 mmu_radix_parttab_init(void) 2131 { 2132 uint64_t pagetab; 2133 2134 mmu_parttab_init(); 2135 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \ 2136 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR; 2137 mmu_parttab_update(0, pagetab, 0); 2138 } 2139 2140 static void 2141 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size) 2142 { 2143 uint64_t pagetab, proctab; 2144 2145 pagetab = be64toh(isa3_parttab[0].pagetab); 2146 proctab = proctabpa | table_size | PARTTAB_GR; 2147 mmu_parttab_update(0, pagetab, proctab); 2148 } 2149 2150 static void 2151 mmu_radix_proctab_init(void) 2152 { 2153 2154 isa3_base_pid = 1; 2155 2156 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa); 2157 isa3_proctab->proctab0 = 2158 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | 2159 RADIX_PGD_INDEX_SHIFT); 2160 2161 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12); 2162 2163 __asm __volatile("ptesync" : : : "memory"); 2164 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : 2165 "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); 2166 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory"); 2167 if (bootverbose) 2168 printf("process table %p and kernel radix PDE: %p\n", 2169 isa3_proctab, kernel_pmap->pm_pml1); 2170 mtmsr(mfmsr() | PSL_DR ); 2171 mtmsr(mfmsr() & ~PSL_DR); 2172 kernel_pmap->pm_pid = isa3_base_pid; 2173 isa3_base_pid++; 2174 } 2175 2176 void 2177 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 2178 int advice) 2179 { 2180 struct rwlock *lock; 2181 pml1_entry_t *l1e; 2182 pml2_entry_t *l2e; 2183 pml3_entry_t oldl3e, *l3e; 2184 pt_entry_t *pte; 2185 vm_offset_t va, va_next; 2186 vm_page_t m; 2187 boolean_t anychanged; 2188 2189 if (advice != MADV_DONTNEED && advice != MADV_FREE) 2190 return; 2191 anychanged = FALSE; 2192 PMAP_LOCK(pmap); 2193 for (; sva < eva; sva = va_next) { 2194 l1e = pmap_pml1e(pmap, sva); 2195 if ((*l1e & PG_V) == 0) { 2196 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 2197 if (va_next < sva) 2198 va_next = eva; 2199 continue; 2200 } 2201 l2e = pmap_l1e_to_l2e(l1e, sva); 2202 if ((*l2e & PG_V) == 0) { 2203 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 2204 if (va_next < sva) 2205 va_next = eva; 2206 continue; 2207 } 2208 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 2209 if (va_next < sva) 2210 va_next = eva; 2211 l3e = 
pmap_l2e_to_l3e(l2e, sva); 2212 oldl3e = *l3e; 2213 if ((oldl3e & PG_V) == 0) 2214 continue; 2215 else if ((oldl3e & RPTE_LEAF) != 0) { 2216 if ((oldl3e & PG_MANAGED) == 0) 2217 continue; 2218 lock = NULL; 2219 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) { 2220 if (lock != NULL) 2221 rw_wunlock(lock); 2222 2223 /* 2224 * The large page mapping was destroyed. 2225 */ 2226 continue; 2227 } 2228 2229 /* 2230 * Unless the page mappings are wired, remove the 2231 * mapping to a single page so that a subsequent 2232 * access may repromote. Since the underlying page 2233 * table page is fully populated, this removal never 2234 * frees a page table page. 2235 */ 2236 if ((oldl3e & PG_W) == 0) { 2237 pte = pmap_l3e_to_pte(l3e, sva); 2238 KASSERT((*pte & PG_V) != 0, 2239 ("pmap_advise: invalid PTE")); 2240 pmap_remove_pte(pmap, pte, sva, *l3e, NULL, 2241 &lock); 2242 anychanged = TRUE; 2243 } 2244 if (lock != NULL) 2245 rw_wunlock(lock); 2246 } 2247 if (va_next > eva) 2248 va_next = eva; 2249 va = va_next; 2250 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; 2251 pte++, sva += PAGE_SIZE) { 2252 MPASS(pte == pmap_pte(pmap, sva)); 2253 2254 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 2255 goto maybe_invlrng; 2256 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2257 if (advice == MADV_DONTNEED) { 2258 /* 2259 * Future calls to pmap_is_modified() 2260 * can be avoided by making the page 2261 * dirty now. 2262 */ 2263 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 2264 vm_page_dirty(m); 2265 } 2266 atomic_clear_long(pte, PG_M | PG_A); 2267 } else if ((*pte & PG_A) != 0) 2268 atomic_clear_long(pte, PG_A); 2269 else 2270 goto maybe_invlrng; 2271 anychanged = TRUE; 2272 continue; 2273 maybe_invlrng: 2274 if (va != va_next) { 2275 anychanged = true; 2276 va = va_next; 2277 } 2278 } 2279 if (va != va_next) 2280 anychanged = true; 2281 } 2282 if (anychanged) 2283 pmap_invalidate_all(pmap); 2284 PMAP_UNLOCK(pmap); 2285 } 2286 2287 /* 2288 * Routines used in machine-dependent code 2289 */ 2290 static void 2291 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end) 2292 { 2293 uint64_t lpcr; 2294 2295 if (bootverbose) 2296 printf("%s\n", __func__); 2297 hw_direct_map = 1; 2298 mmu_radix_early_bootstrap(start, end); 2299 if (bootverbose) 2300 printf("early bootstrap complete\n"); 2301 if (powernv_enabled) { 2302 lpcr = mfspr(SPR_LPCR); 2303 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 2304 mmu_radix_parttab_init(); 2305 mmu_radix_init_amor(); 2306 if (bootverbose) 2307 printf("powernv init complete\n"); 2308 } 2309 mmu_radix_init_iamr(); 2310 mmu_radix_proctab_init(); 2311 mmu_radix_pid_set(kernel_pmap); 2312 /* XXX assume CPU_FTR_HVMODE */ 2313 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); 2314 2315 mmu_radix_late_bootstrap(start, end); 2316 numa_mem_regions(&numa_pregions, &numa_pregions_sz); 2317 if (bootverbose) 2318 printf("%s done\n", __func__); 2319 pmap_bootstrapped = 1; 2320 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE); 2321 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS); 2322 } 2323 2324 static void 2325 mmu_radix_cpu_bootstrap(int ap) 2326 { 2327 uint64_t lpcr; 2328 uint64_t ptcr; 2329 2330 if (powernv_enabled) { 2331 lpcr = mfspr(SPR_LPCR); 2332 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 2333 2334 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12); 2335 mtspr(SPR_PTCR, ptcr); 2336 mmu_radix_init_amor(); 2337 } 2338 mmu_radix_init_iamr(); 2339 mmu_radix_pid_set(kernel_pmap); 2340 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); 2341 } 2342 2343 static 
SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0, 2344 "2MB page mapping counters"); 2345 2346 static u_long pmap_l3e_demotions; 2347 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD, 2348 &pmap_l3e_demotions, 0, "2MB page demotions"); 2349 2350 static u_long pmap_l3e_mappings; 2351 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD, 2352 &pmap_l3e_mappings, 0, "2MB page mappings"); 2353 2354 static u_long pmap_l3e_p_failures; 2355 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD, 2356 &pmap_l3e_p_failures, 0, "2MB page promotion failures"); 2357 2358 static u_long pmap_l3e_promotions; 2359 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD, 2360 &pmap_l3e_promotions, 0, "2MB page promotions"); 2361 2362 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0, 2363 "1GB page mapping counters"); 2364 2365 static u_long pmap_l2e_demotions; 2366 SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD, 2367 &pmap_l2e_demotions, 0, "1GB page demotions"); 2368 2369 void 2370 mmu_radix_clear_modify(vm_page_t m) 2371 { 2372 struct md_page *pvh; 2373 pmap_t pmap; 2374 pv_entry_t next_pv, pv; 2375 pml3_entry_t oldl3e, *l3e; 2376 pt_entry_t oldpte, *pte; 2377 struct rwlock *lock; 2378 vm_offset_t va; 2379 int md_gen, pvh_gen; 2380 2381 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2382 ("pmap_clear_modify: page %p is not managed", m)); 2383 vm_page_assert_busied(m); 2384 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 2385 2386 /* 2387 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 2388 * If the object containing the page is locked and the page is not 2389 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 2390 */ 2391 if ((m->a.flags & PGA_WRITEABLE) == 0) 2392 return; 2393 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : 2394 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2395 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 2396 rw_wlock(lock); 2397 restart: 2398 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) { 2399 pmap = PV_PMAP(pv); 2400 if (!PMAP_TRYLOCK(pmap)) { 2401 pvh_gen = pvh->pv_gen; 2402 rw_wunlock(lock); 2403 PMAP_LOCK(pmap); 2404 rw_wlock(lock); 2405 if (pvh_gen != pvh->pv_gen) { 2406 PMAP_UNLOCK(pmap); 2407 goto restart; 2408 } 2409 } 2410 va = pv->pv_va; 2411 l3e = pmap_pml3e(pmap, va); 2412 oldl3e = *l3e; 2413 if ((oldl3e & PG_RW) != 0) { 2414 if (pmap_demote_l3e_locked(pmap, l3e, va, &lock)) { 2415 if ((oldl3e & PG_W) == 0) { 2416 /* 2417 * Write protect the mapping to a 2418 * single page so that a subsequent 2419 * write access may repromote. 
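 * The arithmetic below recovers the 4KB mapping of interest after the
 * demotion: (VM_PAGE_TO_PHYS(m) - (oldl3e & PG_PS_FRAME)) is the offset
 * of page m within the former 2MB mapping, and adding it to the
 * 2MB-aligned va yields the virtual address whose PTE is then write
 * protected, with the modified state transferred via vm_page_dirty().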
2420 */ 2421 va += VM_PAGE_TO_PHYS(m) - (oldl3e & 2422 PG_PS_FRAME); 2423 pte = pmap_l3e_to_pte(l3e, va); 2424 oldpte = *pte; 2425 if ((oldpte & PG_V) != 0) { 2426 while (!atomic_cmpset_long(pte, 2427 oldpte, 2428 (oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW))) 2429 oldpte = *pte; 2430 vm_page_dirty(m); 2431 pmap_invalidate_page(pmap, va); 2432 } 2433 } 2434 } 2435 } 2436 PMAP_UNLOCK(pmap); 2437 } 2438 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2439 pmap = PV_PMAP(pv); 2440 if (!PMAP_TRYLOCK(pmap)) { 2441 md_gen = m->md.pv_gen; 2442 pvh_gen = pvh->pv_gen; 2443 rw_wunlock(lock); 2444 PMAP_LOCK(pmap); 2445 rw_wlock(lock); 2446 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 2447 PMAP_UNLOCK(pmap); 2448 goto restart; 2449 } 2450 } 2451 l3e = pmap_pml3e(pmap, pv->pv_va); 2452 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_clear_modify: found" 2453 " a 2mpage in page %p's pv list", m)); 2454 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 2455 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2456 atomic_clear_long(pte, PG_M); 2457 pmap_invalidate_page(pmap, pv->pv_va); 2458 } 2459 PMAP_UNLOCK(pmap); 2460 } 2461 rw_wunlock(lock); 2462 } 2463 2464 void 2465 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2466 vm_size_t len, vm_offset_t src_addr) 2467 { 2468 struct rwlock *lock; 2469 struct spglist free; 2470 vm_offset_t addr; 2471 vm_offset_t end_addr = src_addr + len; 2472 vm_offset_t va_next; 2473 vm_page_t dst_pdpg, dstmpte, srcmpte; 2474 bool invalidate_all; 2475 2476 CTR6(KTR_PMAP, 2477 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n", 2478 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr); 2479 2480 if (dst_addr != src_addr) 2481 return; 2482 lock = NULL; 2483 invalidate_all = false; 2484 if (dst_pmap < src_pmap) { 2485 PMAP_LOCK(dst_pmap); 2486 PMAP_LOCK(src_pmap); 2487 } else { 2488 PMAP_LOCK(src_pmap); 2489 PMAP_LOCK(dst_pmap); 2490 } 2491 2492 for (addr = src_addr; addr < end_addr; addr = va_next) { 2493 pml1_entry_t *l1e; 2494 pml2_entry_t *l2e; 2495 pml3_entry_t srcptepaddr, *l3e; 2496 pt_entry_t *src_pte, *dst_pte; 2497 2498 l1e = pmap_pml1e(src_pmap, addr); 2499 if ((*l1e & PG_V) == 0) { 2500 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 2501 if (va_next < addr) 2502 va_next = end_addr; 2503 continue; 2504 } 2505 2506 l2e = pmap_l1e_to_l2e(l1e, addr); 2507 if ((*l2e & PG_V) == 0) { 2508 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 2509 if (va_next < addr) 2510 va_next = end_addr; 2511 continue; 2512 } 2513 2514 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 2515 if (va_next < addr) 2516 va_next = end_addr; 2517 2518 l3e = pmap_l2e_to_l3e(l2e, addr); 2519 srcptepaddr = *l3e; 2520 if (srcptepaddr == 0) 2521 continue; 2522 2523 if (srcptepaddr & RPTE_LEAF) { 2524 if ((addr & L3_PAGE_MASK) != 0 || 2525 addr + L3_PAGE_SIZE > end_addr) 2526 continue; 2527 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL); 2528 if (dst_pdpg == NULL) 2529 break; 2530 l3e = (pml3_entry_t *) 2531 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg)); 2532 l3e = &l3e[pmap_pml3e_index(addr)]; 2533 if (*l3e == 0 && ((srcptepaddr & PG_MANAGED) == 0 || 2534 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr, 2535 PMAP_ENTER_NORECLAIM, &lock))) { 2536 *l3e = srcptepaddr & ~PG_W; 2537 pmap_resident_count_inc(dst_pmap, 2538 L3_PAGE_SIZE / PAGE_SIZE); 2539 atomic_add_long(&pmap_l3e_mappings, 1); 2540 } else 2541 dst_pdpg->ref_count--; 2542 continue; 2543 } 2544 2545 srcptepaddr &= PG_FRAME; 2546 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); 2547 KASSERT(srcmpte->ref_count > 0, 2548 ("pmap_copy: 
source page table page is unused")); 2549 2550 if (va_next > end_addr) 2551 va_next = end_addr; 2552 2553 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr); 2554 src_pte = &src_pte[pmap_pte_index(addr)]; 2555 dstmpte = NULL; 2556 while (addr < va_next) { 2557 pt_entry_t ptetemp; 2558 ptetemp = *src_pte; 2559 /* 2560 * we only virtual copy managed pages 2561 */ 2562 if ((ptetemp & PG_MANAGED) != 0) { 2563 if (dstmpte != NULL && 2564 dstmpte->pindex == pmap_l3e_pindex(addr)) 2565 dstmpte->ref_count++; 2566 else if ((dstmpte = pmap_allocpte(dst_pmap, 2567 addr, NULL)) == NULL) 2568 goto out; 2569 dst_pte = (pt_entry_t *) 2570 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); 2571 dst_pte = &dst_pte[pmap_pte_index(addr)]; 2572 if (*dst_pte == 0 && 2573 pmap_try_insert_pv_entry(dst_pmap, addr, 2574 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), 2575 &lock)) { 2576 /* 2577 * Clear the wired, modified, and 2578 * accessed (referenced) bits 2579 * during the copy. 2580 */ 2581 *dst_pte = ptetemp & ~(PG_W | PG_M | 2582 PG_A); 2583 pmap_resident_count_inc(dst_pmap, 1); 2584 } else { 2585 SLIST_INIT(&free); 2586 if (pmap_unwire_ptp(dst_pmap, addr, 2587 dstmpte, &free)) { 2588 /* 2589 * Although "addr" is not 2590 * mapped, paging-structure 2591 * caches could nonetheless 2592 * have entries that refer to 2593 * the freed page table pages. 2594 * Invalidate those entries. 2595 */ 2596 invalidate_all = true; 2597 vm_page_free_pages_toq(&free, 2598 true); 2599 } 2600 goto out; 2601 } 2602 if (dstmpte->ref_count >= srcmpte->ref_count) 2603 break; 2604 } 2605 addr += PAGE_SIZE; 2606 if (__predict_false((addr & L3_PAGE_MASK) == 0)) 2607 src_pte = pmap_pte(src_pmap, addr); 2608 else 2609 src_pte++; 2610 } 2611 } 2612 out: 2613 if (invalidate_all) 2614 pmap_invalidate_all(dst_pmap); 2615 if (lock != NULL) 2616 rw_wunlock(lock); 2617 PMAP_UNLOCK(src_pmap); 2618 PMAP_UNLOCK(dst_pmap); 2619 } 2620 2621 static void 2622 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst) 2623 { 2624 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); 2625 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); 2626 2627 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst); 2628 /* 2629 * XXX slow 2630 */ 2631 bcopy((void *)src, (void *)dst, PAGE_SIZE); 2632 } 2633 2634 static void 2635 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 2636 vm_offset_t b_offset, int xfersize) 2637 { 2638 2639 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma, 2640 a_offset, mb, b_offset, xfersize); 2641 UNIMPLEMENTED(); 2642 } 2643 2644 #if VM_NRESERVLEVEL > 0 2645 /* 2646 * Tries to promote the 512, contiguous 4KB page mappings that are within a 2647 * single page table page (PTP) to a single 2MB page mapping. For promotion 2648 * to occur, two conditions must be met: (1) the 4KB page mappings must map 2649 * aligned, contiguous physical memory and (2) the 4KB page mappings must have 2650 * identical characteristics. 2651 */ 2652 static int 2653 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va, 2654 struct rwlock **lockp) 2655 { 2656 pml3_entry_t newpde; 2657 pt_entry_t *firstpte, oldpte, pa, *pte; 2658 vm_page_t mpte; 2659 2660 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2661 2662 /* 2663 * Examine the first PTE in the specified PTP. Abort if this PTE is 2664 * either invalid, unused, or does not map the first 4KB physical page 2665 * within a 2MB page. 
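 * Concretely, the test below requires PG_A and PG_V to be set and the
 * (PG_FRAME & L3_PAGE_MASK) bits of the frame to be zero, i.e. the first
 * PTE must map a 2MB-aligned physical address.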
2666 */ 2667 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); 2668 setpde: 2669 newpde = *firstpte; 2670 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 2671 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2672 " in pmap %p", va, pmap); 2673 goto fail; 2674 } 2675 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 2676 /* 2677 * When PG_M is already clear, PG_RW can be cleared without 2678 * a TLB invalidation. 2679 */ 2680 if (!atomic_cmpset_long(firstpte, newpde, (newpde | RPTE_EAA_R) & ~RPTE_EAA_W)) 2681 goto setpde; 2682 newpde &= ~RPTE_EAA_W; 2683 } 2684 2685 /* 2686 * Examine each of the other PTEs in the specified PTP. Abort if this 2687 * PTE maps an unexpected 4KB physical page or does not have identical 2688 * characteristics to the first PTE. 2689 */ 2690 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE; 2691 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 2692 setpte: 2693 oldpte = *pte; 2694 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 2695 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2696 " in pmap %p", va, pmap); 2697 goto fail; 2698 } 2699 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 2700 /* 2701 * When PG_M is already clear, PG_RW can be cleared 2702 * without a TLB invalidation. 2703 */ 2704 if (!atomic_cmpset_long(pte, oldpte, (oldpte | RPTE_EAA_R) & ~RPTE_EAA_W)) 2705 goto setpte; 2706 oldpte &= ~RPTE_EAA_W; 2707 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx" 2708 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) | 2709 (va & ~L3_PAGE_MASK), pmap); 2710 } 2711 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 2712 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2713 " in pmap %p", va, pmap); 2714 goto fail; 2715 } 2716 pa -= PAGE_SIZE; 2717 } 2718 2719 /* 2720 * Save the page table page in its current state until the PDE 2721 * mapping the superpage is demoted by pmap_demote_pde() or 2722 * destroyed by pmap_remove_pde(). 2723 */ 2724 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 2725 KASSERT(mpte >= vm_page_array && 2726 mpte < &vm_page_array[vm_page_array_size], 2727 ("pmap_promote_l3e: page table page is out of range")); 2728 KASSERT(mpte->pindex == pmap_l3e_pindex(va), 2729 ("pmap_promote_l3e: page table page's pindex is wrong")); 2730 if (pmap_insert_pt_page(pmap, mpte)) { 2731 CTR2(KTR_PMAP, 2732 "pmap_promote_l3e: failure for va %#lx in pmap %p", va, 2733 pmap); 2734 goto fail; 2735 } 2736 2737 /* 2738 * Promote the pv entries. 
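 * For managed mappings, pmap_pv_promote_l3e() (above) replaces the 512
 * 4KB pv entries with a single entry on the 2MB page's pv list; unmanaged
 * mappings (PG_MANAGED clear) carry no pv entries, so nothing is done.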
2739 */ 2740 if ((newpde & PG_MANAGED) != 0) 2741 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp); 2742 2743 pte_store(pde, PG_PROMOTED | newpde); 2744 atomic_add_long(&pmap_l3e_promotions, 1); 2745 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx" 2746 " in pmap %p", va, pmap); 2747 return (0); 2748 fail: 2749 atomic_add_long(&pmap_l3e_p_failures, 1); 2750 return (KERN_FAILURE); 2751 } 2752 #endif /* VM_NRESERVLEVEL > 0 */ 2753 2754 int 2755 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, 2756 vm_prot_t prot, u_int flags, int8_t psind) 2757 { 2758 struct rwlock *lock; 2759 pml3_entry_t *l3e; 2760 pt_entry_t *pte; 2761 pt_entry_t newpte, origpte; 2762 pv_entry_t pv; 2763 vm_paddr_t opa, pa; 2764 vm_page_t mpte, om; 2765 int rv, retrycount; 2766 boolean_t nosleep, invalidate_all, invalidate_page; 2767 2768 va = trunc_page(va); 2769 retrycount = 0; 2770 invalidate_page = invalidate_all = false; 2771 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va, 2772 m, prot, flags, psind); 2773 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2774 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || 2775 va >= kmi.clean_eva, 2776 ("pmap_enter: managed mapping within the clean submap")); 2777 if ((m->oflags & VPO_UNMANAGED) == 0) 2778 VM_PAGE_OBJECT_BUSY_ASSERT(m); 2779 2780 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 2781 ("pmap_enter: flags %u has reserved bits set", flags)); 2782 pa = VM_PAGE_TO_PHYS(m); 2783 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF); 2784 if ((flags & VM_PROT_WRITE) != 0) 2785 newpte |= PG_M; 2786 if ((flags & VM_PROT_READ) != 0) 2787 newpte |= PG_A; 2788 if (prot & VM_PROT_READ) 2789 newpte |= RPTE_EAA_R; 2790 if ((prot & VM_PROT_WRITE) != 0) 2791 newpte |= RPTE_EAA_W; 2792 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 2793 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 2794 2795 if (prot & VM_PROT_EXECUTE) 2796 newpte |= PG_X; 2797 if ((flags & PMAP_ENTER_WIRED) != 0) 2798 newpte |= PG_W; 2799 if (va >= DMAP_MIN_ADDRESS) 2800 newpte |= RPTE_EAA_P; 2801 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs); 2802 /* 2803 * Set modified bit gratuitously for writeable mappings if 2804 * the page is unmanaged. We do not want to take a fault 2805 * to do the dirty bit accounting for these mappings. 2806 */ 2807 if ((m->oflags & VPO_UNMANAGED) != 0) { 2808 if ((newpte & PG_RW) != 0) 2809 newpte |= PG_M; 2810 } else 2811 newpte |= PG_MANAGED; 2812 2813 lock = NULL; 2814 PMAP_LOCK(pmap); 2815 if (psind == 1) { 2816 /* Assert the required virtual and physical alignment. */ 2817 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned")); 2818 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 2819 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock); 2820 goto out; 2821 } 2822 mpte = NULL; 2823 2824 /* 2825 * In the case that a page table page is not 2826 * resident, we are creating it here. 2827 */ 2828 retry: 2829 l3e = pmap_pml3e(pmap, va); 2830 if (l3e != NULL && (*l3e & PG_V) != 0 && ((*l3e & RPTE_LEAF) == 0 || 2831 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) { 2832 pte = pmap_l3e_to_pte(l3e, va); 2833 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) { 2834 mpte = PHYS_TO_VM_PAGE(*l3e & PG_FRAME); 2835 mpte->ref_count++; 2836 } 2837 } else if (va < VM_MAXUSER_ADDRESS) { 2838 /* 2839 * Here if the pte page isn't mapped, or if it has been 2840 * deallocated. 
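 * Allocating the page table page may require sleeping (unless
 * PMAP_ENTER_NOSLEEP was given), so the l3e lookup is redone from the
 * "retry" label after a successful allocation; the retrycount check below
 * bounds how many times this can happen before panicking.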
2841 */ 2842 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; 2843 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va), 2844 nosleep ? NULL : &lock); 2845 if (mpte == NULL && nosleep) { 2846 rv = KERN_RESOURCE_SHORTAGE; 2847 goto out; 2848 } 2849 if (__predict_false(retrycount++ == 6)) 2850 panic("too many retries"); 2851 invalidate_all = true; 2852 goto retry; 2853 } else 2854 panic("pmap_enter: invalid page directory va=%#lx", va); 2855 2856 origpte = *pte; 2857 pv = NULL; 2858 2859 /* 2860 * Is the specified virtual address already mapped? 2861 */ 2862 if ((origpte & PG_V) != 0) { 2863 #ifdef INVARIANTS 2864 if (VERBOSE_PMAP || pmap_logging) { 2865 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --" 2866 " asid=%lu curpid=%d name=%s origpte0x%lx\n", 2867 pmap, va, m, prot, flags, psind, pmap->pm_pid, 2868 curproc->p_pid, curproc->p_comm, origpte); 2869 pmap_pte_walk(pmap->pm_pml1, va); 2870 } 2871 #endif 2872 /* 2873 * Wiring change, just update stats. We don't worry about 2874 * wiring PT pages as they remain resident as long as there 2875 * are valid mappings in them. Hence, if a user page is wired, 2876 * the PT page will be also. 2877 */ 2878 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 2879 pmap->pm_stats.wired_count++; 2880 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 2881 pmap->pm_stats.wired_count--; 2882 2883 /* 2884 * Remove the extra PT page reference. 2885 */ 2886 if (mpte != NULL) { 2887 mpte->ref_count--; 2888 KASSERT(mpte->ref_count > 0, 2889 ("pmap_enter: missing reference to page table page," 2890 " va: 0x%lx", va)); 2891 } 2892 2893 /* 2894 * Has the physical page changed? 2895 */ 2896 opa = origpte & PG_FRAME; 2897 if (opa == pa) { 2898 /* 2899 * No, might be a protection or wiring change. 2900 */ 2901 if ((origpte & PG_MANAGED) != 0 && 2902 (newpte & PG_RW) != 0) 2903 vm_page_aflag_set(m, PGA_WRITEABLE); 2904 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) { 2905 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) { 2906 if (!atomic_cmpset_long(pte, origpte, newpte)) 2907 goto retry; 2908 if ((newpte & PG_M) != (origpte & PG_M)) 2909 vm_page_dirty(m); 2910 if ((newpte & PG_A) != (origpte & PG_A)) 2911 vm_page_aflag_set(m, PGA_REFERENCED); 2912 ptesync(); 2913 } else 2914 invalidate_all = true; 2915 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 2916 goto unchanged; 2917 } 2918 goto validate; 2919 } 2920 2921 /* 2922 * The physical page has changed. Temporarily invalidate 2923 * the mapping. This ensures that all threads sharing the 2924 * pmap keep a consistent view of the mapping, which is 2925 * necessary for the correct handling of COW faults. It 2926 * also permits reuse of the old mapping's PV entry, 2927 * avoiding an allocation. 2928 * 2929 * For consistency, handle unmanaged mappings the same way. 2930 */ 2931 origpte = pte_load_clear(pte); 2932 KASSERT((origpte & PG_FRAME) == opa, 2933 ("pmap_enter: unexpected pa update for %#lx", va)); 2934 if ((origpte & PG_MANAGED) != 0) { 2935 om = PHYS_TO_VM_PAGE(opa); 2936 2937 /* 2938 * The pmap lock is sufficient to synchronize with 2939 * concurrent calls to pmap_page_test_mappings() and 2940 * pmap_ts_referenced(). 
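 * Because the old mapping was cleared atomically with pte_load_clear()
 * above, the dirty and referenced state captured in origpte is final and
 * can be transferred to om below via vm_page_dirty() and PGA_REFERENCED.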
2941 */ 2942 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2943 vm_page_dirty(om); 2944 if ((origpte & PG_A) != 0) 2945 vm_page_aflag_set(om, PGA_REFERENCED); 2946 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); 2947 pv = pmap_pvh_remove(&om->md, pmap, va); 2948 if ((newpte & PG_MANAGED) == 0) 2949 free_pv_entry(pmap, pv); 2950 #ifdef INVARIANTS 2951 else if (origpte & PG_MANAGED) { 2952 if (pv == NULL) { 2953 pmap_page_print_mappings(om); 2954 MPASS(pv != NULL); 2955 } 2956 } 2957 #endif 2958 if ((om->a.flags & PGA_WRITEABLE) != 0 && 2959 TAILQ_EMPTY(&om->md.pv_list) && 2960 ((om->flags & PG_FICTITIOUS) != 0 || 2961 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 2962 vm_page_aflag_clear(om, PGA_WRITEABLE); 2963 } 2964 if ((origpte & PG_A) != 0) 2965 invalidate_page = true; 2966 origpte = 0; 2967 } else { 2968 if (pmap != kernel_pmap) { 2969 #ifdef INVARIANTS 2970 if (VERBOSE_PMAP || pmap_logging) 2971 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n", 2972 pmap, va, m, prot, flags, psind, 2973 pmap->pm_pid, curproc->p_pid, 2974 curproc->p_comm); 2975 #endif 2976 } 2977 2978 /* 2979 * Increment the counters. 2980 */ 2981 if ((newpte & PG_W) != 0) 2982 pmap->pm_stats.wired_count++; 2983 pmap_resident_count_inc(pmap, 1); 2984 } 2985 2986 /* 2987 * Enter on the PV list if part of our managed memory. 2988 */ 2989 if ((newpte & PG_MANAGED) != 0) { 2990 if (pv == NULL) { 2991 pv = get_pv_entry(pmap, &lock); 2992 pv->pv_va = va; 2993 } 2994 #ifdef VERBOSE_PV 2995 else 2996 printf("reassigning pv: %p to pmap: %p\n", 2997 pv, pmap); 2998 #endif 2999 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); 3000 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 3001 m->md.pv_gen++; 3002 if ((newpte & PG_RW) != 0) 3003 vm_page_aflag_set(m, PGA_WRITEABLE); 3004 } 3005 3006 /* 3007 * Update the PTE. 3008 */ 3009 if ((origpte & PG_V) != 0) { 3010 validate: 3011 origpte = pte_load_store(pte, newpte); 3012 KASSERT((origpte & PG_FRAME) == pa, 3013 ("pmap_enter: unexpected pa update for %#lx", va)); 3014 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3015 (PG_M | PG_RW)) { 3016 if ((origpte & PG_MANAGED) != 0) 3017 vm_page_dirty(m); 3018 invalidate_page = true; 3019 3020 /* 3021 * Although the PTE may still have PG_RW set, TLB 3022 * invalidation may nonetheless be required because 3023 * the PTE no longer has PG_M set. 3024 */ 3025 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) { 3026 /* 3027 * Removing capabilities requires invalidation on POWER 3028 */ 3029 invalidate_page = true; 3030 goto unchanged; 3031 } 3032 if ((origpte & PG_A) != 0) 3033 invalidate_page = true; 3034 } else { 3035 pte_store(pte, newpte); 3036 ptesync(); 3037 } 3038 unchanged: 3039 3040 #if VM_NRESERVLEVEL > 0 3041 /* 3042 * If both the page table page and the reservation are fully 3043 * populated, then attempt promotion. 3044 */ 3045 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3046 mmu_radix_ps_enabled(pmap) && 3047 (m->flags & PG_FICTITIOUS) == 0 && 3048 vm_reserv_level_iffullpop(m) == 0 && 3049 pmap_promote_l3e(pmap, l3e, va, &lock) == 0) 3050 invalidate_all = true; 3051 #endif 3052 if (invalidate_all) 3053 pmap_invalidate_all(pmap); 3054 else if (invalidate_page) 3055 pmap_invalidate_page(pmap, va); 3056 3057 rv = KERN_SUCCESS; 3058 out: 3059 if (lock != NULL) 3060 rw_wunlock(lock); 3061 PMAP_UNLOCK(pmap); 3062 3063 return (rv); 3064 } 3065 3066 /* 3067 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true 3068 * if successful. 
Returns false if (1) a page table page cannot be allocated 3069 * without sleeping, (2) a mapping already exists at the specified virtual 3070 * address, or (3) a PV entry cannot be allocated without reclaiming another 3071 * PV entry. 3072 */ 3073 static bool 3074 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3075 struct rwlock **lockp) 3076 { 3077 pml3_entry_t newpde; 3078 3079 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3080 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) | 3081 RPTE_LEAF | PG_V; 3082 if ((m->oflags & VPO_UNMANAGED) == 0) 3083 newpde |= PG_MANAGED; 3084 if (prot & VM_PROT_EXECUTE) 3085 newpde |= PG_X; 3086 if (prot & VM_PROT_READ) 3087 newpde |= RPTE_EAA_R; 3088 if (va >= DMAP_MIN_ADDRESS) 3089 newpde |= RPTE_EAA_P; 3090 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3091 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) == 3092 KERN_SUCCESS); 3093 } 3094 3095 /* 3096 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if 3097 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE 3098 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and 3099 * a mapping already exists at the specified virtual address. Returns 3100 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table 3101 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if 3102 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3103 * 3104 * The parameter "m" is only used when creating a managed, writeable mapping. 3105 */ 3106 static int 3107 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags, 3108 vm_page_t m, struct rwlock **lockp) 3109 { 3110 struct spglist free; 3111 pml3_entry_t oldl3e, *l3e; 3112 vm_page_t mt, pdpg; 3113 3114 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3115 ("pmap_enter_pde: newpde is missing PG_M")); 3116 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3117 3118 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ? 3119 NULL : lockp)) == NULL) { 3120 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3121 " in pmap %p", va, pmap); 3122 return (KERN_RESOURCE_SHORTAGE); 3123 } 3124 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); 3125 l3e = &l3e[pmap_pml3e_index(va)]; 3126 oldl3e = *l3e; 3127 if ((oldl3e & PG_V) != 0) { 3128 KASSERT(pdpg->ref_count > 1, 3129 ("pmap_enter_pde: pdpg's wire count is too low")); 3130 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 3131 pdpg->ref_count--; 3132 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3133 " in pmap %p", va, pmap); 3134 return (KERN_FAILURE); 3135 } 3136 /* Break the existing mapping(s). */ 3137 SLIST_INIT(&free); 3138 if ((oldl3e & RPTE_LEAF) != 0) { 3139 /* 3140 * The reference to the PD page that was acquired by 3141 * pmap_allocl3e() ensures that it won't be freed. 3142 * However, if the PDE resulted from a promotion, then 3143 * a reserved PT page could be freed. 3144 */ 3145 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp); 3146 } else { 3147 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e, 3148 &free, lockp)) 3149 pmap_invalidate_all(pmap); 3150 } 3151 vm_page_free_pages_toq(&free, true); 3152 if (va >= VM_MAXUSER_ADDRESS) { 3153 mt = PHYS_TO_VM_PAGE(*l3e & PG_FRAME); 3154 if (pmap_insert_pt_page(pmap, mt)) { 3155 /* 3156 * XXX Currently, this can't happen because 3157 * we do not perform pmap_enter(psind == 1) 3158 * on the kernel pmap. 
3159 */ 3160 panic("pmap_enter_pde: trie insert failed"); 3161 } 3162 } else 3163 KASSERT(*l3e == 0, ("pmap_enter_pde: non-zero pde %p", 3164 l3e)); 3165 } 3166 if ((newpde & PG_MANAGED) != 0) { 3167 /* 3168 * Abort this mapping if its PV entry could not be created. 3169 */ 3170 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) { 3171 SLIST_INIT(&free); 3172 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) { 3173 /* 3174 * Although "va" is not mapped, paging- 3175 * structure caches could nonetheless have 3176 * entries that refer to the freed page table 3177 * pages. Invalidate those entries. 3178 */ 3179 pmap_invalidate_page(pmap, va); 3180 vm_page_free_pages_toq(&free, true); 3181 } 3182 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3183 " in pmap %p", va, pmap); 3184 return (KERN_RESOURCE_SHORTAGE); 3185 } 3186 if ((newpde & PG_RW) != 0) { 3187 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 3188 vm_page_aflag_set(mt, PGA_WRITEABLE); 3189 } 3190 } 3191 3192 /* 3193 * Increment counters. 3194 */ 3195 if ((newpde & PG_W) != 0) 3196 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE; 3197 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE); 3198 3199 /* 3200 * Map the superpage. (This is not a promoted mapping; there will not 3201 * be any lingering 4KB page mappings in the TLB.) 3202 */ 3203 pte_store(l3e, newpde); 3204 3205 atomic_add_long(&pmap_l3e_mappings, 1); 3206 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" 3207 " in pmap %p", va, pmap); 3208 return (KERN_SUCCESS); 3209 } 3210 3211 void 3212 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start, 3213 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 3214 { 3215 3216 struct rwlock *lock; 3217 vm_offset_t va; 3218 vm_page_t m, mpte; 3219 vm_pindex_t diff, psize; 3220 bool invalidate; 3221 VM_OBJECT_ASSERT_LOCKED(m_start->object); 3222 3223 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start, 3224 end, m_start, prot); 3225 3226 invalidate = false; 3227 psize = atop(end - start); 3228 mpte = NULL; 3229 m = m_start; 3230 lock = NULL; 3231 PMAP_LOCK(pmap); 3232 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 3233 va = start + ptoa(diff); 3234 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end && 3235 m->psind == 1 && mmu_radix_ps_enabled(pmap) && 3236 pmap_enter_2mpage(pmap, va, m, prot, &lock)) 3237 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1]; 3238 else 3239 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot, 3240 mpte, &lock, &invalidate); 3241 m = TAILQ_NEXT(m, listq); 3242 } 3243 ptesync(); 3244 if (lock != NULL) 3245 rw_wunlock(lock); 3246 if (invalidate) 3247 pmap_invalidate_all(pmap); 3248 PMAP_UNLOCK(pmap); 3249 } 3250 3251 static vm_page_t 3252 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 3253 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate) 3254 { 3255 struct spglist free; 3256 pt_entry_t *pte; 3257 vm_paddr_t pa; 3258 3259 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 3260 (m->oflags & VPO_UNMANAGED) != 0, 3261 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap")); 3262 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3263 3264 /* 3265 * In the case that a page table page is not 3266 * resident, we are creating it here. 
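 * The mpte argument caches the page table page from the caller's previous
 * call (see mmu_radix_enter_object()), so consecutive mappings that share
 * a page table page can skip the pmap_pml3e() lookup.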
3267 */ 3268 if (va < VM_MAXUSER_ADDRESS) { 3269 vm_pindex_t ptepindex; 3270 pml3_entry_t *ptepa; 3271 3272 /* 3273 * Calculate pagetable page index 3274 */ 3275 ptepindex = pmap_l3e_pindex(va); 3276 if (mpte && (mpte->pindex == ptepindex)) { 3277 mpte->ref_count++; 3278 } else { 3279 /* 3280 * Get the page directory entry 3281 */ 3282 ptepa = pmap_pml3e(pmap, va); 3283 3284 /* 3285 * If the page table page is mapped, we just increment 3286 * the hold count, and activate it. Otherwise, we 3287 * attempt to allocate a page table page. If this 3288 * attempt fails, we don't retry. Instead, we give up. 3289 */ 3290 if (ptepa && (*ptepa & PG_V) != 0) { 3291 if (*ptepa & RPTE_LEAF) 3292 return (NULL); 3293 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME); 3294 mpte->ref_count++; 3295 } else { 3296 /* 3297 * Pass NULL instead of the PV list lock 3298 * pointer, because we don't intend to sleep. 3299 */ 3300 mpte = _pmap_allocpte(pmap, ptepindex, NULL); 3301 if (mpte == NULL) 3302 return (mpte); 3303 } 3304 } 3305 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); 3306 pte = &pte[pmap_pte_index(va)]; 3307 } else { 3308 mpte = NULL; 3309 pte = pmap_pte(pmap, va); 3310 } 3311 if (*pte) { 3312 if (mpte != NULL) { 3313 mpte->ref_count--; 3314 mpte = NULL; 3315 } 3316 return (mpte); 3317 } 3318 3319 /* 3320 * Enter on the PV list if part of our managed memory. 3321 */ 3322 if ((m->oflags & VPO_UNMANAGED) == 0 && 3323 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { 3324 if (mpte != NULL) { 3325 SLIST_INIT(&free); 3326 if (pmap_unwire_ptp(pmap, va, mpte, &free)) { 3327 /* 3328 * Although "va" is not mapped, paging- 3329 * structure caches could nonetheless have 3330 * entries that refer to the freed page table 3331 * pages. Invalidate those entries. 3332 */ 3333 *invalidate = true; 3334 vm_page_free_pages_toq(&free, true); 3335 } 3336 mpte = NULL; 3337 } 3338 return (mpte); 3339 } 3340 3341 /* 3342 * Increment counters 3343 */ 3344 pmap_resident_count_inc(pmap, 1); 3345 3346 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs); 3347 if (prot & VM_PROT_EXECUTE) 3348 pa |= PG_X; 3349 else 3350 pa |= RPTE_EAA_R; 3351 if ((m->oflags & VPO_UNMANAGED) == 0) 3352 pa |= PG_MANAGED; 3353 3354 pte_store(pte, pa); 3355 return (mpte); 3356 } 3357 3358 void 3359 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, 3360 vm_prot_t prot) 3361 { 3362 struct rwlock *lock; 3363 bool invalidate; 3364 3365 lock = NULL; 3366 invalidate = false; 3367 PMAP_LOCK(pmap); 3368 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock, 3369 &invalidate); 3370 ptesync(); 3371 if (lock != NULL) 3372 rw_wunlock(lock); 3373 if (invalidate) 3374 pmap_invalidate_all(pmap); 3375 PMAP_UNLOCK(pmap); 3376 } 3377 3378 vm_paddr_t 3379 mmu_radix_extract(pmap_t pmap, vm_offset_t va) 3380 { 3381 pml3_entry_t *l3e; 3382 pt_entry_t *pte; 3383 vm_paddr_t pa; 3384 3385 l3e = pmap_pml3e(pmap, va); 3386 if (__predict_false(l3e == NULL)) 3387 return (0); 3388 if (*l3e & RPTE_LEAF) { 3389 pa = (*l3e & PG_PS_FRAME) | (va & L3_PAGE_MASK); 3390 pa |= (va & L3_PAGE_MASK); 3391 } else { 3392 /* 3393 * Beware of a concurrent promotion that changes the 3394 * PDE at this point! For example, vtopte() must not 3395 * be used to access the PTE because it would use the 3396 * new PDE. It is, however, safe to use the old PDE 3397 * because the page table page is preserved by the 3398 * promotion. 
3399 */ 3400 pte = pmap_l3e_to_pte(l3e, va); 3401 if (__predict_false(pte == NULL)) 3402 return (0); 3403 pa = *pte; 3404 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 3405 pa |= (va & PAGE_MASK); 3406 } 3407 return (pa); 3408 } 3409 3410 vm_page_t 3411 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3412 { 3413 pml3_entry_t l3e, *l3ep; 3414 pt_entry_t pte; 3415 vm_paddr_t pa; 3416 vm_page_t m; 3417 3418 pa = 0; 3419 m = NULL; 3420 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot); 3421 PMAP_LOCK(pmap); 3422 l3ep = pmap_pml3e(pmap, va); 3423 if (l3ep != NULL && (l3e = *l3ep)) { 3424 if (l3e & RPTE_LEAF) { 3425 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0) 3426 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) | 3427 (va & L3_PAGE_MASK)); 3428 } else { 3429 pte = *pmap_l3e_to_pte(l3ep, va); 3430 if ((pte & PG_V) && 3431 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 3432 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 3433 } 3434 if (m != NULL && !vm_page_wire_mapped(m)) 3435 m = NULL; 3436 } 3437 PMAP_UNLOCK(pmap); 3438 return (m); 3439 } 3440 3441 static void 3442 mmu_radix_growkernel(vm_offset_t addr) 3443 { 3444 vm_paddr_t paddr; 3445 vm_page_t nkpg; 3446 pml3_entry_t *l3e; 3447 pml2_entry_t *l2e; 3448 3449 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr); 3450 if (VM_MIN_KERNEL_ADDRESS < addr && 3451 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE)) 3452 return; 3453 3454 addr = roundup2(addr, L3_PAGE_SIZE); 3455 if (addr - 1 >= vm_map_max(kernel_map)) 3456 addr = vm_map_max(kernel_map); 3457 while (kernel_vm_end < addr) { 3458 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end); 3459 if ((*l2e & PG_V) == 0) { 3460 /* We need a new PDP entry */ 3461 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT, 3462 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 3463 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 3464 if (nkpg == NULL) 3465 panic("pmap_growkernel: no memory to grow kernel"); 3466 if ((nkpg->flags & PG_ZERO) == 0) 3467 mmu_radix_zero_page(nkpg); 3468 paddr = VM_PAGE_TO_PHYS(nkpg); 3469 pde_store(l2e, paddr); 3470 continue; /* try again */ 3471 } 3472 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end); 3473 if ((*l3e & PG_V) != 0) { 3474 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 3475 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 3476 kernel_vm_end = vm_map_max(kernel_map); 3477 break; 3478 } 3479 continue; 3480 } 3481 3482 nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end), 3483 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 3484 VM_ALLOC_ZERO); 3485 if (nkpg == NULL) 3486 panic("pmap_growkernel: no memory to grow kernel"); 3487 if ((nkpg->flags & PG_ZERO) == 0) 3488 mmu_radix_zero_page(nkpg); 3489 paddr = VM_PAGE_TO_PHYS(nkpg); 3490 pde_store(l3e, paddr); 3491 3492 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 3493 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 3494 kernel_vm_end = vm_map_max(kernel_map); 3495 break; 3496 } 3497 } 3498 ptesync(); 3499 } 3500 3501 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory"); 3502 static uma_zone_t zone_radix_pgd; 3503 3504 static int 3505 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused, 3506 int flags) 3507 { 3508 3509 for (int i = 0; i < count; i++) { 3510 vm_page_t m = vm_page_alloc_contig(NULL, 0, 3511 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 3512 VM_ALLOC_ZERO | VM_ALLOC_WAITOK, RADIX_PGD_SIZE/PAGE_SIZE, 3513 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE, 3514 VM_MEMATTR_DEFAULT); 3515 /* XXX zero on alloc here so 
we don't have to later */ 3516 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3517 } 3518 return (count); 3519 } 3520 3521 static void 3522 radix_pgd_release(void *arg __unused, void **store, int count) 3523 { 3524 vm_page_t m; 3525 struct spglist free; 3526 int page_count; 3527 3528 SLIST_INIT(&free); 3529 page_count = RADIX_PGD_SIZE/PAGE_SIZE; 3530 3531 for (int i = 0; i < count; i++) { 3532 /* 3533 * XXX selectively remove dmap and KVA entries so we don't 3534 * need to bzero 3535 */ 3536 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i])); 3537 for (int j = page_count-1; j >= 0; j--) { 3538 vm_page_unwire_noq(&m[j]); 3539 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss); 3540 } 3541 vm_page_free_pages_toq(&free, false); 3542 } 3543 } 3544 3545 static void 3546 mmu_radix_init() 3547 { 3548 vm_page_t mpte; 3549 vm_size_t s; 3550 int error, i, pv_npg; 3551 3552 /* L1TF, reserve page @0 unconditionally */ 3553 vm_page_blacklist_add(0, bootverbose); 3554 3555 zone_radix_pgd = uma_zcache_create("radix_pgd_cache", 3556 RADIX_PGD_SIZE, NULL, NULL, 3557 #ifdef INVARIANTS 3558 trash_init, trash_fini, 3559 #else 3560 NULL, NULL, 3561 #endif 3562 radix_pgd_import, radix_pgd_release, 3563 NULL, UMA_ZONE_NOBUCKET); 3564 3565 /* 3566 * Initialize the vm page array entries for the kernel pmap's 3567 * page table pages. 3568 */ 3569 PMAP_LOCK(kernel_pmap); 3570 for (i = 0; i < nkpt; i++) { 3571 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT)); 3572 KASSERT(mpte >= vm_page_array && 3573 mpte < &vm_page_array[vm_page_array_size], 3574 ("pmap_init: page table page is out of range size: %lu", 3575 vm_page_array_size)); 3576 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i; 3577 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT); 3578 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte); 3579 //pmap_insert_pt_page(kernel_pmap, mpte); 3580 mpte->ref_count = 1; 3581 } 3582 PMAP_UNLOCK(kernel_pmap); 3583 vm_wire_add(nkpt); 3584 3585 CTR1(KTR_PMAP, "%s()", __func__); 3586 TAILQ_INIT(&pv_dummy.pv_list); 3587 3588 /* 3589 * Are large page mappings enabled? 3590 */ 3591 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 3592 if (pg_ps_enabled) { 3593 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 3594 ("pmap_init: can't assign to pagesizes[1]")); 3595 pagesizes[1] = L3_PAGE_SIZE; 3596 } 3597 3598 /* 3599 * Initialize the pv chunk list mutex. 3600 */ 3601 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); 3602 3603 /* 3604 * Initialize the pool of pv list locks. 3605 */ 3606 for (i = 0; i < NPV_LIST_LOCKS; i++) 3607 rw_init(&pv_list_locks[i], "pmap pv list"); 3608 3609 /* 3610 * Calculate the size of the pv head table for superpages. 3611 */ 3612 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE); 3613 3614 /* 3615 * Allocate memory for the pv head table for superpages. 
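 * pv_npg, computed above, is one struct md_page per L3_PAGE_SIZE (2MB) of
 * physical address space up to the end of the highest vm_phys segment, so
 * every possible 2MB page has a pv list head in pv_table.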
3616 */ 3617 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 3618 s = round_page(s); 3619 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); 3620 for (i = 0; i < pv_npg; i++) 3621 TAILQ_INIT(&pv_table[i].pv_list); 3622 TAILQ_INIT(&pv_dummy.pv_list); 3623 3624 pmap_initialized = 1; 3625 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN); 3626 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, 3627 (vmem_addr_t *)&qframe); 3628 3629 if (error != 0) 3630 panic("qframe allocation failed"); 3631 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits), 3632 1, 1, M_WAITOK); 3633 } 3634 3635 static boolean_t 3636 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) 3637 { 3638 struct rwlock *lock; 3639 pv_entry_t pv; 3640 struct md_page *pvh; 3641 pt_entry_t *pte, mask; 3642 pmap_t pmap; 3643 int md_gen, pvh_gen; 3644 boolean_t rv; 3645 3646 rv = FALSE; 3647 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 3648 rw_rlock(lock); 3649 restart: 3650 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 3651 pmap = PV_PMAP(pv); 3652 if (!PMAP_TRYLOCK(pmap)) { 3653 md_gen = m->md.pv_gen; 3654 rw_runlock(lock); 3655 PMAP_LOCK(pmap); 3656 rw_rlock(lock); 3657 if (md_gen != m->md.pv_gen) { 3658 PMAP_UNLOCK(pmap); 3659 goto restart; 3660 } 3661 } 3662 pte = pmap_pte(pmap, pv->pv_va); 3663 mask = 0; 3664 if (modified) 3665 mask |= PG_RW | PG_M; 3666 if (accessed) 3667 mask |= PG_V | PG_A; 3668 rv = (*pte & mask) == mask; 3669 PMAP_UNLOCK(pmap); 3670 if (rv) 3671 goto out; 3672 } 3673 if ((m->flags & PG_FICTITIOUS) == 0) { 3674 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3675 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 3676 pmap = PV_PMAP(pv); 3677 if (!PMAP_TRYLOCK(pmap)) { 3678 md_gen = m->md.pv_gen; 3679 pvh_gen = pvh->pv_gen; 3680 rw_runlock(lock); 3681 PMAP_LOCK(pmap); 3682 rw_rlock(lock); 3683 if (md_gen != m->md.pv_gen || 3684 pvh_gen != pvh->pv_gen) { 3685 PMAP_UNLOCK(pmap); 3686 goto restart; 3687 } 3688 } 3689 pte = pmap_pml3e(pmap, pv->pv_va); 3690 mask = 0; 3691 if (modified) 3692 mask |= PG_RW | PG_M; 3693 if (accessed) 3694 mask |= PG_V | PG_A; 3695 rv = (*pte & mask) == mask; 3696 PMAP_UNLOCK(pmap); 3697 if (rv) 3698 goto out; 3699 } 3700 } 3701 out: 3702 rw_runlock(lock); 3703 return (rv); 3704 } 3705 3706 /* 3707 * pmap_is_modified: 3708 * 3709 * Return whether or not the specified physical page was modified 3710 * in any physical maps. 3711 */ 3712 boolean_t 3713 mmu_radix_is_modified(vm_page_t m) 3714 { 3715 3716 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3717 ("pmap_is_modified: page %p is not managed", m)); 3718 3719 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3720 /* 3721 * If the page is not busied then this check is racy. 
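 * The pmap_page_is_write_mapped() test below is a cheap shortcut: when
 * PGA_WRITEABLE is clear no mapping can have PG_M set, so the pv lists do
 * not need to be scanned.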
3722 */ 3723 if (!pmap_page_is_write_mapped(m)) 3724 return (FALSE); 3725 return (pmap_page_test_mappings(m, FALSE, TRUE)); 3726 } 3727 3728 boolean_t 3729 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3730 { 3731 pml3_entry_t *l3e; 3732 pt_entry_t *pte; 3733 boolean_t rv; 3734 3735 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); 3736 rv = FALSE; 3737 PMAP_LOCK(pmap); 3738 l3e = pmap_pml3e(pmap, addr); 3739 if (l3e != NULL && (*l3e & (RPTE_LEAF | PG_V)) == PG_V) { 3740 pte = pmap_l3e_to_pte(l3e, addr); 3741 rv = (*pte & PG_V) == 0; 3742 } 3743 PMAP_UNLOCK(pmap); 3744 return (rv); 3745 } 3746 3747 boolean_t 3748 mmu_radix_is_referenced(vm_page_t m) 3749 { 3750 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3751 ("pmap_is_referenced: page %p is not managed", m)); 3752 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3753 return (pmap_page_test_mappings(m, TRUE, FALSE)); 3754 } 3755 3756 /* 3757 * pmap_ts_referenced: 3758 * 3759 * Return a count of reference bits for a page, clearing those bits. 3760 * It is not necessary for every reference bit to be cleared, but it 3761 * is necessary that 0 only be returned when there are truly no 3762 * reference bits set. 3763 * 3764 * As an optimization, update the page's dirty field if a modified bit is 3765 * found while counting reference bits. This opportunistic update can be 3766 * performed at low cost and can eliminate the need for some future calls 3767 * to pmap_is_modified(). However, since this function stops after 3768 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 3769 * dirty pages. Those dirty pages will only be detected by a future call 3770 * to pmap_is_modified(). 3771 * 3772 * A DI block is not needed within this function, because 3773 * invalidations are performed before the PV list lock is 3774 * released. 3775 */ 3776 boolean_t 3777 mmu_radix_ts_referenced(vm_page_t m) 3778 { 3779 struct md_page *pvh; 3780 pv_entry_t pv, pvf; 3781 pmap_t pmap; 3782 struct rwlock *lock; 3783 pml3_entry_t oldl3e, *l3e; 3784 pt_entry_t *pte; 3785 vm_paddr_t pa; 3786 int cleared, md_gen, not_cleared, pvh_gen; 3787 struct spglist free; 3788 3789 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3790 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3791 ("pmap_ts_referenced: page %p is not managed", m)); 3792 SLIST_INIT(&free); 3793 cleared = 0; 3794 pa = VM_PAGE_TO_PHYS(m); 3795 lock = PHYS_TO_PV_LIST_LOCK(pa); 3796 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa); 3797 rw_wlock(lock); 3798 retry: 3799 not_cleared = 0; 3800 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 3801 goto small_mappings; 3802 pv = pvf; 3803 do { 3804 if (pvf == NULL) 3805 pvf = pv; 3806 pmap = PV_PMAP(pv); 3807 if (!PMAP_TRYLOCK(pmap)) { 3808 pvh_gen = pvh->pv_gen; 3809 rw_wunlock(lock); 3810 PMAP_LOCK(pmap); 3811 rw_wlock(lock); 3812 if (pvh_gen != pvh->pv_gen) { 3813 PMAP_UNLOCK(pmap); 3814 goto retry; 3815 } 3816 } 3817 l3e = pmap_pml3e(pmap, pv->pv_va); 3818 oldl3e = *l3e; 3819 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3820 /* 3821 * Although "oldpde" is mapping a 2MB page, because 3822 * this function is called at a 4KB page granularity, 3823 * we only update the 4KB page under test. 3824 */ 3825 vm_page_dirty(m); 3826 } 3827 if ((oldl3e & PG_A) != 0) { 3828 /* 3829 * Since this reference bit is shared by 512 4KB 3830 * pages, it should not be cleared every time it is 3831 * tested. 
Apply a simple "hash" function on the 3832 * physical page number, the virtual superpage number, 3833 * and the pmap address to select one 4KB page out of 3834 * the 512 on which testing the reference bit will 3835 * result in clearing that reference bit. This 3836 * function is designed to avoid the selection of the 3837 * same 4KB page for every 2MB page mapping. 3838 * 3839 * On demotion, a mapping that hasn't been referenced 3840 * is simply destroyed. To avoid the possibility of a 3841 * subsequent page fault on a demoted wired mapping, 3842 * always leave its reference bit set. Moreover, 3843 * since the superpage is wired, the current state of 3844 * its reference bit won't affect page replacement. 3845 */ 3846 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^ 3847 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 3848 (oldl3e & PG_W) == 0) { 3849 atomic_clear_long(l3e, PG_A); 3850 pmap_invalidate_page(pmap, pv->pv_va); 3851 cleared++; 3852 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 3853 ("inconsistent pv lock %p %p for page %p", 3854 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 3855 } else 3856 not_cleared++; 3857 } 3858 PMAP_UNLOCK(pmap); 3859 /* Rotate the PV list if it has more than one entry. */ 3860 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) { 3861 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 3862 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 3863 pvh->pv_gen++; 3864 } 3865 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) 3866 goto out; 3867 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 3868 small_mappings: 3869 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 3870 goto out; 3871 pv = pvf; 3872 do { 3873 if (pvf == NULL) 3874 pvf = pv; 3875 pmap = PV_PMAP(pv); 3876 if (!PMAP_TRYLOCK(pmap)) { 3877 pvh_gen = pvh->pv_gen; 3878 md_gen = m->md.pv_gen; 3879 rw_wunlock(lock); 3880 PMAP_LOCK(pmap); 3881 rw_wlock(lock); 3882 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 3883 PMAP_UNLOCK(pmap); 3884 goto retry; 3885 } 3886 } 3887 l3e = pmap_pml3e(pmap, pv->pv_va); 3888 KASSERT((*l3e & RPTE_LEAF) == 0, 3889 ("pmap_ts_referenced: found a 2mpage in page %p's pv list", 3890 m)); 3891 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 3892 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3893 vm_page_dirty(m); 3894 if ((*pte & PG_A) != 0) { 3895 atomic_clear_long(pte, PG_A); 3896 pmap_invalidate_page(pmap, pv->pv_va); 3897 cleared++; 3898 } 3899 PMAP_UNLOCK(pmap); 3900 /* Rotate the PV list if it has more than one entry. 
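 * Moving the entry just examined to the tail spreads the sampling of
 * reference bits across all of the page's mappings, since this
 * function may stop early once PMAP_TS_REFERENCED_MAX bits are found.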
*/ 3901 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) { 3902 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 3903 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 3904 m->md.pv_gen++; 3905 } 3906 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + 3907 not_cleared < PMAP_TS_REFERENCED_MAX); 3908 out: 3909 rw_wunlock(lock); 3910 vm_page_free_pages_toq(&free, true); 3911 return (cleared + not_cleared); 3912 } 3913 3914 static vm_offset_t 3915 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start, 3916 vm_paddr_t end, int prot __unused) 3917 { 3918 3919 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end, 3920 prot); 3921 return (PHYS_TO_DMAP(start)); 3922 } 3923 3924 void 3925 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr, 3926 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 3927 { 3928 pml3_entry_t *l3e; 3929 vm_paddr_t pa, ptepa; 3930 vm_page_t p, pdpg; 3931 vm_memattr_t ma; 3932 3933 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr, 3934 object, pindex, size); 3935 VM_OBJECT_ASSERT_WLOCKED(object); 3936 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3937 ("pmap_object_init_pt: non-device object")); 3938 /* NB: size can be logically ored with addr here */ 3939 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) { 3940 if (!mmu_radix_ps_enabled(pmap)) 3941 return; 3942 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3943 return; 3944 p = vm_page_lookup(object, pindex); 3945 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3946 ("pmap_object_init_pt: invalid page %p", p)); 3947 ma = p->md.mdpg_cache_attrs; 3948 3949 /* 3950 * Abort the mapping if the first page is not physically 3951 * aligned to a 2MB page boundary. 3952 */ 3953 ptepa = VM_PAGE_TO_PHYS(p); 3954 if (ptepa & L3_PAGE_MASK) 3955 return; 3956 3957 /* 3958 * Skip the first page. Abort the mapping if the rest of 3959 * the pages are not physically contiguous or have differing 3960 * memory attributes. 3961 */ 3962 p = TAILQ_NEXT(p, listq); 3963 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3964 pa += PAGE_SIZE) { 3965 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3966 ("pmap_object_init_pt: invalid page %p", p)); 3967 if (pa != VM_PAGE_TO_PHYS(p) || 3968 ma != p->md.mdpg_cache_attrs) 3969 return; 3970 p = TAILQ_NEXT(p, listq); 3971 } 3972 3973 PMAP_LOCK(pmap); 3974 for (pa = ptepa | pmap_cache_bits(ma); 3975 pa < ptepa + size; pa += L3_PAGE_SIZE) { 3976 pdpg = pmap_allocl3e(pmap, addr, NULL); 3977 if (pdpg == NULL) { 3978 /* 3979 * The creation of mappings below is only an 3980 * optimization. If a page directory page 3981 * cannot be allocated without blocking, 3982 * continue on to the next mapping rather than 3983 * blocking. 3984 */ 3985 addr += L3_PAGE_SIZE; 3986 continue; 3987 } 3988 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); 3989 l3e = &l3e[pmap_pml3e_index(addr)]; 3990 if ((*l3e & PG_V) == 0) { 3991 pa |= PG_M | PG_A | PG_RW; 3992 pte_store(l3e, pa); 3993 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE); 3994 atomic_add_long(&pmap_l3e_mappings, 1); 3995 } else { 3996 /* Continue on if the PDE is already valid. 
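 * Drop the reference taken by pmap_allocl3e() above, since no new
 * mapping was installed under this page directory page.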
*/ 3997 pdpg->ref_count--; 3998 KASSERT(pdpg->ref_count > 0, 3999 ("pmap_object_init_pt: missing reference " 4000 "to page directory page, va: 0x%lx", addr)); 4001 } 4002 addr += L3_PAGE_SIZE; 4003 } 4004 ptesync(); 4005 PMAP_UNLOCK(pmap); 4006 } 4007 } 4008 4009 boolean_t 4010 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m) 4011 { 4012 struct md_page *pvh; 4013 struct rwlock *lock; 4014 pv_entry_t pv; 4015 int loops = 0; 4016 boolean_t rv; 4017 4018 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4019 ("pmap_page_exists_quick: page %p is not managed", m)); 4020 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m); 4021 rv = FALSE; 4022 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 4023 rw_rlock(lock); 4024 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 4025 if (PV_PMAP(pv) == pmap) { 4026 rv = TRUE; 4027 break; 4028 } 4029 loops++; 4030 if (loops >= 16) 4031 break; 4032 } 4033 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4034 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4035 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 4036 if (PV_PMAP(pv) == pmap) { 4037 rv = TRUE; 4038 break; 4039 } 4040 loops++; 4041 if (loops >= 16) 4042 break; 4043 } 4044 } 4045 rw_runlock(lock); 4046 return (rv); 4047 } 4048 4049 void 4050 mmu_radix_page_init(vm_page_t m) 4051 { 4052 4053 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 4054 TAILQ_INIT(&m->md.pv_list); 4055 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; 4056 } 4057 4058 int 4059 mmu_radix_page_wired_mappings(vm_page_t m) 4060 { 4061 struct rwlock *lock; 4062 struct md_page *pvh; 4063 pmap_t pmap; 4064 pt_entry_t *pte; 4065 pv_entry_t pv; 4066 int count, md_gen, pvh_gen; 4067 4068 if ((m->oflags & VPO_UNMANAGED) != 0) 4069 return (0); 4070 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 4071 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 4072 rw_rlock(lock); 4073 restart: 4074 count = 0; 4075 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 4076 pmap = PV_PMAP(pv); 4077 if (!PMAP_TRYLOCK(pmap)) { 4078 md_gen = m->md.pv_gen; 4079 rw_runlock(lock); 4080 PMAP_LOCK(pmap); 4081 rw_rlock(lock); 4082 if (md_gen != m->md.pv_gen) { 4083 PMAP_UNLOCK(pmap); 4084 goto restart; 4085 } 4086 } 4087 pte = pmap_pte(pmap, pv->pv_va); 4088 if ((*pte & PG_W) != 0) 4089 count++; 4090 PMAP_UNLOCK(pmap); 4091 } 4092 if ((m->flags & PG_FICTITIOUS) == 0) { 4093 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4094 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 4095 pmap = PV_PMAP(pv); 4096 if (!PMAP_TRYLOCK(pmap)) { 4097 md_gen = m->md.pv_gen; 4098 pvh_gen = pvh->pv_gen; 4099 rw_runlock(lock); 4100 PMAP_LOCK(pmap); 4101 rw_rlock(lock); 4102 if (md_gen != m->md.pv_gen || 4103 pvh_gen != pvh->pv_gen) { 4104 PMAP_UNLOCK(pmap); 4105 goto restart; 4106 } 4107 } 4108 pte = pmap_pml3e(pmap, pv->pv_va); 4109 if ((*pte & PG_W) != 0) 4110 count++; 4111 PMAP_UNLOCK(pmap); 4112 } 4113 } 4114 rw_runlock(lock); 4115 return (count); 4116 } 4117 4118 static void 4119 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa) 4120 { 4121 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT); 4122 } 4123 4124 int 4125 mmu_radix_pinit(pmap_t pmap) 4126 { 4127 vmem_addr_t pid; 4128 vm_paddr_t l1pa; 4129 4130 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4131 4132 /* 4133 * allocate the page directory page 4134 */ 4135 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK); 4136 4137 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++) 4138 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE); 4139 pmap->pm_radix.rt_root = 0; 4140 TAILQ_INIT(&pmap->pm_pvchunk); 4141 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 4142 pmap->pm_flags = 
PMAP_PDE_SUPERPAGE; 4143 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid); 4144 4145 pmap->pm_pid = pid; 4146 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1); 4147 mmu_radix_update_proctab(pid, l1pa); 4148 __asm __volatile("ptesync;isync" : : : "memory"); 4149 4150 return (1); 4151 } 4152 4153 /* 4154 * This routine is called if the desired page table page does not exist. 4155 * 4156 * If page table page allocation fails, this routine may sleep before 4157 * returning NULL. It sleeps only if a lock pointer was given. 4158 * 4159 * Note: If a page allocation fails at page table level two or three, 4160 * one or two pages may be held during the wait, only to be released 4161 * afterwards. This conservative approach is easily argued to avoid 4162 * race conditions. 4163 */ 4164 static vm_page_t 4165 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) 4166 { 4167 vm_page_t m, pdppg, pdpg; 4168 4169 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4170 4171 /* 4172 * Allocate a page table page. 4173 */ 4174 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 4175 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 4176 if (lockp != NULL) { 4177 RELEASE_PV_LIST_LOCK(lockp); 4178 PMAP_UNLOCK(pmap); 4179 vm_wait(NULL); 4180 PMAP_LOCK(pmap); 4181 } 4182 /* 4183 * Indicate the need to retry. While waiting, the page table 4184 * page may have been allocated. 4185 */ 4186 return (NULL); 4187 } 4188 if ((m->flags & PG_ZERO) == 0) 4189 mmu_radix_zero_page(m); 4190 4191 /* 4192 * Map the pagetable page into the process address space, if 4193 * it isn't already there. 4194 */ 4195 4196 if (ptepindex >= (NUPDE + NUPDPE)) { 4197 pml1_entry_t *l1e; 4198 vm_pindex_t pml1index; 4199 4200 /* Wire up a new PDPE page */ 4201 pml1index = ptepindex - (NUPDE + NUPDPE); 4202 l1e = &pmap->pm_pml1[pml1index]; 4203 pde_store(l1e, VM_PAGE_TO_PHYS(m)); 4204 4205 } else if (ptepindex >= NUPDE) { 4206 vm_pindex_t pml1index; 4207 vm_pindex_t pdpindex; 4208 pml1_entry_t *l1e; 4209 pml2_entry_t *l2e; 4210 4211 /* Wire up a new l2e page */ 4212 pdpindex = ptepindex - NUPDE; 4213 pml1index = pdpindex >> RPTE_SHIFT; 4214 4215 l1e = &pmap->pm_pml1[pml1index]; 4216 if ((*l1e & PG_V) == 0) { 4217 /* Have to allocate a new pdp, recurse */ 4218 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index, 4219 lockp) == NULL) { 4220 vm_page_unwire_noq(m); 4221 vm_page_free_zero(m); 4222 return (NULL); 4223 } 4224 } else { 4225 /* Add reference to l2e page */ 4226 pdppg = PHYS_TO_VM_PAGE(*l1e & PG_FRAME); 4227 pdppg->ref_count++; 4228 } 4229 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME); 4230 4231 /* Now find the pdp page */ 4232 l2e = &l2e[pdpindex & RPTE_MASK]; 4233 pde_store(l2e, VM_PAGE_TO_PHYS(m)); 4234 4235 } else { 4236 vm_pindex_t pml1index; 4237 vm_pindex_t pdpindex; 4238 pml1_entry_t *l1e; 4239 pml2_entry_t *l2e; 4240 pml3_entry_t *l3e; 4241 4242 /* Wire up a new PTE page */ 4243 pdpindex = ptepindex >> RPTE_SHIFT; 4244 pml1index = pdpindex >> RPTE_SHIFT; 4245 4246 /* First, find the pdp and check that its valid. 
*/ 4247 l1e = &pmap->pm_pml1[pml1index]; 4248 if ((*l1e & PG_V) == 0) { 4249 /* Have to allocate a new pd, recurse */ 4250 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 4251 lockp) == NULL) { 4252 vm_page_unwire_noq(m); 4253 vm_page_free_zero(m); 4254 return (NULL); 4255 } 4256 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME); 4257 l2e = &l2e[pdpindex & RPTE_MASK]; 4258 } else { 4259 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME); 4260 l2e = &l2e[pdpindex & RPTE_MASK]; 4261 if ((*l2e & PG_V) == 0) { 4262 /* Have to allocate a new pd, recurse */ 4263 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 4264 lockp) == NULL) { 4265 vm_page_unwire_noq(m); 4266 vm_page_free_zero(m); 4267 return (NULL); 4268 } 4269 } else { 4270 /* Add reference to the pd page */ 4271 pdpg = PHYS_TO_VM_PAGE(*l2e & PG_FRAME); 4272 pdpg->ref_count++; 4273 } 4274 } 4275 l3e = (pml3_entry_t *)PHYS_TO_DMAP(*l2e & PG_FRAME); 4276 4277 /* Now we know where the page directory page is */ 4278 l3e = &l3e[ptepindex & RPTE_MASK]; 4279 pde_store(l3e, VM_PAGE_TO_PHYS(m)); 4280 } 4281 4282 pmap_resident_count_inc(pmap, 1); 4283 return (m); 4284 } 4285 static vm_page_t 4286 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 4287 { 4288 vm_pindex_t pdpindex, ptepindex; 4289 pml2_entry_t *pdpe; 4290 vm_page_t pdpg; 4291 4292 retry: 4293 pdpe = pmap_pml2e(pmap, va); 4294 if (pdpe != NULL && (*pdpe & PG_V) != 0) { 4295 /* Add a reference to the pd page. */ 4296 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME); 4297 pdpg->ref_count++; 4298 } else { 4299 /* Allocate a pd page. */ 4300 ptepindex = pmap_l3e_pindex(va); 4301 pdpindex = ptepindex >> RPTE_SHIFT; 4302 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp); 4303 if (pdpg == NULL && lockp != NULL) 4304 goto retry; 4305 } 4306 return (pdpg); 4307 } 4308 4309 static vm_page_t 4310 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 4311 { 4312 vm_pindex_t ptepindex; 4313 pml3_entry_t *pd; 4314 vm_page_t m; 4315 4316 /* 4317 * Calculate pagetable page index 4318 */ 4319 ptepindex = pmap_l3e_pindex(va); 4320 retry: 4321 /* 4322 * Get the page directory entry 4323 */ 4324 pd = pmap_pml3e(pmap, va); 4325 4326 /* 4327 * This supports switching from a 2MB page to a 4328 * normal 4K page. 4329 */ 4330 if (pd != NULL && (*pd & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) { 4331 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) { 4332 /* 4333 * Invalidation of the 2MB page mapping may have caused 4334 * the deallocation of the underlying PD page. 4335 */ 4336 pd = NULL; 4337 } 4338 } 4339 4340 /* 4341 * If the page table page is mapped, we just increment the 4342 * hold count, and activate it. 4343 */ 4344 if (pd != NULL && (*pd & PG_V) != 0) { 4345 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME); 4346 m->ref_count++; 4347 } else { 4348 /* 4349 * Here if the pte page isn't mapped, or if it has been 4350 * deallocated. 
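 * _pmap_allocpte() may drop the pmap lock to wait for memory when a
 * lock pointer is supplied, so a NULL return means the lookup must be
 * restarted because the page table may have changed in the meantime.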
4351 */ 4352 m = _pmap_allocpte(pmap, ptepindex, lockp); 4353 if (m == NULL && lockp != NULL) 4354 goto retry; 4355 } 4356 return (m); 4357 } 4358 4359 static void 4360 mmu_radix_pinit0(pmap_t pmap) 4361 { 4362 4363 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4364 PMAP_LOCK_INIT(pmap); 4365 pmap->pm_pml1 = kernel_pmap->pm_pml1; 4366 pmap->pm_pid = kernel_pmap->pm_pid; 4367 4368 pmap->pm_radix.rt_root = 0; 4369 TAILQ_INIT(&pmap->pm_pvchunk); 4370 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 4371 kernel_pmap->pm_flags = 4372 pmap->pm_flags = PMAP_PDE_SUPERPAGE; 4373 } 4374 /* 4375 * pmap_protect_l3e: do the things to protect a 2mpage in a process 4376 */ 4377 static boolean_t 4378 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot) 4379 { 4380 pt_entry_t newpde, oldpde; 4381 vm_offset_t eva, va; 4382 vm_page_t m; 4383 boolean_t anychanged; 4384 4385 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4386 KASSERT((sva & L3_PAGE_MASK) == 0, 4387 ("pmap_protect_l3e: sva is not 2mpage aligned")); 4388 anychanged = FALSE; 4389 retry: 4390 oldpde = newpde = *l3e; 4391 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 4392 (PG_MANAGED | PG_M | PG_RW)) { 4393 eva = sva + L3_PAGE_SIZE; 4394 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 4395 va < eva; va += PAGE_SIZE, m++) 4396 vm_page_dirty(m); 4397 } 4398 if ((prot & VM_PROT_WRITE) == 0) { 4399 newpde &= ~(PG_RW | PG_M); 4400 newpde |= RPTE_EAA_R; 4401 } 4402 if (prot & VM_PROT_EXECUTE) 4403 newpde |= PG_X; 4404 if (newpde != oldpde) { 4405 /* 4406 * As an optimization to future operations on this PDE, clear 4407 * PG_PROMOTED. The impending invalidation will remove any 4408 * lingering 4KB page mappings from the TLB. 4409 */ 4410 if (!atomic_cmpset_long(l3e, oldpde, newpde & ~PG_PROMOTED)) 4411 goto retry; 4412 anychanged = TRUE; 4413 } 4414 return (anychanged); 4415 } 4416 4417 void 4418 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 4419 vm_prot_t prot) 4420 { 4421 vm_offset_t va_next; 4422 pml1_entry_t *l1e; 4423 pml2_entry_t *l2e; 4424 pml3_entry_t ptpaddr, *l3e; 4425 pt_entry_t *pte; 4426 boolean_t anychanged; 4427 4428 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva, 4429 prot); 4430 4431 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 4432 if (prot == VM_PROT_NONE) { 4433 mmu_radix_remove(pmap, sva, eva); 4434 return; 4435 } 4436 4437 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 4438 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 4439 return; 4440 4441 #ifdef INVARIANTS 4442 if (VERBOSE_PROTECT || pmap_logging) 4443 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n", 4444 pmap, sva, eva, prot, pmap->pm_pid); 4445 #endif 4446 anychanged = FALSE; 4447 4448 PMAP_LOCK(pmap); 4449 for (; sva < eva; sva = va_next) { 4450 l1e = pmap_pml1e(pmap, sva); 4451 if ((*l1e & PG_V) == 0) { 4452 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 4453 if (va_next < sva) 4454 va_next = eva; 4455 continue; 4456 } 4457 4458 l2e = pmap_l1e_to_l2e(l1e, sva); 4459 if ((*l2e & PG_V) == 0) { 4460 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 4461 if (va_next < sva) 4462 va_next = eva; 4463 continue; 4464 } 4465 4466 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 4467 if (va_next < sva) 4468 va_next = eva; 4469 4470 l3e = pmap_l2e_to_l3e(l2e, sva); 4471 ptpaddr = *l3e; 4472 4473 /* 4474 * Weed out invalid mappings. 4475 */ 4476 if (ptpaddr == 0) 4477 continue; 4478 4479 /* 4480 * Check for large page. 4481 */ 4482 if ((ptpaddr & RPTE_LEAF) != 0) { 4483 /* 4484 * Are we protecting the entire large page? 
If not, 4485 * demote the mapping and fall through. 4486 */ 4487 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 4488 if (pmap_protect_l3e(pmap, l3e, sva, prot)) 4489 anychanged = TRUE; 4490 continue; 4491 } else if (!pmap_demote_l3e(pmap, l3e, sva)) { 4492 /* 4493 * The large page mapping was destroyed. 4494 */ 4495 continue; 4496 } 4497 } 4498 4499 if (va_next > eva) 4500 va_next = eva; 4501 4502 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++, 4503 sva += PAGE_SIZE) { 4504 pt_entry_t obits, pbits; 4505 vm_page_t m; 4506 4507 retry: 4508 MPASS(pte == pmap_pte(pmap, sva)); 4509 obits = pbits = *pte; 4510 if ((pbits & PG_V) == 0) 4511 continue; 4512 4513 if ((prot & VM_PROT_WRITE) == 0) { 4514 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 4515 (PG_MANAGED | PG_M | PG_RW)) { 4516 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 4517 vm_page_dirty(m); 4518 } 4519 pbits &= ~(PG_RW | PG_M); 4520 pbits |= RPTE_EAA_R; 4521 } 4522 if (prot & VM_PROT_EXECUTE) 4523 pbits |= PG_X; 4524 4525 if (pbits != obits) { 4526 if (!atomic_cmpset_long(pte, obits, pbits)) 4527 goto retry; 4528 if (obits & (PG_A|PG_M)) { 4529 anychanged = TRUE; 4530 #ifdef INVARIANTS 4531 if (VERBOSE_PROTECT || pmap_logging) 4532 printf("%#lx %#lx -> %#lx\n", 4533 sva, obits, pbits); 4534 #endif 4535 } 4536 } 4537 } 4538 } 4539 if (anychanged) 4540 pmap_invalidate_all(pmap); 4541 PMAP_UNLOCK(pmap); 4542 } 4543 4544 void 4545 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count) 4546 { 4547 4548 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count); 4549 pt_entry_t oldpte, pa, *pte; 4550 vm_page_t m; 4551 uint64_t cache_bits, attr_bits; 4552 vm_offset_t va; 4553 4554 oldpte = 0; 4555 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A; 4556 va = sva; 4557 pte = kvtopte(va); 4558 while (va < sva + PAGE_SIZE * count) { 4559 if (__predict_false((va & L3_PAGE_MASK) == 0)) 4560 pte = kvtopte(va); 4561 MPASS(pte == pmap_pte(kernel_pmap, va)); 4562 4563 /* 4564 * XXX there has to be a more efficient way than traversing 4565 * the page table every time - but go for correctness for 4566 * today 4567 */ 4568 4569 m = *ma++; 4570 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs); 4571 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits; 4572 if (*pte != pa) { 4573 oldpte |= *pte; 4574 pte_store(pte, pa); 4575 } 4576 va += PAGE_SIZE; 4577 pte++; 4578 } 4579 if (__predict_false((oldpte & RPTE_VALID) != 0)) 4580 pmap_invalidate_range(kernel_pmap, sva, sva + count * 4581 PAGE_SIZE); 4582 else 4583 ptesync(); 4584 } 4585 4586 void 4587 mmu_radix_qremove(vm_offset_t sva, int count) 4588 { 4589 vm_offset_t va; 4590 pt_entry_t *pte; 4591 4592 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count); 4593 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva)); 4594 4595 va = sva; 4596 pte = kvtopte(va); 4597 while (va < sva + PAGE_SIZE * count) { 4598 if (__predict_false((va & L3_PAGE_MASK) == 0)) 4599 pte = kvtopte(va); 4600 pte_clear(pte); 4601 pte++; 4602 va += PAGE_SIZE; 4603 } 4604 pmap_invalidate_range(kernel_pmap, sva, va); 4605 } 4606 4607 /*************************************************** 4608 * Page table page management routines..... 4609 ***************************************************/ 4610 /* 4611 * Schedule the specified unused page table page to be freed. Specifically, 4612 * add the page to the specified list of pages that will be released to the 4613 * physical memory manager after the TLB has been updated. 
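 * The set_PG_ZERO argument records whether the page's contents are
 * known to be zero-filled, so that such pages can later be reused
 * without being zeroed again.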
4614 */ 4615 static __inline void 4616 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 4617 boolean_t set_PG_ZERO) 4618 { 4619 4620 if (set_PG_ZERO) 4621 m->flags |= PG_ZERO; 4622 else 4623 m->flags &= ~PG_ZERO; 4624 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 4625 } 4626 4627 /* 4628 * Inserts the specified page table page into the specified pmap's collection 4629 * of idle page table pages. Each of a pmap's page table pages is responsible 4630 * for mapping a distinct range of virtual addresses. The pmap's collection is 4631 * ordered by this virtual address range. 4632 */ 4633 static __inline int 4634 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) 4635 { 4636 4637 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4638 return (vm_radix_insert(&pmap->pm_radix, mpte)); 4639 } 4640 4641 /* 4642 * Removes the page table page mapping the specified virtual address from the 4643 * specified pmap's collection of idle page table pages, and returns it. 4644 * Otherwise, returns NULL if there is no page table page corresponding to the 4645 * specified virtual address. 4646 */ 4647 static __inline vm_page_t 4648 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 4649 { 4650 4651 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4652 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va))); 4653 } 4654 4655 /* 4656 * Decrements a page table page's wire count, which is used to record the 4657 * number of valid page table entries within the page. If the wire count 4658 * drops to zero, then the page table page is unmapped. Returns TRUE if the 4659 * page table page was unmapped and FALSE otherwise. 4660 */ 4661 static inline boolean_t 4662 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 4663 { 4664 4665 --m->ref_count; 4666 if (m->ref_count == 0) { 4667 _pmap_unwire_ptp(pmap, va, m, free); 4668 return (TRUE); 4669 } else 4670 return (FALSE); 4671 } 4672 4673 static void 4674 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 4675 { 4676 4677 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4678 /* 4679 * unmap the page table page 4680 */ 4681 if (m->pindex >= (NUPDE + NUPDPE)) { 4682 /* PDP page */ 4683 pml1_entry_t *pml1; 4684 pml1 = pmap_pml1e(pmap, va); 4685 *pml1 = 0; 4686 } else if (m->pindex >= NUPDE) { 4687 /* PD page */ 4688 pml2_entry_t *l2e; 4689 l2e = pmap_pml2e(pmap, va); 4690 *l2e = 0; 4691 } else { 4692 /* PTE page */ 4693 pml3_entry_t *l3e; 4694 l3e = pmap_pml3e(pmap, va); 4695 *l3e = 0; 4696 } 4697 pmap_resident_count_dec(pmap, 1); 4698 if (m->pindex < NUPDE) { 4699 /* We just released a PT, unhold the matching PD */ 4700 vm_page_t pdpg; 4701 4702 pdpg = PHYS_TO_VM_PAGE(*pmap_pml2e(pmap, va) & PG_FRAME); 4703 pmap_unwire_ptp(pmap, va, pdpg, free); 4704 } 4705 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) { 4706 /* We just released a PD, unhold the matching PDP */ 4707 vm_page_t pdppg; 4708 4709 pdppg = PHYS_TO_VM_PAGE(*pmap_pml1e(pmap, va) & PG_FRAME); 4710 pmap_unwire_ptp(pmap, va, pdppg, free); 4711 } 4712 4713 /* 4714 * Put page on a list so that it is released after 4715 * *ALL* TLB shootdown is done 4716 */ 4717 pmap_add_delayed_free_list(m, free, TRUE); 4718 } 4719 4720 /* 4721 * After removing a page table entry, this routine is used to 4722 * conditionally free the page, and manage the hold/wire counts. 
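 * Kernel page table pages are never freed, hence the early return for
 * kernel addresses; for user mappings the page identified by ptepde
 * (the page table page that held the removed entry) has one reference
 * dropped via pmap_unwire_ptp().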
4723 */ 4724 static int 4725 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde, 4726 struct spglist *free) 4727 { 4728 vm_page_t mpte; 4729 4730 if (va >= VM_MAXUSER_ADDRESS) 4731 return (0); 4732 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); 4733 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 4734 return (pmap_unwire_ptp(pmap, va, mpte, free)); 4735 } 4736 4737 void 4738 mmu_radix_release(pmap_t pmap) 4739 { 4740 4741 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4742 KASSERT(pmap->pm_stats.resident_count == 0, 4743 ("pmap_release: pmap resident count %ld != 0", 4744 pmap->pm_stats.resident_count)); 4745 KASSERT(vm_radix_is_empty(&pmap->pm_radix), 4746 ("pmap_release: pmap has reserved page table page(s)")); 4747 4748 pmap_invalidate_all(pmap); 4749 isa3_proctab[pmap->pm_pid].proctab0 = 0; 4750 uma_zfree(zone_radix_pgd, pmap->pm_pml1); 4751 vmem_free(asid_arena, pmap->pm_pid, 1); 4752 } 4753 4754 /* 4755 * Create the PV entry for a 2MB page mapping. Always returns true unless the 4756 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns 4757 * false if the PV entry cannot be allocated without resorting to reclamation. 4758 */ 4759 static bool 4760 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags, 4761 struct rwlock **lockp) 4762 { 4763 struct md_page *pvh; 4764 pv_entry_t pv; 4765 vm_paddr_t pa; 4766 4767 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4768 /* Pass NULL instead of the lock pointer to disable reclamation. */ 4769 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ? 4770 NULL : lockp)) == NULL) 4771 return (false); 4772 pv->pv_va = va; 4773 pa = pde & PG_PS_FRAME; 4774 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 4775 pvh = pa_to_pvh(pa); 4776 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 4777 pvh->pv_gen++; 4778 return (true); 4779 } 4780 4781 /* 4782 * Fills a page table page with mappings to consecutive physical pages. 4783 */ 4784 static void 4785 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 4786 { 4787 pt_entry_t *pte; 4788 4789 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 4790 *pte = newpte; 4791 newpte += PAGE_SIZE; 4792 } 4793 } 4794 4795 static boolean_t 4796 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va) 4797 { 4798 struct rwlock *lock; 4799 boolean_t rv; 4800 4801 lock = NULL; 4802 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock); 4803 if (lock != NULL) 4804 rw_wunlock(lock); 4805 return (rv); 4806 } 4807 4808 static boolean_t 4809 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, 4810 struct rwlock **lockp) 4811 { 4812 pml3_entry_t oldpde; 4813 pt_entry_t *firstpte; 4814 vm_paddr_t mptepa; 4815 vm_page_t mpte; 4816 struct spglist free; 4817 vm_offset_t sva; 4818 4819 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4820 oldpde = *l3e; 4821 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V), 4822 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx", 4823 oldpde)); 4824 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 4825 NULL) { 4826 KASSERT((oldpde & PG_W) == 0, 4827 ("pmap_demote_l3e: page table page for a wired mapping" 4828 " is missing")); 4829 4830 /* 4831 * Invalidate the 2MB page mapping and return "failure" if the 4832 * mapping was never accessed or the allocation of the new 4833 * page table page fails. If the 2MB page mapping belongs to 4834 * the direct map region of the kernel's address space, then 4835 * the page allocation request specifies the highest possible 4836 * priority (VM_ALLOC_INTERRUPT). 
Otherwise, the priority is 4837 * normal. Page table pages are preallocated for every other 4838 * part of the kernel address space, so the direct map region 4839 * is the only part of the kernel address space that must be 4840 * handled here. 4841 */ 4842 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, 4843 pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va < 4844 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | 4845 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 4846 SLIST_INIT(&free); 4847 sva = trunc_2mpage(va); 4848 pmap_remove_l3e(pmap, l3e, sva, &free, lockp); 4849 pmap_invalidate_l3e_page(pmap, sva, oldpde); 4850 vm_page_free_pages_toq(&free, true); 4851 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx" 4852 " in pmap %p", va, pmap); 4853 return (FALSE); 4854 } 4855 if (va < VM_MAXUSER_ADDRESS) 4856 pmap_resident_count_inc(pmap, 1); 4857 } 4858 mptepa = VM_PAGE_TO_PHYS(mpte); 4859 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); 4860 KASSERT((oldpde & PG_A) != 0, 4861 ("pmap_demote_l3e: oldpde is missing PG_A")); 4862 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 4863 ("pmap_demote_l3e: oldpde is missing PG_M")); 4864 4865 /* 4866 * If the page table page is new, initialize it. 4867 */ 4868 if (mpte->ref_count == 1) { 4869 mpte->ref_count = NPTEPG; 4870 pmap_fill_ptp(firstpte, oldpde); 4871 } 4872 4873 KASSERT((*firstpte & PG_FRAME) == (oldpde & PG_FRAME), 4874 ("pmap_demote_l3e: firstpte and newpte map different physical" 4875 " addresses")); 4876 4877 /* 4878 * If the mapping has changed attributes, update the page table 4879 * entries. 4880 */ 4881 if ((*firstpte & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE)) 4882 pmap_fill_ptp(firstpte, oldpde); 4883 4884 /* 4885 * The spare PV entries must be reserved prior to demoting the 4886 * mapping, that is, prior to changing the PDE. Otherwise, the state 4887 * of the PDE and the PV lists will be inconsistent, which can result 4888 * in reclaim_pv_chunk() attempting to remove a PV entry from the 4889 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected 4890 * PV entry for the 2MB page mapping that is being demoted. 4891 */ 4892 if ((oldpde & PG_MANAGED) != 0) 4893 reserve_pv_entries(pmap, NPTEPG - 1, lockp); 4894 4895 /* 4896 * Demote the mapping. This pmap is locked. The old PDE has 4897 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 4898 * set. Thus, there is no danger of a race with another 4899 * processor changing the setting of PG_A and/or PG_M between 4900 * the read above and the store below. 4901 */ 4902 pde_store(l3e, mptepa); 4903 ptesync(); 4904 /* 4905 * Demote the PV entry. 4906 */ 4907 if ((oldpde & PG_MANAGED) != 0) 4908 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp); 4909 4910 atomic_add_long(&pmap_l3e_demotions, 1); 4911 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx" 4912 " in pmap %p", va, pmap); 4913 return (TRUE); 4914 } 4915 4916 /* 4917 * pmap_remove_kernel_pde: Remove a kernel superpage mapping. 4918 */ 4919 static void 4920 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va) 4921 { 4922 vm_paddr_t mptepa; 4923 vm_page_t mpte; 4924 4925 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap)); 4926 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4927 mpte = pmap_remove_pt_page(pmap, va); 4928 if (mpte == NULL) 4929 panic("pmap_remove_kernel_pde: Missing pt page."); 4930 4931 mptepa = VM_PAGE_TO_PHYS(mpte); 4932 4933 /* 4934 * Initialize the page table page. 
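 * The kernel superpage is being removed outright, so the replacement
 * page table page is simply zeroed and the L3 slot is pointed at it;
 * the kernel region keeps a valid, but empty, page table rather than
 * a leaf mapping.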
4935 */ 4936 pagezero(PHYS_TO_DMAP(mptepa)); 4937 4938 /* 4939 * Demote the mapping. 4940 */ 4941 pde_store(l3e, mptepa); 4942 ptesync(); 4943 } 4944 4945 /* 4946 * pmap_remove_l3e: do the things to unmap a superpage in a process 4947 */ 4948 static int 4949 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, 4950 struct spglist *free, struct rwlock **lockp) 4951 { 4952 struct md_page *pvh; 4953 pml3_entry_t oldpde; 4954 vm_offset_t eva, va; 4955 vm_page_t m, mpte; 4956 4957 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4958 KASSERT((sva & L3_PAGE_MASK) == 0, 4959 ("pmap_remove_l3e: sva is not 2mpage aligned")); 4960 oldpde = pte_load_clear(pdq); 4961 if (oldpde & PG_W) 4962 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE); 4963 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE); 4964 if (oldpde & PG_MANAGED) { 4965 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME); 4966 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 4967 pmap_pvh_free(pvh, pmap, sva); 4968 eva = sva + L3_PAGE_SIZE; 4969 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 4970 va < eva; va += PAGE_SIZE, m++) { 4971 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 4972 vm_page_dirty(m); 4973 if (oldpde & PG_A) 4974 vm_page_aflag_set(m, PGA_REFERENCED); 4975 if (TAILQ_EMPTY(&m->md.pv_list) && 4976 TAILQ_EMPTY(&pvh->pv_list)) 4977 vm_page_aflag_clear(m, PGA_WRITEABLE); 4978 } 4979 } 4980 if (pmap == kernel_pmap) { 4981 pmap_remove_kernel_l3e(pmap, pdq, sva); 4982 } else { 4983 mpte = pmap_remove_pt_page(pmap, sva); 4984 if (mpte != NULL) { 4985 pmap_resident_count_dec(pmap, 1); 4986 KASSERT(mpte->ref_count == NPTEPG, 4987 ("pmap_remove_l3e: pte page wire count error")); 4988 mpte->ref_count = 0; 4989 pmap_add_delayed_free_list(mpte, free, FALSE); 4990 } 4991 } 4992 return (pmap_unuse_pt(pmap, sva, *pmap_pml2e(pmap, sva), free)); 4993 } 4994 4995 /* 4996 * pmap_remove_pte: do the things to unmap a page in a process 4997 */ 4998 static int 4999 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 5000 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp) 5001 { 5002 struct md_page *pvh; 5003 pt_entry_t oldpte; 5004 vm_page_t m; 5005 5006 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5007 oldpte = pte_load_clear(ptq); 5008 if (oldpte & RPTE_WIRED) 5009 pmap->pm_stats.wired_count -= 1; 5010 pmap_resident_count_dec(pmap, 1); 5011 if (oldpte & RPTE_MANAGED) { 5012 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 5013 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5014 vm_page_dirty(m); 5015 if (oldpte & PG_A) 5016 vm_page_aflag_set(m, PGA_REFERENCED); 5017 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 5018 pmap_pvh_free(&m->md, pmap, va); 5019 if (TAILQ_EMPTY(&m->md.pv_list) && 5020 (m->flags & PG_FICTITIOUS) == 0) { 5021 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5022 if (TAILQ_EMPTY(&pvh->pv_list)) 5023 vm_page_aflag_clear(m, PGA_WRITEABLE); 5024 } 5025 } 5026 return (pmap_unuse_pt(pmap, va, ptepde, free)); 5027 } 5028 5029 /* 5030 * Remove a single page from a process address space 5031 */ 5032 static bool 5033 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e, 5034 struct spglist *free) 5035 { 5036 struct rwlock *lock; 5037 pt_entry_t *pte; 5038 bool invalidate_all; 5039 5040 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5041 if ((*l3e & RPTE_VALID) == 0) { 5042 return (false); 5043 } 5044 pte = pmap_l3e_to_pte(l3e, va); 5045 if ((*pte & RPTE_VALID) == 0) { 5046 return (false); 5047 } 5048 lock = NULL; 5049 5050 invalidate_all = pmap_remove_pte(pmap, pte, va, *l3e, free, &lock); 5051 if (lock != NULL) 5052 
rw_wunlock(lock); 5053 if (!invalidate_all) 5054 pmap_invalidate_page(pmap, va); 5055 return (invalidate_all); 5056 } 5057 5058 /* 5059 * Removes the specified range of addresses from the page table page. 5060 */ 5061 static bool 5062 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5063 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp) 5064 { 5065 pt_entry_t *pte; 5066 vm_offset_t va; 5067 bool anyvalid; 5068 5069 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5070 anyvalid = false; 5071 va = eva; 5072 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++, 5073 sva += PAGE_SIZE) { 5074 MPASS(pte == pmap_pte(pmap, sva)); 5075 if (*pte == 0) { 5076 if (va != eva) { 5077 anyvalid = true; 5078 va = eva; 5079 } 5080 continue; 5081 } 5082 if (va == eva) 5083 va = sva; 5084 if (pmap_remove_pte(pmap, pte, sva, *l3e, free, lockp)) { 5085 anyvalid = true; 5086 sva += PAGE_SIZE; 5087 break; 5088 } 5089 } 5090 if (anyvalid) 5091 pmap_invalidate_all(pmap); 5092 else if (va != eva) 5093 pmap_invalidate_range(pmap, va, sva); 5094 return (anyvalid); 5095 } 5096 5097 void 5098 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5099 { 5100 struct rwlock *lock; 5101 vm_offset_t va_next; 5102 pml1_entry_t *l1e; 5103 pml2_entry_t *l2e; 5104 pml3_entry_t ptpaddr, *l3e; 5105 struct spglist free; 5106 bool anyvalid; 5107 5108 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); 5109 5110 /* 5111 * Perform an unsynchronized read. This is, however, safe. 5112 */ 5113 if (pmap->pm_stats.resident_count == 0) 5114 return; 5115 5116 anyvalid = false; 5117 SLIST_INIT(&free); 5118 5119 /* XXX something fishy here */ 5120 sva = (sva + PAGE_MASK) & ~PAGE_MASK; 5121 eva = (eva + PAGE_MASK) & ~PAGE_MASK; 5122 5123 PMAP_LOCK(pmap); 5124 5125 /* 5126 * special handling of removing one page. a very 5127 * common operation and easy to short circuit some 5128 * code. 5129 */ 5130 if (sva + PAGE_SIZE == eva) { 5131 l3e = pmap_pml3e(pmap, sva); 5132 if (l3e && (*l3e & RPTE_LEAF) == 0) { 5133 anyvalid = pmap_remove_page(pmap, sva, l3e, &free); 5134 goto out; 5135 } 5136 } 5137 5138 lock = NULL; 5139 for (; sva < eva; sva = va_next) { 5140 if (pmap->pm_stats.resident_count == 0) 5141 break; 5142 l1e = pmap_pml1e(pmap, sva); 5143 if (l1e == NULL || (*l1e & PG_V) == 0) { 5144 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 5145 if (va_next < sva) 5146 va_next = eva; 5147 continue; 5148 } 5149 5150 l2e = pmap_l1e_to_l2e(l1e, sva); 5151 if (l2e == NULL || (*l2e & PG_V) == 0) { 5152 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 5153 if (va_next < sva) 5154 va_next = eva; 5155 continue; 5156 } 5157 5158 /* 5159 * Calculate index for next page table. 5160 */ 5161 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 5162 if (va_next < sva) 5163 va_next = eva; 5164 5165 l3e = pmap_l2e_to_l3e(l2e, sva); 5166 ptpaddr = *l3e; 5167 5168 /* 5169 * Weed out invalid mappings. 5170 */ 5171 if (ptpaddr == 0) 5172 continue; 5173 5174 /* 5175 * Check for large page. 5176 */ 5177 if ((ptpaddr & RPTE_LEAF) != 0) { 5178 /* 5179 * Are we removing the entire large page? If not, 5180 * demote the mapping and fall through. 5181 */ 5182 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 5183 pmap_remove_l3e(pmap, l3e, sva, &free, &lock); 5184 continue; 5185 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva, 5186 &lock)) { 5187 /* The large page mapping was destroyed. 
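 * Demotion only fails after pmap_demote_l3e_locked() has itself
 * removed the 2MB mapping, so there is nothing left to unmap in this
 * region and the scan moves on to the next one.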
*/ 5188 continue; 5189 } else 5190 ptpaddr = *l3e; 5191 } 5192 5193 /* 5194 * Limit our scan to either the end of the va represented 5195 * by the current page table page, or to the end of the 5196 * range being removed. 5197 */ 5198 if (va_next > eva) 5199 va_next = eva; 5200 5201 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock)) 5202 anyvalid = true; 5203 } 5204 if (lock != NULL) 5205 rw_wunlock(lock); 5206 out: 5207 if (anyvalid) 5208 pmap_invalidate_all(pmap); 5209 PMAP_UNLOCK(pmap); 5210 vm_page_free_pages_toq(&free, true); 5211 } 5212 5213 void 5214 mmu_radix_remove_all(vm_page_t m) 5215 { 5216 struct md_page *pvh; 5217 pv_entry_t pv; 5218 pmap_t pmap; 5219 struct rwlock *lock; 5220 pt_entry_t *pte, tpte; 5221 pml3_entry_t *l3e; 5222 vm_offset_t va; 5223 struct spglist free; 5224 int pvh_gen, md_gen; 5225 5226 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5227 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5228 ("pmap_remove_all: page %p is not managed", m)); 5229 SLIST_INIT(&free); 5230 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5231 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : 5232 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5233 retry: 5234 rw_wlock(lock); 5235 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 5236 pmap = PV_PMAP(pv); 5237 if (!PMAP_TRYLOCK(pmap)) { 5238 pvh_gen = pvh->pv_gen; 5239 rw_wunlock(lock); 5240 PMAP_LOCK(pmap); 5241 rw_wlock(lock); 5242 if (pvh_gen != pvh->pv_gen) { 5243 rw_wunlock(lock); 5244 PMAP_UNLOCK(pmap); 5245 goto retry; 5246 } 5247 } 5248 va = pv->pv_va; 5249 l3e = pmap_pml3e(pmap, va); 5250 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock); 5251 PMAP_UNLOCK(pmap); 5252 } 5253 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 5254 pmap = PV_PMAP(pv); 5255 if (!PMAP_TRYLOCK(pmap)) { 5256 pvh_gen = pvh->pv_gen; 5257 md_gen = m->md.pv_gen; 5258 rw_wunlock(lock); 5259 PMAP_LOCK(pmap); 5260 rw_wlock(lock); 5261 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 5262 rw_wunlock(lock); 5263 PMAP_UNLOCK(pmap); 5264 goto retry; 5265 } 5266 } 5267 pmap_resident_count_dec(pmap, 1); 5268 l3e = pmap_pml3e(pmap, pv->pv_va); 5269 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_remove_all: found" 5270 " a 2mpage in page %p's pv list", m)); 5271 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 5272 tpte = pte_load_clear(pte); 5273 if (tpte & PG_W) 5274 pmap->pm_stats.wired_count--; 5275 if (tpte & PG_A) 5276 vm_page_aflag_set(m, PGA_REFERENCED); 5277 5278 /* 5279 * Update the vm_page_t clean and reference bits. 5280 */ 5281 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5282 vm_page_dirty(m); 5283 pmap_unuse_pt(pmap, pv->pv_va, *l3e, &free); 5284 pmap_invalidate_page(pmap, pv->pv_va); 5285 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 5286 m->md.pv_gen++; 5287 free_pv_entry(pmap, pv); 5288 PMAP_UNLOCK(pmap); 5289 } 5290 vm_page_aflag_clear(m, PGA_WRITEABLE); 5291 rw_wunlock(lock); 5292 vm_page_free_pages_toq(&free, true); 5293 } 5294 5295 /* 5296 * Destroy all managed, non-wired mappings in the given user-space 5297 * pmap. This pmap cannot be active on any processor besides the 5298 * caller. 5299 * 5300 * This function cannot be applied to the kernel pmap. Moreover, it 5301 * is not intended for general use. It is only to be used during 5302 * process termination. Consequently, it can be implemented in ways 5303 * that make it faster than pmap_remove(). First, it can more quickly 5304 * destroy mappings by iterating over the pmap's collection of PV 5305 * entries, rather than searching the page table. 
Second, it doesn't 5306 * have to test and clear the page table entries atomically, because 5307 * no processor is currently accessing the user address space. In 5308 * particular, a page table entry's dirty bit won't change state once 5309 * this function starts. 5310 * 5311 * Although this function destroys all of the pmap's managed, 5312 * non-wired mappings, it can delay and batch the invalidation of TLB 5313 * entries without calling pmap_delayed_invl_started() and 5314 * pmap_delayed_invl_finished(). Because the pmap is not active on 5315 * any other processor, none of these TLB entries will ever be used 5316 * before their eventual invalidation. Consequently, there is no need 5317 * for either pmap_remove_all() or pmap_remove_write() to wait for 5318 * that eventual TLB invalidation. 5319 */ 5320 5321 void 5322 mmu_radix_remove_pages(pmap_t pmap) 5323 { 5324 5325 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 5326 pml3_entry_t ptel3e; 5327 pt_entry_t *pte, tpte; 5328 struct spglist free; 5329 vm_page_t m, mpte, mt; 5330 pv_entry_t pv; 5331 struct md_page *pvh; 5332 struct pv_chunk *pc, *npc; 5333 struct rwlock *lock; 5334 int64_t bit; 5335 uint64_t inuse, bitmask; 5336 int allfree, field, freed, idx; 5337 boolean_t superpage; 5338 vm_paddr_t pa; 5339 5340 /* 5341 * Assert that the given pmap is only active on the current 5342 * CPU. Unfortunately, we cannot block another CPU from 5343 * activating the pmap while this function is executing. 5344 */ 5345 KASSERT(pmap->pm_pid == mfspr(SPR_PID), 5346 ("non-current asid %lu - expected %lu", pmap->pm_pid, 5347 mfspr(SPR_PID))); 5348 5349 lock = NULL; 5350 5351 SLIST_INIT(&free); 5352 PMAP_LOCK(pmap); 5353 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 5354 allfree = 1; 5355 freed = 0; 5356 for (field = 0; field < _NPCM; field++) { 5357 inuse = ~pc->pc_map[field] & pc_freemask[field]; 5358 while (inuse != 0) { 5359 bit = cnttzd(inuse); 5360 bitmask = 1UL << bit; 5361 idx = field * 64 + bit; 5362 pv = &pc->pc_pventry[idx]; 5363 inuse &= ~bitmask; 5364 5365 pte = pmap_pml2e(pmap, pv->pv_va); 5366 ptel3e = *pte; 5367 pte = pmap_l2e_to_l3e(pte, pv->pv_va); 5368 tpte = *pte; 5369 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) { 5370 superpage = FALSE; 5371 ptel3e = tpte; 5372 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & 5373 PG_FRAME); 5374 pte = &pte[pmap_pte_index(pv->pv_va)]; 5375 tpte = *pte; 5376 } else { 5377 /* 5378 * Keep track whether 'tpte' is a 5379 * superpage explicitly instead of 5380 * relying on RPTE_LEAF being set. 5381 * 5382 * This is because RPTE_LEAF is numerically 5383 * identical to PG_PTE_PAT and thus a 5384 * regular page could be mistaken for 5385 * a superpage. 5386 */ 5387 superpage = TRUE; 5388 } 5389 5390 if ((tpte & PG_V) == 0) { 5391 panic("bad pte va %lx pte %lx", 5392 pv->pv_va, tpte); 5393 } 5394 5395 /* 5396 * We cannot remove wired pages from a process' mapping at this time 5397 */ 5398 if (tpte & PG_W) { 5399 allfree = 0; 5400 continue; 5401 } 5402 5403 if (superpage) 5404 pa = tpte & PG_PS_FRAME; 5405 else 5406 pa = tpte & PG_FRAME; 5407 5408 m = PHYS_TO_VM_PAGE(pa); 5409 KASSERT(m->phys_addr == pa, 5410 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 5411 m, (uintmax_t)m->phys_addr, 5412 (uintmax_t)tpte)); 5413 5414 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 5415 m < &vm_page_array[vm_page_array_size], 5416 ("pmap_remove_pages: bad tpte %#jx", 5417 (uintmax_t)tpte)); 5418 5419 pte_clear(pte); 5420 5421 /* 5422 * Update the vm_page_t clean/reference bits. 
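 * A writable mapping with the modified bit set dirties the backing
 * page; for a superpage every 4KB page it spans (L3_PAGE_SIZE /
 * PAGE_SIZE of them) is marked dirty.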
5423 */ 5424 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5425 if (superpage) { 5426 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 5427 vm_page_dirty(mt); 5428 } else 5429 vm_page_dirty(m); 5430 } 5431 5432 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); 5433 5434 /* Mark free */ 5435 pc->pc_map[field] |= bitmask; 5436 if (superpage) { 5437 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE); 5438 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 5439 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 5440 pvh->pv_gen++; 5441 if (TAILQ_EMPTY(&pvh->pv_list)) { 5442 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 5443 if ((mt->a.flags & PGA_WRITEABLE) != 0 && 5444 TAILQ_EMPTY(&mt->md.pv_list)) 5445 vm_page_aflag_clear(mt, PGA_WRITEABLE); 5446 } 5447 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 5448 if (mpte != NULL) { 5449 pmap_resident_count_dec(pmap, 1); 5450 KASSERT(mpte->ref_count == NPTEPG, 5451 ("pmap_remove_pages: pte page wire count error")); 5452 mpte->ref_count = 0; 5453 pmap_add_delayed_free_list(mpte, &free, FALSE); 5454 } 5455 } else { 5456 pmap_resident_count_dec(pmap, 1); 5457 #ifdef VERBOSE_PV 5458 printf("freeing pv (%p, %p)\n", 5459 pmap, pv); 5460 #endif 5461 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 5462 m->md.pv_gen++; 5463 if ((m->a.flags & PGA_WRITEABLE) != 0 && 5464 TAILQ_EMPTY(&m->md.pv_list) && 5465 (m->flags & PG_FICTITIOUS) == 0) { 5466 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5467 if (TAILQ_EMPTY(&pvh->pv_list)) 5468 vm_page_aflag_clear(m, PGA_WRITEABLE); 5469 } 5470 } 5471 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free); 5472 freed++; 5473 } 5474 } 5475 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 5476 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 5477 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 5478 if (allfree) { 5479 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 5480 free_pv_chunk(pc); 5481 } 5482 } 5483 if (lock != NULL) 5484 rw_wunlock(lock); 5485 pmap_invalidate_all(pmap); 5486 PMAP_UNLOCK(pmap); 5487 vm_page_free_pages_toq(&free, true); 5488 } 5489 5490 void 5491 mmu_radix_remove_write(vm_page_t m) 5492 { 5493 struct md_page *pvh; 5494 pmap_t pmap; 5495 struct rwlock *lock; 5496 pv_entry_t next_pv, pv; 5497 pml3_entry_t *l3e; 5498 pt_entry_t oldpte, *pte; 5499 int pvh_gen, md_gen; 5500 5501 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5502 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5503 ("pmap_remove_write: page %p is not managed", m)); 5504 vm_page_assert_busied(m); 5505 5506 if (!pmap_page_is_write_mapped(m)) 5507 return; 5508 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5509 pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : 5510 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5511 retry_pv_loop: 5512 rw_wlock(lock); 5513 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) { 5514 pmap = PV_PMAP(pv); 5515 if (!PMAP_TRYLOCK(pmap)) { 5516 pvh_gen = pvh->pv_gen; 5517 rw_wunlock(lock); 5518 PMAP_LOCK(pmap); 5519 rw_wlock(lock); 5520 if (pvh_gen != pvh->pv_gen) { 5521 PMAP_UNLOCK(pmap); 5522 rw_wunlock(lock); 5523 goto retry_pv_loop; 5524 } 5525 } 5526 l3e = pmap_pml3e(pmap, pv->pv_va); 5527 if ((*l3e & PG_RW) != 0) 5528 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock); 5529 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 5530 ("inconsistent pv lock %p %p for page %p", 5531 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 5532 PMAP_UNLOCK(pmap); 5533 } 5534 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 5535 pmap = PV_PMAP(pv); 5536 if (!PMAP_TRYLOCK(pmap)) { 5537 pvh_gen = pvh->pv_gen; 5538 md_gen = m->md.pv_gen; 5539 rw_wunlock(lock); 5540 PMAP_LOCK(pmap); 5541 rw_wlock(lock); 5542 if (pvh_gen != pvh->pv_gen || 5543 md_gen != m->md.pv_gen) { 5544 PMAP_UNLOCK(pmap); 5545 rw_wunlock(lock); 5546 goto retry_pv_loop; 5547 } 5548 } 5549 l3e = pmap_pml3e(pmap, pv->pv_va); 5550 KASSERT((*l3e & RPTE_LEAF) == 0, 5551 ("pmap_remove_write: found a 2mpage in page %p's pv list", 5552 m)); 5553 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 5554 retry: 5555 oldpte = *pte; 5556 if (oldpte & PG_RW) { 5557 if (!atomic_cmpset_long(pte, oldpte, 5558 (oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M))) 5559 goto retry; 5560 if ((oldpte & PG_M) != 0) 5561 vm_page_dirty(m); 5562 pmap_invalidate_page(pmap, pv->pv_va); 5563 } 5564 PMAP_UNLOCK(pmap); 5565 } 5566 rw_wunlock(lock); 5567 vm_page_aflag_clear(m, PGA_WRITEABLE); 5568 } 5569 5570 /* 5571 * Clear the wired attribute from the mappings for the specified range of 5572 * addresses in the given pmap. Every valid mapping within that range 5573 * must have the wired attribute set. In contrast, invalid mappings 5574 * cannot have the wired attribute set, so they are ignored. 5575 * 5576 * The wired attribute of the page table entry is not a hardware 5577 * feature, so there is no need to invalidate any TLB entries. 5578 * Since pmap_demote_l3e() for the wired entry must never fail, 5579 * pmap_delayed_invl_started()/finished() calls around the 5580 * function are not needed. 5581 */ 5582 void 5583 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5584 { 5585 vm_offset_t va_next; 5586 pml1_entry_t *l1e; 5587 pml2_entry_t *l2e; 5588 pml3_entry_t *l3e; 5589 pt_entry_t *pte; 5590 5591 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); 5592 PMAP_LOCK(pmap); 5593 for (; sva < eva; sva = va_next) { 5594 l1e = pmap_pml1e(pmap, sva); 5595 if ((*l1e & PG_V) == 0) { 5596 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 5597 if (va_next < sva) 5598 va_next = eva; 5599 continue; 5600 } 5601 l2e = pmap_l1e_to_l2e(l1e, sva); 5602 if ((*l2e & PG_V) == 0) { 5603 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 5604 if (va_next < sva) 5605 va_next = eva; 5606 continue; 5607 } 5608 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 5609 if (va_next < sva) 5610 va_next = eva; 5611 l3e = pmap_l2e_to_l3e(l2e, sva); 5612 if ((*l3e & PG_V) == 0) 5613 continue; 5614 if ((*l3e & RPTE_LEAF) != 0) { 5615 if ((*l3e & PG_W) == 0) 5616 panic("pmap_unwire: pde %#jx is missing PG_W", 5617 (uintmax_t)*l3e); 5618 5619 /* 5620 * Are we unwiring the entire large page? If not, 5621 * demote the mapping and fall through. 
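 * The 2MB leaf can be unwired in place only when [sva, eva) covers it
 * completely; PG_W is then cleared on the leaf and wired_count is
 * adjusted by the full number of 4KB pages it maps.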
5622 */ 5623 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 5624 atomic_clear_long(l3e, PG_W); 5625 pmap->pm_stats.wired_count -= L3_PAGE_SIZE / 5626 PAGE_SIZE; 5627 continue; 5628 } else if (!pmap_demote_l3e(pmap, l3e, sva)) 5629 panic("pmap_unwire: demotion failed"); 5630 } 5631 if (va_next > eva) 5632 va_next = eva; 5633 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++, 5634 sva += PAGE_SIZE) { 5635 MPASS(pte == pmap_pte(pmap, sva)); 5636 if ((*pte & PG_V) == 0) 5637 continue; 5638 if ((*pte & PG_W) == 0) 5639 panic("pmap_unwire: pte %#jx is missing PG_W", 5640 (uintmax_t)*pte); 5641 5642 /* 5643 * PG_W must be cleared atomically. Although the pmap 5644 * lock synchronizes access to PG_W, another processor 5645 * could be setting PG_M and/or PG_A concurrently. 5646 */ 5647 atomic_clear_long(pte, PG_W); 5648 pmap->pm_stats.wired_count--; 5649 } 5650 } 5651 PMAP_UNLOCK(pmap); 5652 } 5653 5654 void 5655 mmu_radix_zero_page(vm_page_t m) 5656 { 5657 vm_offset_t addr; 5658 5659 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5660 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5661 pagezero(addr); 5662 } 5663 5664 void 5665 mmu_radix_zero_page_area(vm_page_t m, int off, int size) 5666 { 5667 caddr_t addr; 5668 5669 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size); 5670 MPASS(off + size <= PAGE_SIZE); 5671 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5672 memset(addr + off, 0, size); 5673 } 5674 5675 static int 5676 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 5677 { 5678 pml3_entry_t *l3ep; 5679 pt_entry_t pte; 5680 vm_paddr_t pa; 5681 int val; 5682 5683 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); 5684 PMAP_LOCK(pmap); 5685 5686 l3ep = pmap_pml3e(pmap, addr); 5687 if (l3ep != NULL && (*l3ep & PG_V)) { 5688 if (*l3ep & RPTE_LEAF) { 5689 pte = *l3ep; 5690 /* Compute the physical address of the 4KB page. */ 5691 pa = ((*l3ep & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) & 5692 PG_FRAME; 5693 val = MINCORE_PSIND(1); 5694 } else { 5695 pte = *pmap_l3e_to_pte(l3ep, addr); 5696 pa = pte & PG_FRAME; 5697 val = 0; 5698 } 5699 } else { 5700 pte = 0; 5701 pa = 0; 5702 val = 0; 5703 } 5704 if ((pte & PG_V) != 0) { 5705 val |= MINCORE_INCORE; 5706 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5707 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5708 if ((pte & PG_A) != 0) 5709 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5710 } 5711 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5712 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5713 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5714 *locked_pa = pa; 5715 } 5716 PMAP_UNLOCK(pmap); 5717 return (val); 5718 } 5719 5720 void 5721 mmu_radix_activate(struct thread *td) 5722 { 5723 pmap_t pmap; 5724 uint32_t curpid; 5725 5726 CTR2(KTR_PMAP, "%s(%p)", __func__, td); 5727 critical_enter(); 5728 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5729 curpid = mfspr(SPR_PID); 5730 if (pmap->pm_pid > isa3_base_pid && 5731 curpid != pmap->pm_pid) { 5732 mmu_radix_pid_set(pmap); 5733 } 5734 critical_exit(); 5735 } 5736 5737 /* 5738 * Increase the starting virtual address of the given mapping if a 5739 * different alignment might result in more superpage mappings. 
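 * The address is adjusted so that (*addr & L3_PAGE_MASK) matches the
 * superpage offset of the backing object, which lets 2MB-aligned
 * portions of the mapping be promoted later.  For example, if the
 * object offset is 0x10000 bytes into a 2MB superpage, *addr is moved
 * forward to the next virtual address whose low 21 bits are 0x10000.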
 */
void
mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	if (size < L3_PAGE_SIZE)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & L3_PAGE_MASK;
	if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) <
	    L3_PAGE_SIZE || (*addr & L3_PAGE_MASK) == superpage_offset)
		return;
	if ((*addr & L3_PAGE_MASK) < superpage_offset)
		*addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
	else
		*addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) +
		    superpage_offset;
}

static void *
mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);
	if (pa < powerpc_ptob(Maxmem))
		panic("bad pa: %#lx less than Maxmem %#lx\n",
		    pa, powerpc_ptob(Maxmem));
	va = kva_alloc(size);
	if (bootverbose)
		printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
	KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));

	if (!va)
		panic("%s: Couldn't alloc kernel virtual memory", __func__);

	for (tmpva = va; size > 0;) {
		mmu_radix_kenter_attr(tmpva, ppa, attr);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}
	ptesync();

	return ((void *)(va + offset));
}

static void *
mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);

	return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}

void
mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	m->md.mdpg_cache_attrs = ma;

	/*
	 * If "m" is a normal page, update its direct mapping.  This update
	 * can be relied upon to perform any cache operations that are
	 * required for data coherence.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
	    PAGE_SIZE, m->md.mdpg_cache_attrs))
		panic("memory attribute change on the direct map failed");
}

static void
mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t offset;

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	/* If we gave a direct map region in pmap_mapdev, do nothing. */
	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
		return;

	offset = va & PAGE_MASK;
	size = round_page(offset + size);
	va = trunc_page(va);

	if (pmap_initialized) {
		mmu_radix_qremove(va, atop(size));
		kva_free(va, size);
	}
}

static __inline void
pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
{
	uint64_t opte, npte;

	/*
	 * Spin until the attribute bits are updated or found to already
	 * be set; another processor may be setting the referenced and
	 * modified bits in the same PTE concurrently.
	 */
	do {
		opte = *pte;
		npte = opte & ~mask;
		npte |= cache_bits;
	} while (npte != opte && !atomic_cmpset_long(pte, opte, npte));
}

/*
 * Tries to demote a 1GB page mapping.
 */
static boolean_t
pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
{
	pml2_entry_t oldpdpe;
	pml3_entry_t *firstpde, newpde, *pde;
	vm_paddr_t pdpgpa;
	vm_page_t pdpg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	oldpdpe = *l2e;
	KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
	    ("pmap_demote_l2e: oldpdpe is missing RPTE_LEAF and/or PG_V"));
	pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
	    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (pdpg == NULL) {
		CTR2(KTR_PMAP, "pmap_demote_l2e: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return (FALSE);
	}
	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
	firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
	KASSERT((oldpdpe & PG_A) != 0,
	    ("pmap_demote_l2e: oldpdpe is missing PG_A"));
	KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
	    ("pmap_demote_l2e: oldpdpe is missing PG_M"));
	newpde = oldpdpe;

	/*
	 * Initialize the page directory page.
	 */
	for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
		*pde = newpde;
		newpde += L3_PAGE_SIZE;
	}

	/*
	 * Demote the mapping.
	 */
	pde_store(l2e, pdpgpa);

	/*
	 * Flush PWC --- XXX revisit
	 */
	pmap_invalidate_all(pmap);

	pmap_l2e_demotions++;
	CTR2(KTR_PMAP, "pmap_demote_l2e: success for va %#lx"
	    " in pmap %p", va, pmap);
	return (TRUE);
}

vm_paddr_t
mmu_radix_kextract(vm_offset_t va)
{
	pml3_entry_t l3e;
	vm_paddr_t pa;

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
		pa = DMAP_TO_PHYS(va);
	} else {
		l3e = *pmap_pml3e(kernel_pmap, va);
		if (l3e & RPTE_LEAF) {
			pa = (l3e & PG_PS_FRAME) | (va & L3_PAGE_MASK);
		} else {
			/*
			 * Beware of a concurrent promotion that changes the
			 * PDE at this point!  For example, vtopte() must not
			 * be used to access the PTE because it would use the
			 * new PDE.  It is, however, safe to use the old PDE
			 * because the page table page is preserved by the
			 * promotion.
			 */
			pa = *pmap_l3e_to_pte(&l3e, va);
			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
		}
	}
	return (pa);
}

static pt_entry_t
mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{

	if (ma != VM_MEMATTR_DEFAULT)
		return (pmap_cache_bits(ma));

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
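	 * In other words, ordinary RAM gets the cacheable memory
	 * attributes (RPTE_ATTR_MEM), while anything outside the known
	 * physical memory regions, typically device space, gets the
	 * guarded, cache-inhibited I/O attributes (RPTE_ATTR_GUARDEDIO).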
	 */
	for (int i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size)))
			return (RPTE_ATTR_MEM);
	}
	return (RPTE_ATTR_GUARDEDIO);
}

static void
mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
	pt_entry_t *pte, pteval;
	uint64_t cache_bits;

	pte = kvtopte(va);
	MPASS(pte != NULL);
	pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
	cache_bits = mmu_radix_calc_wimg(pa, ma);
	pte_store(pte, pteval | cache_bits);
}

void
mmu_radix_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);

	pte = kvtopte(va);
	pte_clear(pte);
}

int
mmu_radix_decode_kernel_ptr(vm_offset_t addr,
    int *is_user, vm_offset_t *decoded)
{

	CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
	*decoded = addr;
	*is_user = (addr < VM_MAXUSER_ADDRESS);
	return (0);
}

static boolean_t
mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (mem_valid(pa, size));
}

static void
mmu_radix_scan_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	UNIMPLEMENTED();
}

static void
mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
{

	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
	UNIMPLEMENTED();
}

vm_offset_t
mmu_radix_quick_enter_page(vm_page_t m)
{
	vm_paddr_t paddr;

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	paddr = VM_PAGE_TO_PHYS(m);
	return (PHYS_TO_DMAP(paddr));
}

void
mmu_radix_quick_remove_page(vm_offset_t addr __unused)
{
	/* No work to do here: quick mappings come from the direct map. */
	CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
}

static void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	cpu_flush_dcache((void *)sva, eva - sva);
}

int
mmu_radix_change_attr(vm_offset_t va, vm_size_t size, vm_memattr_t mode)
{
	int error;

	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
	PMAP_LOCK(kernel_pmap);
	error = pmap_change_attr_locked(va, size, mode, true);
	PMAP_UNLOCK(kernel_pmap);
	return (error);
}

static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
{
	vm_offset_t base, offset, tmpva;
	vm_paddr_t pa_start, pa_end, pa_end1;
	pml2_entry_t *l2e;
	pml3_entry_t *l3e;
	pt_entry_t *pte;
	int cache_bits, error;
	boolean_t changed;

	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = round_page(offset + size);

	/*
	 * Only supported on kernel virtual addresses, including the direct
	 * map but excluding the recursive map.
	 */
	if (base < DMAP_MIN_ADDRESS)
		return (EINVAL);

	cache_bits = pmap_cache_bits(mode);
	changed = FALSE;

	/*
	 * Pages that aren't mapped aren't supported.  Also break down
	 * 1GB and 2MB pages into smaller pages if required.
	 */
	for (tmpva = base; tmpva < base + size; ) {
		l2e = pmap_pml2e(kernel_pmap, tmpva);
		if (l2e == NULL || *l2e == 0)
			return (EINVAL);
		if (*l2e & RPTE_LEAF) {
			/*
			 * If the current 1GB page already has the required
			 * memory type, then we need not demote this page.
			 * Just increment tmpva to the next 1GB page frame.
			 */
			if ((*l2e & RPTE_ATTR_MASK) == cache_bits) {
				tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
				continue;
			}

			/*
			 * If the current offset aligns with a 1GB page frame
			 * and there is at least 1GB left within the range,
			 * then we need not break down this page into 2MB
			 * pages.
			 */
			if ((tmpva & L2_PAGE_MASK) == 0 &&
			    tmpva + L2_PAGE_MASK < base + size) {
				tmpva += L2_PAGE_SIZE;
				continue;
			}
			if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
				return (ENOMEM);
		}
		l3e = pmap_l2e_to_l3e(l2e, tmpva);
		KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
		    tmpva, l2e));
		if (*l3e == 0)
			return (EINVAL);
		if (*l3e & RPTE_LEAF) {
			/*
			 * If the current 2MB page already has the required
			 * memory type, then we need not demote this page.
			 * Just increment tmpva to the next 2MB page frame.
			 */
			if ((*l3e & RPTE_ATTR_MASK) == cache_bits) {
				tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
				continue;
			}

			/*
			 * If the current offset aligns with a 2MB page frame
			 * and there is at least 2MB left within the range,
			 * then we need not break down this page into 4KB
			 * pages.
			 */
			if ((tmpva & L3_PAGE_MASK) == 0 &&
			    tmpva + L3_PAGE_MASK < base + size) {
				tmpva += L3_PAGE_SIZE;
				continue;
			}
			if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
				return (ENOMEM);
		}
		pte = pmap_l3e_to_pte(l3e, tmpva);
		if (*pte == 0)
			return (EINVAL);
		tmpva += PAGE_SIZE;
	}
	error = 0;

	/*
	 * Ok, all the pages exist, so run through them updating their
	 * cache mode if required.  Contiguous runs of physical pages that
	 * lie within the direct map are batched so that their direct map
	 * aliases can be updated with a single recursive call.
	 */
	pa_start = pa_end = 0;
	for (tmpva = base; tmpva < base + size; ) {
		l2e = pmap_pml2e(kernel_pmap, tmpva);
		if (*l2e & RPTE_LEAF) {
			if ((*l2e & RPTE_ATTR_MASK) != cache_bits) {
				pmap_pte_attr(l2e, cache_bits,
				    RPTE_ATTR_MASK);
				changed = TRUE;
			}
			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
			    (*l2e & PG_PS_FRAME) < dmaplimit) {
				if (pa_start == pa_end) {
					/* Start physical address run. */
					pa_start = *l2e & PG_PS_FRAME;
					pa_end = pa_start + L2_PAGE_SIZE;
				} else if (pa_end == (*l2e & PG_PS_FRAME))
					pa_end += L2_PAGE_SIZE;
				else {
					/* Run ended, update direct map. */
					error = pmap_change_attr_locked(
					    PHYS_TO_DMAP(pa_start),
					    pa_end - pa_start, mode, flush);
					if (error != 0)
						break;
					/* Start physical address run. */
					pa_start = *l2e & PG_PS_FRAME;
					pa_end = pa_start + L2_PAGE_SIZE;
				}
			}
			tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
			continue;
		}
		l3e = pmap_l2e_to_l3e(l2e, tmpva);
		if (*l3e & RPTE_LEAF) {
			if ((*l3e & RPTE_ATTR_MASK) != cache_bits) {
				pmap_pte_attr(l3e, cache_bits,
				    RPTE_ATTR_MASK);
				changed = TRUE;
			}
			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
			    (*l3e & PG_PS_FRAME) < dmaplimit) {
				if (pa_start == pa_end) {
					/* Start physical address run. */
					pa_start = *l3e & PG_PS_FRAME;
					pa_end = pa_start + L3_PAGE_SIZE;
				} else if (pa_end == (*l3e & PG_PS_FRAME))
					pa_end += L3_PAGE_SIZE;
				else {
					/* Run ended, update direct map. */
					error = pmap_change_attr_locked(
					    PHYS_TO_DMAP(pa_start),
					    pa_end - pa_start, mode, flush);
					if (error != 0)
						break;
					/* Start physical address run. */
					pa_start = *l3e & PG_PS_FRAME;
					pa_end = pa_start + L3_PAGE_SIZE;
				}
			}
			tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
		} else {
			pte = pmap_l3e_to_pte(l3e, tmpva);
			if ((*pte & RPTE_ATTR_MASK) != cache_bits) {
				pmap_pte_attr(pte, cache_bits,
				    RPTE_ATTR_MASK);
				changed = TRUE;
			}
			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
			    (*pte & PG_FRAME) < dmaplimit) {
				if (pa_start == pa_end) {
					/* Start physical address run. */
					pa_start = *pte & PG_FRAME;
					pa_end = pa_start + PAGE_SIZE;
				} else if (pa_end == (*pte & PG_FRAME))
					pa_end += PAGE_SIZE;
				else {
					/* Run ended, update direct map. */
					error = pmap_change_attr_locked(
					    PHYS_TO_DMAP(pa_start),
					    pa_end - pa_start, mode, flush);
					if (error != 0)
						break;
					/* Start physical address run. */
					pa_start = *pte & PG_FRAME;
					pa_end = pa_start + PAGE_SIZE;
				}
			}
			tmpva += PAGE_SIZE;
		}
	}
	if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
		pa_end1 = MIN(pa_end, dmaplimit);
		if (pa_start != pa_end1)
			error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
			    pa_end1 - pa_start, mode, flush);
	}

	/*
	 * Flush CPU caches if required, so that no stale data remains
	 * cached with the old memory attributes.
	 */
	if (changed) {
		pmap_invalidate_all(kernel_pmap);

		if (flush)
			pmap_invalidate_cache_range(base, tmpva);
	}
	return (error);
}

/*
 * Allocate physical memory for the vm_page array and map it into KVA,
 * attempting to back the vm_pages with domain-local memory.
 */
void
mmu_radix_page_array_startup(long pages)
{
#ifdef notyet
	pml2_entry_t *l2e;
	pml3_entry_t *pde;
	pml3_entry_t newl3;
	vm_offset_t va;
	long pfn;
	int domain, i;
#endif
	vm_paddr_t pa;
	vm_offset_t start, end;

	vm_page_array_size = pages;

	start = VM_MIN_KERNEL_ADDRESS;
	end = start + pages * sizeof(struct vm_page);

	pa = vm_phys_early_alloc(0, end - start);

	start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
#ifdef notyet
	/*
	 * TODO: NUMA vm_page_array.  Blocked out until then (copied from
	 * amd64).
	 */
	for (va = start; va < end; va += L3_PAGE_SIZE) {
		pfn = first_page + (va - start) / sizeof(struct vm_page);
		domain = _vm_phys_domain(ptoa(pfn));
		l2e = pmap_pml2e(kernel_pmap, va);
		if ((*l2e & PG_V) == 0) {
			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
			dump_add_page(pa);
			pagezero(PHYS_TO_DMAP(pa));
			pde_store(l2e, (pml2_entry_t)pa);
		}
		pde = pmap_l2e_to_l3e(l2e, va);
		if ((*pde & PG_V) != 0)
			panic("Unexpected pde %p", pde);
		pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
		for (i = 0; i < NPDEPG; i++)
			dump_add_page(pa + i * PAGE_SIZE);
		newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R |
		    RPTE_EAA_W);
		pte_store(pde, newl3);
	}
#endif
	vm_page_array = (vm_page_t)start;
}

#ifdef DDB
#include <sys/kdb.h>
#include <ddb/ddb.h>

static void
pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
{
	pml1_entry_t *l1e;
	pml2_entry_t *l2e;
	pml3_entry_t *l3e;
	pt_entry_t *pte;

	l1e = &l1[pmap_pml1e_index(va)];
	db_printf("VA %#016lx l1e %#016lx", va, *l1e);
	if ((*l1e & PG_V) == 0) {
		db_printf("\n");
		return;
	}
	l2e = pmap_l1e_to_l2e(l1e, va);
	db_printf(" l2e %#016lx", *l2e);
	if ((*l2e & PG_V) == 0 || (*l2e & RPTE_LEAF) != 0) {
		db_printf("\n");
		return;
	}
	l3e = pmap_l2e_to_l3e(l2e, va);
	db_printf(" l3e %#016lx", *l3e);
	if ((*l3e & PG_V) == 0 || (*l3e & RPTE_LEAF) != 0) {
		db_printf("\n");
		return;
	}
	pte = pmap_l3e_to_pte(l3e, va);
	db_printf(" pte %#016lx\n", *pte);
}

void
pmap_page_print_mappings(vm_page_t m)
{
	pmap_t pmap;
	pv_entry_t pv;

	db_printf("page %p(%lx)\n", m, m->phys_addr);
	/* Need to elide locks if running in ddb. */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		db_printf("pv: %p ", pv);
		db_printf("va: %#016lx ", pv->pv_va);
		pmap = PV_PMAP(pv);
		db_printf("pmap %p ", pmap);
		if (pmap != NULL) {
			db_printf("asid: %lu\n", pmap->pm_pid);
			pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
		}
	}
}

DB_SHOW_COMMAND(pte, pmap_print_pte)
{
	vm_offset_t va;
	pmap_t pmap;

	if (!have_addr) {
		db_printf("show pte addr\n");
		return;
	}
	va = (vm_offset_t)addr;

	if (va >= DMAP_MIN_ADDRESS)
		pmap = kernel_pmap;
	else if (kdb_thread != NULL)
		pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
	else
		pmap = vmspace_pmap(curthread->td_proc->p_vmspace);

	pmap_pte_walk(pmap->pm_pml1, va);
}

#endif