/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK		0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000

/* Actual page sizes (to be used with tlbie, when L=0) */
#define	AP_4K		0x00
#define	AP_16M		0x80

#define	LPTE_KERNEL_VSID_BIT	(KERNEL_VSID_BIT << \
				(16 - (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)))

/* Abbreviated Virtual Address Page - high bits */
#define	LPTE_AVA_PGNHI_MASK	0x0000000000000F80ULL
#define	LPTE_AVA_PGNHI_SHIFT	7

/* Effective Address Page - low bits */
#define	EA_PAGELO_MASK		0x7ffULL
#define	EA_PAGELO_SHIFT		11

static bool moea64_crop_tlbie;
static bool moea64_need_lock;

/*
 * The tlbie instruction has two forms: an old one used by PowerISA
 * 2.03 and prior, and a newer one used by PowerISA 2.06 and later.
 * We need to support both.
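 * On a 32-bit ("bridge") kernel the tlbie must also be issued with 64-bit
 * mode temporarily enabled in the MSR; the inline assembly in the 32-bit
 * path of TLBIE() below takes care of that.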
 */
static __inline void
TLBIE(uint64_t vpn, uint64_t oldptehi)
{
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;
	bool need_lock = moea64_need_lock;

	vpn <<= ADDR_PIDX_SHFT;

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	if (need_lock) {
		while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
		isync(); /* Flush instruction queue once lock acquired */

		if (moea64_crop_tlbie) {
			vpn &= ~(0xffffULL << 48);
#ifdef __powerpc64__
			if ((oldptehi & LPTE_BIG) != 0)
				__asm __volatile("tlbie %0, 1" :: "r"(vpn) :
				    "memory");
			else
				__asm __volatile("tlbie %0, 0" :: "r"(vpn) :
				    "memory");
			__asm __volatile("eieio; tlbsync; ptesync" :::
			    "memory");
			goto done;
#endif
		}
	}

#ifdef __powerpc64__
	/*
	 * If this page has LPTE_BIG set and is from userspace, then
	 * it must be a superpage with 4KB base/16MB actual page size.
	 */
	if ((oldptehi & LPTE_BIG) != 0 &&
	    (oldptehi & LPTE_KERNEL_VSID_BIT) == 0)
		vpn |= AP_16M;

	/*
	 * Explicitly clobber r0.  The tlbie instruction has two forms: an old
	 * one used by PowerISA 2.03 and prior, and a newer one used by
	 * PowerISA 2.06 (maybe 2.05?) and later.  We need to support both, and
	 * it just so happens that since we use 4k pages we can simply zero out
	 * and clobber r0, and the assembler will interpret the single-operand
	 * form of tlbie as having RB set, and everything else as 0.  The RS
	 * operand in the newer form is in the same position as the L (page
	 * size) bit of the old form, so as long as RS is 0, we're good on both
	 * sides.
	 */
	__asm __volatile("li 0, 0 \n tlbie %0, 0" :: "r"(vpn) : "r0", "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
done:

#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	if (need_lock)
		tlbie_lock = 0;
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;

static volatile struct pate *moea64_part_table;

/*
 * Dump function.
 */
static void	*moea64_dump_pmap_native(void *ctx, void *buf,
		    u_long *nbytes);

/*
 * PTE calls.
 */
static int64_t	moea64_pte_insert_native(struct pvo_entry *);
static int64_t	moea64_pte_synch_native(struct pvo_entry *);
static int64_t	moea64_pte_clear_native(struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(struct pvo_entry *);
static int64_t	moea64_pte_insert_sp_native(struct pvo_entry *);
static int64_t	moea64_pte_unset_sp_native(struct pvo_entry *);
static int64_t	moea64_pte_replace_sp_native(struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(int ap);
static void	tlbia(void);
static void	moea64_install_native(void);

static struct pmap_funcs moea64_native_methods = {
	.install = moea64_install_native,

	/* Internal interfaces */
	.bootstrap = moea64_bootstrap_native,
	.cpu_bootstrap = moea64_cpu_bootstrap_native,
	.dumpsys_dump_pmap = moea64_dump_pmap_native,
};

static struct moea64_funcs moea64_native_funcs = {
	.pte_synch = moea64_pte_synch_native,
	.pte_clear = moea64_pte_clear_native,
	.pte_unset = moea64_pte_unset_native,
	.pte_replace = moea64_pte_replace_native,
	.pte_insert = moea64_pte_insert_native,
	.pte_insert_sp = moea64_pte_insert_sp_native,
	.pte_unset_sp = moea64_pte_unset_sp_native,
	.pte_replace_sp = moea64_pte_replace_sp_native,
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, oea64_mmu);

static void
moea64_install_native()
{

	/* Install the MOEA64 ops. */
	moea64_ops = &moea64_native_funcs;

	moea64_install();
}

static int64_t
moea64_pte_synch_native(struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	uint64_t ptelo, pvo_ptevpn;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn, properpt.pte_hi);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
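		/*
		 * Clearing anything other than LPTE_REF (i.e. the C bit)
		 * cannot be done with a simple in-place store, so invalidate
		 * the PTE and re-insert it rebuilt from the PVO, which leaves
		 * the bit clear.
		 */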
		ptelo = moea64_pte_unset_native(pvo);
		moea64_pte_insert_native(pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static __always_inline int64_t
moea64_pte_unset_locked(volatile struct lpte *pt, uint64_t vpn)
{
	uint64_t ptelo, ptehi;

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	ptehi = (be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED;
	pt->pte_hi = htobe64(ptehi);
	PTESYNC();
	TLBIE(vpn, ptehi);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid--);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_unset_native(struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	int64_t ret;
	uint64_t pvo_ptevpn;

	pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

	rw_rlock(&moea64_eviction_lock);

	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		ret = -1;
	} else
		ret = moea64_pte_unset_locked(pt, pvo->pvo_vpn);

	rw_runlock(&moea64_eviction_lock);

	return (ret);
}

static int64_t
moea64_pte_replace_inval_native(struct pvo_entry *pvo,
    volatile struct lpte *pt)
{
	struct lpte properpt;
	uint64_t ptelo, ptehi;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Replace the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	ptehi = (be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED;
	pt->pte_hi = htobe64(ptehi);
	PTESYNC();
	TLBIE(pvo->pvo_vpn, ptehi);
	ptelo = be64toh(pt->pte_lo);
	EIEIO();
	pt->pte_lo = htobe64(properpt.pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(properpt.pte_hi);	/* Release lock */
	PTESYNC();
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

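/*
 * Update a valid PTE from its PVO.  When no flags are set, only software
 * bits in the high doubleword change and the entry can be rewritten in
 * place; otherwise it has to be invalidated and rebuilt.
 */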
static int64_t
moea64_pte_replace_native(struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_replace_inval_native(pvo, pt);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
		break;
	}

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
		mtspr(SPR_PTCR,
		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
		     flsl((PART_SIZE >> 12) - 1));
	else
		__asm __volatile ("ptesync; mtsdr1 %0; isync"
		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
			     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(kernelstart, kernelend);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		moea64_need_lock = false;
		break;
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBM970:
	case IBM970FX:
	case IBM970GX:
	case IBM970MP:
		moea64_crop_tlbie = true;
	default:
		moea64_need_lock = true;
	}

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory, and that memory has to be mapped by
	 * a page table: the very page table we are about to allocate. We
	 * don't have BATs, so drop to data real mode for a minute as a
	 * measure of last resort. We do this a couple of times.
	 */
	/*
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment on POWER ISA 3+ systems. On earlier
	 * systems, offset addition is done by the CPU with bitwise OR rather
	 * than addition, so the table must also be aligned on a boundary of
	 * its own size. Pick the larger of the two, which works on all
	 * systems.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
	    MAX(256*1024, size));
	if (hw_direct_map)
		moea64_pteg_table =
		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
	/* Allocate partition table (ISA 3.0). */
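	/*
	 * On ISA 3.0 hash-MMU machines the hardware locates the HPT through
	 * partition table entry 0, which the PTCR points at, rather than
	 * through SDR1, so a partition table is needed even when running
	 * without a hypervisor.
	 */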
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		moea64_part_table =
		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
		moea64_part_table =
		    (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
	}
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
		moea64_part_table[0].pagetab = htobe64(
			(DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
			(uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)));
	}
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
#ifdef __powerpc64__
	uint64_t temp;
#else
	uint32_t oldhihalf;
#endif

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */
#ifdef __powerpc64__
	/*
	 * Note: Success of this sequence has the side effect of invalidating
	 * the PTE, as we are setting it to LPTE_LOCKED and discarding the
	 * other bits, including LPTE_V.
	 */
	__asm __volatile (
		"1:\tldarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stdcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stdcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(temp), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi),
		  "r" (htobe64(bitmask)), "r" (htobe64(LPTE_LOCKED)),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");
	*oldhi = be64toh(temp);
#else
	/*
	 * This code is used in bridge mode only.
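	 * ("Bridge" mode means a 32-bit kernel driving the 64-bit hash MMU.)
	 * Only the low 32 bits of pte_hi, which hold the valid and lock bits,
	 * are examined and updated here with lwarx/stwcx.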
	 */
	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;
#endif

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		va = oldptehi >> (ADDR_SR_SHFT - ADDR_API_SHFT64);
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    (ADDR_PIDX >> ADDR_PIDX_SHFT);
		else
			va = ((k >> 3) ^ va) & (ADDR_PIDX >> ADDR_PIDX_SHFT);
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va, oldptehi);
		STAT_MOEA64(moea64_pte_valid--);
		STAT_MOEA64(moea64_pte_overflow++);
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid++);

	return (k);
}

static __always_inline int64_t
moea64_pte_insert_locked(struct pvo_entry *pvo, struct lpte *insertpt,
    uint64_t mask)
{
	uintptr_t slot;

	/*
	 * First try primary hash.
	 */
	slot = moea64_insert_to_pteg_native(insertpt, pvo->pvo_pte.slot,
	    mask | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt->pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(insertpt, pvo->pvo_pte.slot,
	    mask | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	return (-1);
}

static int64_t
moea64_pte_insert_native(struct pvo_entry *pvo)
{
	struct lpte insertpt;
	int64_t ret;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_VALID);
	if (ret == -1) {
		/*
		 * Out of luck. Find a PTE to sacrifice.
		 */

		/* Lock out all insertions for a bit */
		if (!rw_try_upgrade(&moea64_eviction_lock)) {
			rw_runlock(&moea64_eviction_lock);
			rw_wlock(&moea64_eviction_lock);
		}
		/* Don't evict large pages */
		ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_BIG);
		rw_wunlock(&moea64_eviction_lock);
		/* No freeable slots in either PTEG? We're hosed. */
		if (ret == -1)
			panic("moea64_pte_insert: overflow");
	} else
		rw_runlock(&moea64_eviction_lock);

	return (0);
}

static void *
moea64_dump_pmap_native(void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);

	if (*nbytes == 0)
		return (NULL);

	dctx->ptex = ptex_end;
	return (__DEVOLATILE(struct lpte *, moea64_pteg_table) + ptex);
}

static __always_inline uint64_t
moea64_vpn_from_pte(uint64_t ptehi, uintptr_t slot)
{
	uint64_t pgn, pgnlo, vsid;

	vsid = (ptehi & LPTE_AVA_MASK) >> LPTE_VSID_SHIFT;
	if ((ptehi & LPTE_HID) != 0)
		slot ^= (moea64_pteg_mask << 3);
	pgnlo = ((vsid & VSID_HASH_MASK) ^ (slot >> 3)) & EA_PAGELO_MASK;
	pgn = ((ptehi & LPTE_AVA_PGNHI_MASK) << (EA_PAGELO_SHIFT -
	    LPTE_AVA_PGNHI_SHIFT)) | pgnlo;
	return ((vsid << 16) | pgn);
}

static __always_inline int64_t
moea64_pte_unset_sp_locked(struct pvo_entry *pvo)
{
	volatile struct lpte *pt;
	uint64_t ptehi, refchg, vpn;
	vm_offset_t eva;
	pmap_t pm;

	pm = pvo->pvo_pmap;
	refchg = 0;
	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		pt = moea64_pteg_table + pvo->pvo_pte.slot;
		ptehi = be64toh(pt->pte_hi);
		if ((ptehi & LPTE_AVPN_MASK) !=
		    moea64_pte_vpn_from_pvo_vpn(pvo)) {
			/* Evicted: invalidate new entry */
			STAT_MOEA64(moea64_pte_overflow--);
			vpn = moea64_vpn_from_pte(ptehi, pvo->pvo_pte.slot);
			CTR1(KTR_PMAP, "Evicted page in pte_unset_sp: vpn=%jx",
			    (uintmax_t)vpn);
			/* Assume evicted page was modified */
			refchg |= LPTE_CHG;
		} else
			vpn = pvo->pvo_vpn;

		refchg |= moea64_pte_unset_locked(pt, vpn);
	}

	return (refchg);
}

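/*
 * The *_sp_native entry points below each operate on an entire HPT
 * superpage (HPT_SP_SIZE bytes), walking every PVO backing it in the
 * pmap's PVO tree under the eviction lock.
 */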
static int64_t
moea64_pte_unset_sp_native(struct pvo_entry *pvo)
{
	uint64_t refchg;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	rw_rlock(&moea64_eviction_lock);
	refchg = moea64_pte_unset_sp_locked(pvo);
	rw_runlock(&moea64_eviction_lock);

	return (refchg);
}

static __always_inline int64_t
moea64_pte_insert_sp_locked(struct pvo_entry *pvo)
{
	struct lpte insertpt;
	int64_t ret;
	vm_offset_t eva;
	pmap_t pm;

	pm = pvo->pvo_pmap;
	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		moea64_pte_from_pvo(pvo, &insertpt);
		pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */

		ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_VALID);
		if (ret == -1) {
			/* Lock out all insertions for a bit */
			if (!rw_try_upgrade(&moea64_eviction_lock)) {
				rw_runlock(&moea64_eviction_lock);
				rw_wlock(&moea64_eviction_lock);
			}
			/* Don't evict large pages */
			ret = moea64_pte_insert_locked(pvo, &insertpt,
			    LPTE_BIG);
			rw_downgrade(&moea64_eviction_lock);
			/* No freeable slots in either PTEG? We're hosed. */
			if (ret == -1)
				panic("moea64_pte_insert_sp: overflow");
		}
	}

	return (0);
}

static int64_t
moea64_pte_insert_sp_native(struct pvo_entry *pvo)
{
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	rw_rlock(&moea64_eviction_lock);
	moea64_pte_insert_sp_locked(pvo);
	rw_runlock(&moea64_eviction_lock);

	return (0);
}

static int64_t
moea64_pte_replace_sp_native(struct pvo_entry *pvo)
{
	uint64_t refchg;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	rw_rlock(&moea64_eviction_lock);
	refchg = moea64_pte_unset_sp_locked(pvo);
	moea64_pte_insert_sp_locked(pvo);
	rw_runlock(&moea64_eviction_lock);

	return (refchg);
}