/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000

static bool moea64_crop_tlbie;
static bool moea64_need_lock;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;
	bool need_lock = moea64_need_lock;

	vpn <<= ADDR_PIDX_SHFT;

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	if (need_lock) {
		while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
		isync(); /* Flush instruction queue once lock acquired */

		if (moea64_crop_tlbie)
			vpn &= ~(0xffffULL << 48);
	}

#ifdef __powerpc64__
	/*
	 * Explicitly clobber r0. The tlbie instruction has two forms: an old
	 * one used by PowerISA 2.03 and prior, and a newer one used by
	 * PowerISA 2.06 (maybe 2.05?) and later.
	 * We need to support both, and it just so happens that since we use
	 * 4k pages we can simply zero out r0, and clobber it, and the
	 * assembler will interpret the single-operand form of tlbie as having
	 * RB set, and everything else as 0. The RS operand in the newer form
	 * is in the same position as the L(page size) bit of the old form,
	 * so as long as RS is 0, we're good on both sides.
	 */
	__asm __volatile("li 0, 0 \n tlbie %0" :: "r"(vpn) : "r0", "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	if (need_lock)
		tlbie_lock = 0;
}

#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock moea64_eviction_lock;

static volatile struct pate *moea64_part_table;

/*
 * Dump function.
 */
static void *moea64_dump_pmap_native(void *ctx, void *buf,
    u_long *nbytes);

/*
 * PTE calls.
 */
static int64_t	moea64_pte_insert_native(struct pvo_entry *);
static int64_t	moea64_pte_synch_native(struct pvo_entry *);
static int64_t	moea64_pte_clear_native(struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(int ap);
static void	tlbia(void);
static void	moea64_install_native(void);

static struct pmap_funcs moea64_native_methods = {
	.install = moea64_install_native,

	/* Internal interfaces */
	.bootstrap = moea64_bootstrap_native,
	.cpu_bootstrap = moea64_cpu_bootstrap_native,
	.dumpsys_dump_pmap = moea64_dump_pmap_native,
};

static struct moea64_funcs moea64_native_funcs = {
	.pte_synch = moea64_pte_synch_native,
	.pte_clear = moea64_pte_clear_native,
	.pte_unset = moea64_pte_unset_native,
	.pte_replace = moea64_pte_replace_native,
	.pte_insert = moea64_pte_insert_native,
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, oea64_mmu);

static void
moea64_install_native()
{

	/* Install the MOEA64 ops. */
	moea64_ops = &moea64_native_funcs;
}

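/*
 * The PTE operations below share a common pattern: build the expected PTE
 * image from the PVO (moea64_pte_from_pvo()), compare the AVPN field of the
 * live page table slot against it to detect whether the entry has been
 * evicted behind our back, and return -1 if so. Otherwise the old low
 * doubleword is sampled so the caller can recover the REF/CHG (referenced/
 * changed) state. Readers take moea64_eviction_lock shared; only the
 * eviction path in moea64_pte_insert_native() takes it exclusive.
 */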
static int64_t
moea64_pte_synch_native(struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	uint64_t ptelo, pvo_ptevpn;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(pvo);
		moea64_pte_insert_native(pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_unset_native(struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	uint64_t ptelo, pvo_ptevpn;

	pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid--);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

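/*
 * Replacing a valid PTE in place follows the "Modifying a Page Table Entry"
 * recipe from the architecture manual, as used throughout this file: mark
 * the slot invalid-but-locked (clear LPTE_VALID, set LPTE_LOCKED) so the
 * hardware stops using it, ptesync and TLBIE the old translation, then write
 * the new low and high doublewords. The final store to pte_hi doubles as the
 * lock release.
 */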
static int64_t
moea64_pte_replace_inval_native(struct pvo_entry *pvo,
    volatile struct lpte *pt)
{
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Replace the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	EIEIO();
	pt->pte_lo = htobe64(properpt.pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(properpt.pte_hi); /* Release lock */
	PTESYNC();
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_replace_inval_native(pvo, pt);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
		break;
	}

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
		mtspr(SPR_PTCR,
		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
		     flsl((PART_SIZE >> 12) - 1));
	else
		__asm __volatile ("ptesync; mtsdr1 %0; isync"
		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
			     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

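/*
 * Boot-time setup: moea64_bootstrap_native() runs once during early boot.
 * It sizes and allocates the hashed page table (and, on ISA 3.0 CPUs, the
 * partition table that points at it), then hands control back to the
 * generic OEA64 code via the early/mid/late bootstrap hooks.
 */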
static void
moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(kernelstart, kernelend);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		moea64_need_lock = false;
		break;
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBM970:
	case IBM970FX:
	case IBM970GX:
	case IBM970MP:
		moea64_crop_tlbie = true;
	default:
		moea64_need_lock = true;
	}
	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory. This memory, to be allocated,
	 * has to reside in the very page table we are about to allocate.
	 * We don't have BAT, so drop to data real mode for a minute as a
	 * measure of last resort; we do this a couple of times below.
	 */
	/*
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment on POWER ISA 3+ systems. On earlier
	 * systems, offset addition is done by the CPU with bitwise OR rather
	 * than addition, so the table must also be aligned on a boundary of
	 * its own size. Pick the larger of the two, which works on all
	 * systems.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
	    MAX(256*1024, size));
	if (hw_direct_map)
		moea64_pteg_table =
		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
	/* Allocate partition table (ISA 3.0). */
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		moea64_part_table =
		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
		moea64_part_table =
		    (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
	}
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
		moea64_part_table[0].pagetab = htobe64(
			(DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
			(uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)));
	}
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

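/*
 * Try-lock a PTE slot by setting LPTE_LOCKED in its high word, failing if
 * any bit in 'bitmask' is already set. Only the low-order 32 bits of the
 * big-endian pte_hi are touched (which is where LPTE_VALID and LPTE_LOCKED
 * live), so a 32-bit lwarx/stwcx. pair is sufficient. Returns 1 with *oldhi
 * filled in on success, 0 on failure. Roughly, as a C illustration only
 * (the real code must use lwarx/stwcx. on the low half of pte_hi):
 *
 *	old = low32(pte->pte_hi);
 *	if (old & bitmask)
 *		return (0);
 *	low32(pte->pte_hi) = LPTE_LOCKED;
 *	*oldhi = high32(pte->pte_hi) | old;
 *	return (1);
 *
 * Used by moea64_insert_to_pteg_native() below.
 */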
static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		va = oldptehi >> (ADDR_SR_SHFT - ADDR_API_SHFT64);
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    (ADDR_PIDX >> ADDR_PIDX_SHFT);
		else
			va = ((k >> 3) ^ va) & (ADDR_PIDX >> ADDR_PIDX_SHFT);
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		STAT_MOEA64(moea64_pte_valid--);
		STAT_MOEA64(moea64_pte_overflow++);
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid++);

	return (k);
}

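/*
 * Insert the PTE for a PVO into the hardware page table. The primary hash
 * group is tried first, then the secondary (LPTE_HID) group; both attempts
 * skip slots that are valid, wired, or locked. If both groups are full, the
 * eviction lock is taken exclusively and a victim slot (any entry that is
 * not wired or locked) is kicked out to make room.
 */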
static int64_t
moea64_pte_insert_native(struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}

static void *
moea64_dump_pmap_native(void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);

	if (*nbytes == 0)
		return (NULL);

	dctx->ptex = ptex_end;
	return (__DEVOLATILE(struct lpte *, moea64_pteg_table) + ptex);
}