/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000

static bool moea64_crop_tlbie;
static bool moea64_need_lock;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;
	bool need_lock = moea64_need_lock;

	vpn <<= ADDR_PIDX_SHFT;

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	if (need_lock) {
		while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
		isync(); /* Flush instruction queue once lock acquired */

		if (moea64_crop_tlbie)
			vpn &= ~(0xffffULL << 48);
	}

#ifdef __powerpc64__
	/*
	 * Explicitly clobber r0.  The tlbie instruction has two forms: an old
	 * one used by PowerISA 2.03 and prior, and a newer one used by
	 * PowerISA 2.06 (maybe 2.05?) and later.  We need to support both,
	 * and it just so happens that since we use 4k pages we can simply
	 * zero out r0, and clobber it, and the assembler will interpret the
	 * single-operand form of tlbie as having RB set, and everything else
	 * as 0.  The RS operand in the newer form is in the same position as
	 * the L (page size) bit of the old form, so as long as RS is 0,
	 * we're good on both sides.
	 */
	__asm __volatile("li 0, 0 \n tlbie %0" :: "r"(vpn) : "r0", "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	if (need_lock)
		tlbie_lock = 0;
}

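/*
 * Callers below typically bracket TLBIE() with a critical section, presumably
 * so the thread cannot be preempted while the hobo spinlock (tlbie_lock) is
 * held, e.g. as in moea64_pte_clear_native():
 *
 *	critical_enter();
 *	TLBIE(pvo->pvo_vpn);
 *	critical_exit();
 */
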
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)
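/*
 * Typical usage, as in moea64_bootstrap_native() below: turn off data
 * translation (PSL_DR) around stores that must not depend on existing
 * mappings, e.g.:
 *
 *	DISABLE_TRANS(msr);
 *	bzero(__DEVOLATILE(void *, moea64_pteg_table), size);
 *	ENABLE_TRANS(msr);
 *
 * The msr argument is an lvalue used to stash the previous MSR value.
 */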

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock moea64_eviction_lock;

static volatile struct pate *moea64_part_table;

/*
 * Dump function.
 */
static void *moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf,
    u_long *nbytes);

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),
	MMUMETHOD(mmu_dump_pmap,	moea64_dump_pmap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid--);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

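/*
 * Note on the lock release above (and in moea64_pte_replace_inval_native()
 * below): the page table is stored big-endian, so the 32-bit store through
 * ((volatile int32_t *)&pt->pte_hi + 1) hits bytes 4-7 of pte_hi, i.e. its
 * low-order word.  LPTE_VALID and LPTE_LOCKED both fit in that word (see the
 * uint32_t casts in atomic_pte_lock() below), so zeroing it drops the lock
 * and leaves the entry invalid in a single access.
 */
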
static int64_t
moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
    volatile struct lpte *pt)
{
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Replace the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	EIEIO();
	pt->pte_lo = htobe64(properpt.pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(properpt.pte_hi); /* Release lock */
	PTESYNC();
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_replace_inval_native(mmu, pvo, pt);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
		break;
	}

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
		mtspr(SPR_PTCR,
		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
		     flsl((PART_SIZE >> 12) - 1));
	else
		__asm __volatile ("ptesync; mtsdr1 %0; isync"
		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
			     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

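/*
 * Worked example of the size encodings used just above (assuming the usual
 * ISA definitions of SDR1 HTABSIZE and the ISA 3.0 partition table size
 * field):
 *
 *   - HPT:  a 32 MB hash table holds 32 MB / 128 = 262144 PTEGs, so
 *     moea64_pteg_mask = 262143 and flsl(262143 >> 11) = flsl(127) = 7,
 *     i.e. HTABSIZE = 7 (table size 256 KB << 7 = 32 MB).
 *   - PTCR: PART_SIZE = 0x10000, so flsl((PART_SIZE >> 12) - 1) = flsl(15)
 *     = 4, encoding a 2^(12+4) = 64 KB partition table.
 */
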
static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		moea64_need_lock = false;
		break;
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBM970:
	case IBM970FX:
	case IBM970GX:
	case IBM970MP:
		moea64_crop_tlbie = true;
		/* FALLTHROUGH */
	default:
		moea64_need_lock = true;
	}

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory, but the memory we are about to
	 * allocate must itself be reachable through the page table we are
	 * allocating.  We have no BATs to fall back on, so as a measure of
	 * last resort drop to data real mode for a moment; we do this a
	 * couple of times below.
	 */
	/*
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment on POWER ISA 3+ systems. On earlier
	 * systems, offset addition is done by the CPU with bitwise OR rather
	 * than addition, so the table must also be aligned on a boundary of
	 * its own size. Pick the larger of the two, which works on all
	 * systems.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
	    MAX(256*1024, size));
	if (hw_direct_map)
		moea64_pteg_table =
		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
	/* Allocate partition table (ISA 3.0). */
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		moea64_part_table =
		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
		moea64_part_table =
		    (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
	}
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
		moea64_part_table[0].pagetab =
		    (DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
		    (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
	}
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

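/*
 * Flush the TLB with tlbiel.  The loop below walks a 4 MB range in 4 KB
 * steps (1024 iterations), which is expected to cover every TLB congruence
 * class; the starting value 0xc00 sets the IS = 0b11 field in the tlbiel
 * operand where supported, while the CPUs listed in the switch lack the IS
 * field and start from 0 instead.
 */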
static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

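/*
 * Roughly, the reservation sequence above implements the following as one
 * atomic unit (illustrative C only; bitmask and LPTE_LOCKED are truncated to
 * the low-order word of the big-endian pte_hi that the lwarx/stwcx. pair
 * targets):
 *
 *	old = low 32 bits of pte->pte_hi;
 *	if (old & (uint32_t)bitmask) {
 *		ret = 0;			// slot busy: valid/wired/locked
 *	} else {
 *		low 32 bits of pte->pte_hi = (uint32_t)LPTE_LOCKED;
 *		ret = 1;			// lock (and slot) seized
 *	}
 *	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | old;
 */
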
static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		va = oldptehi >> (ADDR_SR_SHFT - ADDR_API_SHFT64);
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    (ADDR_PIDX >> ADDR_PIDX_SHFT);
		else
			va = ((k >> 3) ^ va) & (ADDR_PIDX >> ADDR_PIDX_SHFT);
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		STAT_MOEA64(moea64_pte_valid--);
		STAT_MOEA64(moea64_pte_overflow++);
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid++);

	return (k);
}

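/*
 * The PTEG index math used above and in moea64_pte_insert_native() below
 * follows from the hashed page table scheme: the primary PTEG number is
 * (VSID hash XOR page index) & moea64_pteg_mask, and the secondary PTEG is
 * its complement within the mask.  That is why switching hash functions is
 * just pvo_pte.slot ^= (moea64_pteg_mask << 3) (each PTEG holds 8 slots),
 * and why the eviction path can recover the outgoing page index by XORing
 * the slot's PTEG number (k >> 3) back out of the VSID-derived bits held in
 * pte_hi.
 */
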
static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}

static void *
moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);

	if (*nbytes == 0)
		return (NULL);

	dctx->ptex = ptex_end;
	return (__DEVOLATILE(struct lpte *, moea64_pteg_table) + ptex);
}