1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD 3 * 4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*- 32 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 33 * Copyright (C) 1995, 1996 TooLs GmbH. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. All advertising materials mentioning features or use of this software 45 * must display the following acknowledgement: 46 * This product includes software developed by TooLs GmbH. 47 * 4. The name of TooLs GmbH may not be used to endorse or promote products 48 * derived from this software without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 53 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 55 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 56 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 57 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 58 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 59 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * 61 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 62 */ 63 /*- 64 * Copyright (C) 2001 Benno Rice. 65 * All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions 69 * are met: 70 * 1. Redistributions of source code must retain the above copyright 71 * notice, this list of conditions and the following disclaimer. 72 * 2. Redistributions in binary form must reproduce the above copyright 73 * notice, this list of conditions and the following disclaimer in the 74 * documentation and/or other materials provided with the distribution. 75 * 76 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 77 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 78 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 79 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 80 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 81 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 82 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 83 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 84 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 85 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 86 */ 87 88 #include <sys/cdefs.h> 89 __FBSDID("$FreeBSD$"); 90 91 /* 92 * Native 64-bit page table operations for running without a hypervisor. 93 */ 94 95 #include <sys/param.h> 96 #include <sys/kernel.h> 97 #include <sys/ktr.h> 98 #include <sys/lock.h> 99 #include <sys/mutex.h> 100 #include <sys/proc.h> 101 #include <sys/sched.h> 102 #include <sys/sysctl.h> 103 #include <sys/systm.h> 104 #include <sys/rwlock.h> 105 #include <sys/endian.h> 106 107 #include <sys/kdb.h> 108 109 #include <vm/vm.h> 110 #include <vm/vm_param.h> 111 #include <vm/vm_kern.h> 112 #include <vm/vm_page.h> 113 #include <vm/vm_map.h> 114 #include <vm/vm_object.h> 115 #include <vm/vm_extern.h> 116 #include <vm/vm_pageout.h> 117 118 #include <machine/cpu.h> 119 #include <machine/md_var.h> 120 #include <machine/mmuvar.h> 121 122 #include "mmu_oea64.h" 123 #include "mmu_if.h" 124 #include "moea64_if.h" 125 126 #define PTESYNC() __asm __volatile("ptesync"); 127 #define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 128 #define SYNC() __asm __volatile("sync"); 129 #define EIEIO() __asm __volatile("eieio"); 130 131 #define VSID_HASH_MASK 0x0000007fffffffffULL 132 133 /* POWER9 only permits a 64k partition table size. */ 134 #define PART_SIZE 0x10000 135 136 static __inline void 137 TLBIE(uint64_t vpn) { 138 #ifndef __powerpc64__ 139 register_t vpn_hi, vpn_lo; 140 register_t msr; 141 register_t scratch, intr; 142 #endif 143 144 static volatile u_int tlbie_lock = 0; 145 146 vpn <<= ADDR_PIDX_SHFT; 147 vpn &= ~(0xffffULL << 48); 148 149 /* Hobo spinlock: we need stronger guarantees than mutexes provide */ 150 while (!atomic_cmpset_int(&tlbie_lock, 0, 1)); 151 isync(); /* Flush instruction queue once lock acquired */ 152 153 #ifdef __powerpc64__ 154 __asm __volatile("tlbie %0" :: "r"(vpn) : "memory"); 155 __asm __volatile("eieio; tlbsync; ptesync" ::: "memory"); 156 #else 157 vpn_hi = (uint32_t)(vpn >> 32); 158 vpn_lo = (uint32_t)vpn; 159 160 intr = intr_disable(); 161 __asm __volatile("\ 162 mfmsr %0; \ 163 mr %1, %0; \ 164 insrdi %1,%5,1,0; \ 165 mtmsrd %1; isync; \ 166 \ 167 sld %1,%2,%4; \ 168 or %1,%1,%3; \ 169 tlbie %1; \ 170 \ 171 mtmsrd %0; isync; \ 172 eieio; \ 173 tlbsync; \ 174 ptesync;" 175 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 176 : "memory"); 177 intr_restore(intr); 178 #endif 179 180 /* No barriers or special ops -- taken care of by ptesync above */ 181 tlbie_lock = 0; 182 } 183 184 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 185 #define ENABLE_TRANS(msr) mtmsr(msr) 186 187 /* 188 * PTEG data. 189 */ 190 static volatile struct pate *moea64_part_table; 191 static volatile struct lpte *moea64_pteg_table; 192 static struct rwlock moea64_eviction_lock; 193 194 /* 195 * PTE calls. 196 */ 197 static int moea64_pte_insert_native(mmu_t, struct pvo_entry *); 198 static int64_t moea64_pte_synch_native(mmu_t, struct pvo_entry *); 199 static int64_t moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t); 200 static int64_t moea64_pte_replace_native(mmu_t, struct pvo_entry *, int); 201 static int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *); 202 203 /* 204 * Utility routines. 205 */ 206 static void moea64_bootstrap_native(mmu_t mmup, 207 vm_offset_t kernelstart, vm_offset_t kernelend); 208 static void moea64_cpu_bootstrap_native(mmu_t, int ap); 209 static void tlbia(void); 210 211 static mmu_method_t moea64_native_methods[] = { 212 /* Internal interfaces */ 213 MMUMETHOD(mmu_bootstrap, moea64_bootstrap_native), 214 MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap_native), 215 216 MMUMETHOD(moea64_pte_synch, moea64_pte_synch_native), 217 MMUMETHOD(moea64_pte_clear, moea64_pte_clear_native), 218 MMUMETHOD(moea64_pte_unset, moea64_pte_unset_native), 219 MMUMETHOD(moea64_pte_replace, moea64_pte_replace_native), 220 MMUMETHOD(moea64_pte_insert, moea64_pte_insert_native), 221 222 { 0, 0 } 223 }; 224 225 MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, 226 0, oea64_mmu); 227 228 static int64_t 229 moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo) 230 { 231 volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 232 struct lpte properpt; 233 uint64_t ptelo; 234 235 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); 236 237 moea64_pte_from_pvo(pvo, &properpt); 238 239 rw_rlock(&moea64_eviction_lock); 240 if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 241 (properpt.pte_hi & LPTE_AVPN_MASK)) { 242 /* Evicted */ 243 rw_runlock(&moea64_eviction_lock); 244 return (-1); 245 } 246 247 PTESYNC(); 248 ptelo = be64toh(pt->pte_lo); 249 250 rw_runlock(&moea64_eviction_lock); 251 252 return (ptelo & (LPTE_REF | LPTE_CHG)); 253 } 254 255 static int64_t 256 moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit) 257 { 258 volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 259 struct lpte properpt; 260 uint64_t ptelo; 261 262 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); 263 264 moea64_pte_from_pvo(pvo, &properpt); 265 266 rw_rlock(&moea64_eviction_lock); 267 if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 268 (properpt.pte_hi & LPTE_AVPN_MASK)) { 269 /* Evicted */ 270 rw_runlock(&moea64_eviction_lock); 271 return (-1); 272 } 273 274 if (ptebit == LPTE_REF) { 275 /* See "Resetting the Reference Bit" in arch manual */ 276 PTESYNC(); 277 /* 2-step here safe: precision is not guaranteed */ 278 ptelo = be64toh(pt->pte_lo); 279 280 /* One-byte store to avoid touching the C bit */ 281 ((volatile uint8_t *)(&pt->pte_lo))[6] = 282 #if BYTE_ORDER == BIG_ENDIAN 283 ((uint8_t *)(&properpt.pte_lo))[6]; 284 #else 285 ((uint8_t *)(&properpt.pte_lo))[1]; 286 #endif 287 rw_runlock(&moea64_eviction_lock); 288 289 critical_enter(); 290 TLBIE(pvo->pvo_vpn); 291 critical_exit(); 292 } else { 293 rw_runlock(&moea64_eviction_lock); 294 ptelo = moea64_pte_unset_native(mmu, pvo); 295 moea64_pte_insert_native(mmu, pvo); 296 } 297 298 return (ptelo & (LPTE_REF | LPTE_CHG)); 299 } 300 301 static int64_t 302 moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo) 303 { 304 volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 305 struct lpte properpt; 306 uint64_t ptelo; 307 308 moea64_pte_from_pvo(pvo, &properpt); 309 310 rw_rlock(&moea64_eviction_lock); 311 if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) != 312 (properpt.pte_hi & LPTE_AVPN_MASK)) { 313 /* Evicted */ 314 moea64_pte_overflow--; 315 rw_runlock(&moea64_eviction_lock); 316 return (-1); 317 } 318 319 /* 320 * Invalidate the pte, briefly locking it to collect RC bits. No 321 * atomics needed since this is protected against eviction by the lock. 322 */ 323 isync(); 324 critical_enter(); 325 pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED); 326 PTESYNC(); 327 TLBIE(pvo->pvo_vpn); 328 ptelo = be64toh(pt->pte_lo); 329 *((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */ 330 critical_exit(); 331 rw_runlock(&moea64_eviction_lock); 332 333 /* Keep statistics */ 334 moea64_pte_valid--; 335 336 return (ptelo & (LPTE_CHG | LPTE_REF)); 337 } 338 339 static int64_t 340 moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags) 341 { 342 volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 343 struct lpte properpt; 344 int64_t ptelo; 345 346 if (flags == 0) { 347 /* Just some software bits changing. */ 348 moea64_pte_from_pvo(pvo, &properpt); 349 350 rw_rlock(&moea64_eviction_lock); 351 if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 352 (properpt.pte_hi & LPTE_AVPN_MASK)) { 353 rw_runlock(&moea64_eviction_lock); 354 return (-1); 355 } 356 pt->pte_hi = htobe64(properpt.pte_hi); 357 ptelo = be64toh(pt->pte_lo); 358 rw_runlock(&moea64_eviction_lock); 359 } else { 360 /* Otherwise, need reinsertion and deletion */ 361 ptelo = moea64_pte_unset_native(mmu, pvo); 362 moea64_pte_insert_native(mmu, pvo); 363 } 364 365 return (ptelo); 366 } 367 368 static void 369 moea64_cpu_bootstrap_native(mmu_t mmup, int ap) 370 { 371 int i = 0; 372 #ifdef __powerpc64__ 373 struct slb *slb = PCPU_GET(aim.slb); 374 register_t seg0; 375 #endif 376 377 /* 378 * Initialize segment registers and MMU 379 */ 380 381 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); 382 383 /* 384 * Install kernel SLB entries 385 */ 386 387 #ifdef __powerpc64__ 388 __asm __volatile ("slbia"); 389 __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : 390 "r"(0)); 391 392 for (i = 0; i < n_slbs; i++) { 393 if (!(slb[i].slbe & SLBE_VALID)) 394 continue; 395 396 __asm __volatile ("slbmte %0, %1" :: 397 "r"(slb[i].slbv), "r"(slb[i].slbe)); 398 } 399 #else 400 for (i = 0; i < 16; i++) 401 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 402 #endif 403 404 /* 405 * Install page table 406 */ 407 408 if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) { 409 mtspr(SPR_PTCR, 410 ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) | 411 flsl((PART_SIZE >> 12) - 1)); 412 } else { 413 __asm __volatile ("ptesync; mtsdr1 %0; isync" 414 :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS) 415 | (uintptr_t)(flsl(moea64_pteg_mask >> 11)))); 416 } 417 tlbia(); 418 } 419 420 static void 421 moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart, 422 vm_offset_t kernelend) 423 { 424 vm_size_t size; 425 vm_offset_t off; 426 vm_paddr_t pa; 427 register_t msr; 428 429 moea64_early_bootstrap(mmup, kernelstart, kernelend); 430 431 /* 432 * Allocate PTEG table. 433 */ 434 435 size = moea64_pteg_count * sizeof(struct lpteg); 436 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 437 moea64_pteg_count, size); 438 rw_init(&moea64_eviction_lock, "pte eviction"); 439 440 /* 441 * We now need to allocate memory. This memory, to be allocated, 442 * has to reside in a page table. The page table we are about to 443 * allocate. We don't have BAT. So drop to data real mode for a minute 444 * as a measure of last resort. We do this a couple times. 445 */ 446 447 if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) { 448 moea64_part_table = 449 (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE); 450 if (hw_direct_map) 451 moea64_part_table = 452 (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table); 453 } 454 /* 455 * PTEG table must be aligned on a 256k boundary, but can be placed 456 * anywhere with that alignment. 457 */ 458 moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, 256*1024); 459 if (hw_direct_map) 460 moea64_pteg_table = 461 (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table); 462 DISABLE_TRANS(msr); 463 if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) { 464 bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE); 465 moea64_part_table[0].pagetab = 466 ((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS) | 467 (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)); 468 } 469 bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count * 470 sizeof(struct lpteg)); 471 ENABLE_TRANS(msr); 472 473 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 474 475 moea64_mid_bootstrap(mmup, kernelstart, kernelend); 476 477 /* 478 * Add a mapping for the page table itself if there is no direct map. 479 */ 480 if (!hw_direct_map) { 481 size = moea64_pteg_count * sizeof(struct lpteg); 482 off = (vm_offset_t)(moea64_pteg_table); 483 DISABLE_TRANS(msr); 484 for (pa = off; pa < off + size; pa += PAGE_SIZE) 485 pmap_kenter(pa, pa); 486 ENABLE_TRANS(msr); 487 } 488 489 /* Bring up virtual memory */ 490 moea64_late_bootstrap(mmup, kernelstart, kernelend); 491 } 492 493 static void 494 tlbia(void) 495 { 496 vm_offset_t i; 497 #ifndef __powerpc64__ 498 register_t msr, scratch; 499 #endif 500 501 i = 0xc00; /* IS = 11 */ 502 switch (mfpvr() >> 16) { 503 case IBM970: 504 case IBM970FX: 505 case IBM970MP: 506 case IBM970GX: 507 case IBMPOWER4: 508 case IBMPOWER4PLUS: 509 case IBMPOWER5: 510 case IBMPOWER5PLUS: 511 i = 0; /* IS not supported */ 512 break; 513 } 514 515 TLBSYNC(); 516 517 for (; i < 0x200000; i += 0x00001000) { 518 #ifdef __powerpc64__ 519 __asm __volatile("tlbiel %0" :: "r"(i)); 520 #else 521 __asm __volatile("\ 522 mfmsr %0; \ 523 mr %1, %0; \ 524 insrdi %1,%3,1,0; \ 525 mtmsrd %1; \ 526 isync; \ 527 \ 528 tlbiel %2; \ 529 \ 530 mtmsrd %0; \ 531 isync;" 532 : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 533 #endif 534 } 535 536 EIEIO(); 537 TLBSYNC(); 538 } 539 540 static int 541 atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi) 542 { 543 int ret; 544 uint32_t oldhihalf; 545 546 /* 547 * Note: in principle, if just the locked bit were set here, we 548 * could avoid needing the eviction lock. However, eviction occurs 549 * so rarely that it isn't worth bothering about in practice. 550 */ 551 552 __asm __volatile ( 553 "1:\tlwarx %1, 0, %3\n\t" /* load old value */ 554 "and. %0,%1,%4\n\t" /* check if any bits set */ 555 "bne 2f\n\t" /* exit if any set */ 556 "stwcx. %5, 0, %3\n\t" /* attempt to store */ 557 "bne- 1b\n\t" /* spin if failed */ 558 "li %0, 1\n\t" /* success - retval = 1 */ 559 "b 3f\n\t" /* we've succeeded */ 560 "2:\n\t" 561 "stwcx. %1, 0, %3\n\t" /* clear reservation (74xx) */ 562 "li %0, 0\n\t" /* failure - retval = 0 */ 563 "3:\n\t" 564 : "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi) 565 : "r" ((volatile char *)&pte->pte_hi + 4), 566 "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED), 567 "m" (pte->pte_hi) 568 : "cr0", "cr1", "cr2", "memory"); 569 570 *oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf; 571 572 return (ret); 573 } 574 575 static uintptr_t 576 moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase, 577 uint64_t mask) 578 { 579 volatile struct lpte *pt; 580 uint64_t oldptehi, va; 581 uintptr_t k; 582 int i, j; 583 584 /* Start at a random slot */ 585 i = mftb() % 8; 586 for (j = 0; j < 8; j++) { 587 k = slotbase + (i + j) % 8; 588 pt = &moea64_pteg_table[k]; 589 /* Invalidate and seize lock only if no bits in mask set */ 590 if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */ 591 break; 592 } 593 594 if (j == 8) 595 return (-1); 596 597 if (oldptehi & LPTE_VALID) { 598 KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry")); 599 /* 600 * Need to invalidate old entry completely: see 601 * "Modifying a Page Table Entry". Need to reconstruct 602 * the virtual address for the outgoing entry to do that. 603 */ 604 if (oldptehi & LPTE_BIG) 605 va = oldptehi >> moea64_large_page_shift; 606 else 607 va = oldptehi >> ADDR_PIDX_SHFT; 608 if (oldptehi & LPTE_HID) 609 va = (((k >> 3) ^ moea64_pteg_mask) ^ va) & 610 VSID_HASH_MASK; 611 else 612 va = ((k >> 3) ^ va) & VSID_HASH_MASK; 613 va |= (oldptehi & LPTE_AVPN_MASK) << 614 (ADDR_API_SHFT64 - ADDR_PIDX_SHFT); 615 PTESYNC(); 616 TLBIE(va); 617 moea64_pte_valid--; 618 moea64_pte_overflow++; 619 } 620 621 /* 622 * Update the PTE as per "Adding a Page Table Entry". Lock is released 623 * by setting the high doubleworld. 624 */ 625 pt->pte_lo = htobe64(pvo_pt->pte_lo); 626 EIEIO(); 627 pt->pte_hi = htobe64(pvo_pt->pte_hi); 628 PTESYNC(); 629 630 /* Keep statistics */ 631 moea64_pte_valid++; 632 633 return (k); 634 } 635 636 static int 637 moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo) 638 { 639 struct lpte insertpt; 640 uintptr_t slot; 641 642 /* Initialize PTE */ 643 moea64_pte_from_pvo(pvo, &insertpt); 644 645 /* Make sure further insertion is locked out during evictions */ 646 rw_rlock(&moea64_eviction_lock); 647 648 /* 649 * First try primary hash. 650 */ 651 pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */ 652 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 653 LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); 654 if (slot != -1) { 655 rw_runlock(&moea64_eviction_lock); 656 pvo->pvo_pte.slot = slot; 657 return (0); 658 } 659 660 /* 661 * Now try secondary hash. 662 */ 663 pvo->pvo_vaddr ^= PVO_HID; 664 insertpt.pte_hi ^= LPTE_HID; 665 pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); 666 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 667 LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); 668 if (slot != -1) { 669 rw_runlock(&moea64_eviction_lock); 670 pvo->pvo_pte.slot = slot; 671 return (0); 672 } 673 674 /* 675 * Out of luck. Find a PTE to sacrifice. 676 */ 677 678 /* Lock out all insertions for a bit */ 679 if (!rw_try_upgrade(&moea64_eviction_lock)) { 680 rw_runlock(&moea64_eviction_lock); 681 rw_wlock(&moea64_eviction_lock); 682 } 683 684 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 685 LPTE_WIRED | LPTE_LOCKED); 686 if (slot != -1) { 687 rw_wunlock(&moea64_eviction_lock); 688 pvo->pvo_pte.slot = slot; 689 return (0); 690 } 691 692 /* Try other hash table. Now we're getting desperate... */ 693 pvo->pvo_vaddr ^= PVO_HID; 694 insertpt.pte_hi ^= LPTE_HID; 695 pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); 696 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 697 LPTE_WIRED | LPTE_LOCKED); 698 if (slot != -1) { 699 rw_wunlock(&moea64_eviction_lock); 700 pvo->pvo_pte.slot = slot; 701 return (0); 702 } 703 704 /* No freeable slots in either PTEG? We're hosed. */ 705 rw_wunlock(&moea64_eviction_lock); 706 panic("moea64_pte_insert: overflow"); 707 return (-1); 708 } 709 710