/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}
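
/*
 * DISABLE_TRANS()/ENABLE_TRANS() briefly turn off data relocation (PSL_DR)
 * so that early bootstrap code can touch the PTEG table through its
 * physical address before any mapping for it exists.
 */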
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}
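
/*
 * Invalidate the hardware PTE backing a PVO and return its accumulated
 * REF/CHG bits, or -1 if the entry has already been evicted.
 */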
static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}
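
/*
 * Boot-time setup: allocate and zero the PTEG table, then run the generic
 * moea64 bootstrap stages around it.
 */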
static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory, but anything we allocate has to be
	 * mapped by the page table we are about to allocate, and we have no
	 * BATs to cover it. So drop to data real mode for a minute as a
	 * measure of last resort; we do this a couple of times.
	 */

	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x200000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}
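
/*
 * Try to lock a PTE by storing LPTE_LOCKED into the low 32-bit word of
 * pte_hi, but only if none of the bits in 'bitmask' are currently set there.
 * The previous contents of pte_hi are returned in '*oldhi'. Returns 1 if the
 * lock was obtained and 0 otherwise.
 */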
static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}
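
/*
 * Insert a new mapping into the page table: try the primary hash group
 * first, then the secondary, and only evict an existing (unwired, unlocked)
 * entry when both groups are full.
 */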
static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}