/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}

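/*
 * Bootstrap helpers: clearing PSL_DR in the MSR disables data address
 * translation, which lets early bootstrap code below touch the page table
 * through its physical address before any mapping for it exists.
 */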
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = pt->pte_lo;

		/* One-byte store to avoid touching the C bit */
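		/*
		 * In the big-endian PTE image, LPTE_REF (0x100) lives in
		 * byte 6 of pte_lo while LPTE_CHG (0x80) lives in byte 7,
		 * so a single-byte store can clear R without racing a
		 * hardware update of C.
		 */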
		((volatile uint8_t *)(&pt->pte_lo))[6] =
		    ((uint8_t *)(&properpt.pte_lo))[6];
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = (pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED;
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((pt->pte_hi & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = properpt.pte_hi;
		ptelo = pt->pte_lo;
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

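	/*
	 * SDR1 takes the physical base of the hash table in its upper bits
	 * and HTABSIZE, log2(number of PTEGs) minus 11, in its low bits.
	 * Since moea64_pteg_mask is the PTEG count minus one,
	 * flsl(moea64_pteg_mask >> 11) produces exactly that encoding.
	 */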
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		| (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory. That memory has to reside in a
	 * page table, but the page table is the very thing we are about to
	 * allocate, and this CPU has no BATs to fall back on. So drop to
	 * data real mode for a minute as a measure of last resort. We do
	 * this a couple of times.
	 */

	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
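		/*
		 * The PTEG index is hash(VSID) XOR hash(page index),
		 * complemented via the mask for the secondary hash, so
		 * XORing this slot's PTEG index back against the VSID bits
		 * held in the AVPN recovers the low-order page index bits
		 * that the AVPN itself does not store.
		 */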
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}