/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode.  What a pain.
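 *
 * Note also that tlbie invalidations are visible system-wide and the
 * architecture permits only one tlbie sequence to be in progress at a
 * time, which is why every invocation below is serialized with
 * tlbie_mutex.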
 */
struct mtx	tlbie_mutex;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch;
#endif

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	mtx_lock_spin(&tlbie_mutex);
#ifdef __powerpc64__
	__asm __volatile("\
	    ptesync; \
	    tlbie %0; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	:: "r"(vpn) : "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    ptesync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	: "memory");
#endif
	mtx_unlock_spin(&tlbie_mutex);
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
#define ENABLE_TRANS(msr)	mtmsr(msr); isync()

/*
 * PTEG data.
 */
static struct lpteg *moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
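
/*
 * Compute the PTE group index for a (VSID, address) pair: the low-order
 * bits of the VSID are XORed with the page index of the address and the
 * result is masked to the number of PTE groups in the table.
 */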
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Clear the bit in the PTE in place, as shown in Section 7.6.3.2.3,
	 * then invalidate the corresponding TLB entry.
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(vpn);
}

static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();
	moea64_pte_valid++;
}

static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_hi &= ~LPTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~LPTE_VALID;
	TLBIE(vpn);

	/*
	 * Save the ref & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);
	moea64_pte_valid--;
}

static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE, then reinstall it with the new contents.
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory.  That memory, however, must be
	 * mapped by the very page table we are about to allocate, and we
	 * have no BAT to cover it.  So drop to data real mode for a minute
	 * as a measure of last resort; we do this a couple of times below.
	 */
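	/*
	 * Note that the table is allocated with its own size as the
	 * alignment: the hash table base programmed into SDR1 (in
	 * moea64_cpu_bootstrap_native() above) must be naturally aligned
	 * so that PTEG offsets can simply be ORed into it.
	 */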
	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	/*
	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
	 */
	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte *pt;
	int pteidx, ptegidx;
	uint64_t vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
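	 *
	 * If LPTE_HID is set, the entry was installed using the secondary
	 * hash, so its group index is the primary index XORed with
	 * moea64_pteg_mask.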
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table, difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}

static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
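	 *
	 * moea64_pte_unset_native() saves the victim's REF/CHG bits back
	 * into its PVO before the slot is reused, so no referenced or
	 * modified state is lost by the eviction.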
609 */ 610 pt = &moea64_pteg_table[pteg_bktidx].pt[i]; 611 612 if (pt->pte_hi & LPTE_HID) 613 pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */ 614 615 LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) { 616 if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) { 617 KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID, 618 ("Invalid PVO for valid PTE!")); 619 moea64_pte_unset_native(mmu, (uintptr_t)pt, 620 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 621 PVO_PTEGIDX_CLR(pvo); 622 moea64_pte_overflow++; 623 break; 624 } 625 } 626 627 KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi, 628 ("Unable to find PVO for spilled PTE")); 629 630 /* 631 * Set the new PTE. 632 */ 633 moea64_pte_set_native(pt, pvo_pt); 634 635 return (i); 636 } 637 638