/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */
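
/*
 * The hardware page table (HTAB) is a power-of-two-sized array of PTE
 * groups (PTEGs), each of which holds eight 16-byte PTEs.  A virtual
 * address selects a primary PTEG by hashing its VSID with its page
 * index; the complement of that hash (within moea64_pteg_mask) selects
 * the secondary PTEG, and entries stored at their secondary location
 * carry LPTE_HID in pte_hi.  Illustratively, for a table of 2^19 PTEGs
 * (moea64_pteg_mask == 0x7ffff):
 *
 *	hash      = (vsid & VSID_HASH_MASK) ^ pgidx;
 *	primary   = hash & moea64_pteg_mask;
 *	secondary = primary ^ moea64_pteg_mask;
 */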

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync();	/* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	/*
	 * 32-bit mode: temporarily set MSR[SF] (via insrdi) so that tlbie
	 * can take the full 64-bit VPN, assembled from its two 32-bit
	 * halves in a scratch register.
	 */
	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	: "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}

#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static struct lpteg	*moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
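
/*
 * Compute the primary-hash PTEG index for a (VSID, address) pair.  For
 * large mappings the page index is taken at moea64_large_page_shift
 * rather than at the base page size.  Callers derive the secondary PTEG
 * index by XORing the returned value with moea64_pteg_mask.
 */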
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * As shown in Section 7.6.3.2.3 (the architecture's page table
	 * update recipe).
	 */
	pt->pte_lo &= ~ptebit;
	critical_enter();
	TLBIE(vpn);
	critical_exit();
}
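
/*
 * The set/unset routines below follow the architecture's recipe for
 * updating a page table that the hardware may be walking concurrently:
 * on set, pte_lo is written before pte_hi marks the entry LPTE_VALID
 * (with an eieio in between) so a table walk can never observe a valid
 * entry with a stale low word, and a trailing ptesync orders the update;
 * on unset, the valid bit is cleared first and the stale translation is
 * then flushed with TLBIE().
 */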

static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid++;
}

static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Invalidate the pte.
	 */
	isync();
	critical_enter();
	pvo_pt->pte_hi &= ~LPTE_VALID;
	pt->pte_hi &= ~LPTE_VALID;
	PTESYNC();
	TLBIE(vpn);
	critical_exit();

	/*
	 * Save the ref & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid--;
}

static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

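	/*
	 * SDR1 takes the physical base address of the size-aligned PTEG
	 * table in its high-order bits and HTABSIZE, the log2 of the
	 * number of PTEGs beyond the 2^11 architectural minimum (256 KB,
	 * at 128 bytes per PTEG), in its low-order bits; flsl() of the
	 * mask shifted down by 11 yields exactly that encoding.
	 */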
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory for the page table itself, but
	 * any memory we touch must eventually be mapped by that same, not
	 * yet existing, table, and we have no BAT registers to map it in
	 * the meantime.  So temporarily disable data translation (drop to
	 * real mode) while initializing it; we do the same in a couple of
	 * other places below.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte *pt;
	int pteidx, ptegidx;
	uint64_t vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table; difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}

static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}
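
/*
 * Insert a PTE into the hardware table: look for a free slot in the
 * primary PTEG, then in the secondary PTEG, and if both are full, evict
 * a spillable entry (one that is neither locked nor wired, preferably
 * with LPTE_REF clear, as chosen above) and take its slot.
 */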
static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PVO lists are indexed by
						    the primary hash */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset_native(mmu, (uintptr_t)pt,
			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set_native(pt, pvo_pt);

	return (i);
}