xref: /freebsd/sys/powerpc/aim/moea64_native.c (revision 7e00348e7605b9906601438008341ffc37c00e2c)
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

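/*
 * The tlbie instruction used below broadcasts a TLB invalidation to every
 * processor, and the architecture allows only one such sequence to be in
 * flight at a time; issuance is therefore serialized with the simple
 * global lock in TLBIE() rather than a mutex.
 */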
static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}

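/*
 * DISABLE_TRANS()/ENABLE_TRANS() turn data address translation (MSR[DR])
 * off and back on around accesses that must reach physical memory directly,
 * e.g. before a mapping for the page table itself exists.
 */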
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static struct	lpteg *moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void		moea64_bootstrap_native(mmu_t mmup,
			    vm_offset_t kernelstart, vm_offset_t kernelend);
static void		moea64_cpu_bootstrap_native(mmu_t, int ap);
static void		tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

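/*
 * Compute the primary PTEG index for a (VSID, virtual address) pair: the
 * low-order bits of the VSID are XORed with the page index within the
 * segment, and the result is masked to the size of the page table, as in
 * the architecture's hashed page table lookup.
 */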
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	critical_enter();
	TLBIE(vpn);
	critical_exit();
}

static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid++;
}

static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Invalidate the pte.
	 */
	isync();
	critical_enter();
	pvo_pt->pte_hi &= ~LPTE_VALID;
	pt->pte_hi &= ~LPTE_VALID;
	PTESYNC();
	TLBIE(vpn);
	critical_exit();

	/*
	 * Save the reg & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid--;
}

static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		__asm __volatile ("slbia");
		__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
		    "r"(0));

		for (i = 0; i < 64; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

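	/*
	 * SDR1 takes the physical base address of the page table in its
	 * high-order bits and the HTABSIZE field (log2 of the number of
	 * PTEGs, less 11) in its low-order bits, which is what the flsl()
	 * expression below computes.
	 */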
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory for the page table itself.  Any
	 * access to that memory would ordinarily require a mapping in the
	 * very table we are about to allocate, and this MMU has no BATs to
	 * fall back on, so as a last resort we drop to real (untranslated)
	 * data mode around those accesses.  We do this in a few places.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
	#ifndef __powerpc64__
	register_t msr, scratch;
	#endif

	TLBSYNC();

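	/*
	 * Step a page-sized stride across enough of the address space to
	 * index every TLB congruence class, invalidating each set locally
	 * with tlbiel.
	 */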
	for (i = 0; i < 0xFF000; i += 0x00001000) {
		#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
		#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
		#endif
	}

	EIEIO();
	TLBSYNC();
}

static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte 	*pt;
	int		pteidx, ptegidx;
	uint64_t	vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}

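/*
 * Find a slot in the given PTEG that may be evicted: any entry that is
 * neither locked nor wired, preferring one whose referenced bit is clear.
 * Returns the slot index, or -1 if every slot is pinned.
 */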
static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}

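/*
 * Insert a PTE into the page table, trying the primary hash bucket first
 * and then the secondary; if both are full, a spillable entry is evicted
 * and its slot reused.  Returns the slot index that was used.
 */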
static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset_native(mmu, (uintptr_t)pt,
			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	   ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set_native(pt, pvo_pt);

	return (i);
}