xref: /freebsd/sys/powerpc/pseries/mmu_phyp.c (revision fe75646a0234a261c0013bf1840fdac4acaf0cec)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2010 Andreas Tobler
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <powerpc/aim/mmu_oea64.h>

#include "phyp-hvcall.h"

#define MMU_PHYP_DEBUG 0
#define MMU_PHYP_ID "mmu_phyp: "
#if MMU_PHYP_DEBUG
#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
#define dprintf0(fmt, ...) dprintf(MMU_PHYP_ID fmt, ## __VA_ARGS__)
#else
#define dprintf(fmt, args...) do { ; } while(0)
#define dprintf0(fmt, args...) do { ; } while(0)
#endif

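/*
 * Read-mostly lock protecting PTEs from being evicted out from under an
 * in-progress operation: the insert and clear paths take it shared, and
 * the insert path upgrades to the exclusive lock only when a PTEG
 * overflows and an entry must be evicted.
 */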
static struct rmlock mphyp_eviction_lock;

/*
 * Kernel MMU interface
 */

static void	mphyp_install(void);
static void	mphyp_bootstrap(vm_offset_t kernelstart,
		    vm_offset_t kernelend);
static void	mphyp_cpu_bootstrap(int ap);
static void	*mphyp_dump_pmap(void *ctx, void *buf,
		    u_long *nbytes);
static int64_t	mphyp_pte_synch(struct pvo_entry *pvo);
static int64_t	mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit);
static int64_t	mphyp_pte_unset(struct pvo_entry *pvo);
static int64_t	mphyp_pte_insert(struct pvo_entry *pvo);
static int64_t	mphyp_pte_unset_sp(struct pvo_entry *pvo);
static int64_t	mphyp_pte_insert_sp(struct pvo_entry *pvo);
static int64_t	mphyp_pte_replace_sp(struct pvo_entry *pvo);

static struct pmap_funcs mphyp_methods = {
	.install =           mphyp_install,
	.bootstrap =         mphyp_bootstrap,
	.cpu_bootstrap =     mphyp_cpu_bootstrap,
	.dumpsys_dump_pmap = mphyp_dump_pmap,
};

static struct moea64_funcs mmu_phyp_funcs = {
	.pte_synch =      mphyp_pte_synch,
	.pte_clear =      mphyp_pte_clear,
	.pte_unset =      mphyp_pte_unset,
	.pte_insert =     mphyp_pte_insert,
	.pte_unset_sp =   mphyp_pte_unset_sp,
	.pte_insert_sp =  mphyp_pte_insert_sp,
	.pte_replace_sp = mphyp_pte_replace_sp,
};

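/*
 * Register this pmap with the MMU framework, inheriting everything not
 * overridden above from the generic 64-bit OEA implementation.
 */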
MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, oea64_mmu);

static int brokenkvm = 0;
static uint64_t final_pteg_count = 0;

static void
print_kvm_bug_warning(void *data)
{

	if (brokenkvm)
		printf("WARNING: Running on a broken hypervisor that does "
		    "not support mandatory H_CLEAR_MOD and H_CLEAR_REF "
		    "hypercalls. Performance will be suboptimal.\n");
}

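/*
 * Emit the warning twice: once right after the copyright banner and again
 * at the very end of boot, so it is not buried in the boot messages.
 */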
SYSINIT(kvmbugwarn1, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1,
    print_kvm_bug_warning, NULL);
SYSINIT(kvmbugwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_kvm_bug_warning,
    NULL);

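/*
 * Probe the Open Firmware device tree for the CPU properties needed to
 * size the page table and detect large-page support, then install the
 * hypervisor-backed PTE operations into the common OEA64 code.
 */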
static void
mphyp_install(void)
{
	char buf[8];
	uint32_t prop[2];
	uint32_t nptlp, shift = 0, slb_encoding = 0;
	uint32_t lp_size, lp_encoding;
	phandle_t dev, node, root;
	int idx, len, res;
	bool has_lp;

	root = OF_peer(0);

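	/* Find the "cpus" node among the children of the root node. */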
	dev = OF_child(root);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

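	/* Then find the first child of "cpus" with device_type "cpu". */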
	node = OF_child(dev);

	while (node != 0) {
		res = OF_getprop(node, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
		node = OF_peer(node);
	}

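	/*
	 * The second cell of "ibm,pft-size" holds log2 of the hash table
	 * size in bytes; mphyp_bootstrap() later converts this byte count
	 * into a PTEG count.
	 */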
	res = OF_getencprop(node, "ibm,pft-size", prop, sizeof(prop));
	if (res <= 0)
		panic("mmu_phyp: unknown PFT size");
	final_pteg_count = 1 << prop[1];
	res = OF_getencprop(node, "ibm,slb-size", prop, sizeof(prop[0]));
	if (res > 0)
		n_slbs = prop[0];
	dprintf0("slb-size=%i\n", n_slbs);

	/*
	 * Scan the large page size property for PAPR compatible machines.
	 * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties'
	 * for the encoding of the property.
	 */

	len = OF_getproplen(node, "ibm,segment-page-sizes");
	if (len > 0) {
		/*
		 * We have to use a variable length array on the stack
		 * since we have very limited stack space.
		 */
		pcell_t arr[len/sizeof(cell_t)];
		res = OF_getencprop(node, "ibm,segment-page-sizes", arr,
		    sizeof(arr));
		len /= 4;
		idx = 0;
		has_lp = false;
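		/*
		 * Each entry in the property is {segment shift, SLB
		 * encoding, count}, followed by <count> {page size, page
		 * encoding} pairs.
		 */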
		while (len > 0) {
			shift = arr[idx];
			slb_encoding = arr[idx + 1];
			nptlp = arr[idx + 2];

			dprintf0("Segment Page Size: "
			    "%uKB, slb_enc=0x%X: {size, encoding}[%u] =",
			    shift > 10? 1 << (shift-10) : 0,
			    slb_encoding, nptlp);

			idx += 3;
			len -= 3;
			while (len > 0 && nptlp) {
				lp_size = arr[idx];
				lp_encoding = arr[idx+1];

				dprintf(" {%uKB, 0x%X}",
				    lp_size > 10? 1 << (lp_size-10) : 0,
				    lp_encoding);

				if (slb_encoding == SLBV_L && lp_encoding == 0)
					has_lp = true;

				if (slb_encoding == SLB_PGSZ_4K_4K &&
				    lp_encoding == LP_4K_16M)
					moea64_has_lp_4k_16m = true;

				idx += 2;
				len -= 2;
				nptlp--;
			}
			dprintf("\n");
			if (has_lp && moea64_has_lp_4k_16m)
				break;
		}

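		/*
		 * If a large base page size was found, use it to back the
		 * kernel direct map with hugepages.
		 */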
		if (has_lp) {
			moea64_large_page_shift = shift;
			moea64_large_page_size = 1ULL << lp_size;
			moea64_large_page_mask = moea64_large_page_size - 1;
			hw_direct_map = 1;
			printf(MMU_PHYP_ID
			    "Support for hugepages of %uKB detected\n",
			    moea64_large_page_shift > 10?
				1 << (moea64_large_page_shift-10) : 0);
		} else {
			moea64_large_page_size = 0;
			moea64_large_page_shift = 0;
			moea64_large_page_mask = 0;
			hw_direct_map = 0;
			printf(MMU_PHYP_ID
			    "Support for hugepages not found\n");
		}
	}

	moea64_ops = &mmu_phyp_funcs;

	moea64_install();
}

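/*
 * Size the hash table from "ibm,pft-size", purge any stale PTEs left by
 * the firmware or a previous kernel (sparing the hypervisor-managed VRMA
 * entries), and run the common OEA64 bootstrap stages.
 */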
static void
mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	struct lpte old;
	uint64_t vsid;
	int idx;

	rm_init(&mphyp_eviction_lock, "pte eviction");

	moea64_early_bootstrap(kernelstart, kernelend);

	moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

	/* Clear any old page table entries */
	for (idx = 0; idx < moea64_pteg_count*8; idx++) {
		phyp_pft_hcall(H_READ, 0, idx, 0, 0, &old.pte_hi,
		    &old.pte_lo, &old.pte_lo);
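		/* Leave the hypervisor-owned VRMA mappings in place. */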
		vsid = (old.pte_hi << (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) >> 28;
		if (vsid == VSID_VRMA || vsid == 0 /* Older VRMA */)
			continue;

		if (old.pte_hi & LPTE_VALID)
			phyp_hcall(H_REMOVE, 0, idx, 0);
	}

	moea64_mid_bootstrap(kernelstart, kernelend);
	moea64_late_bootstrap(kernelstart, kernelend);

	/* Test for broken versions of KVM that don't conform to the spec */
	if (phyp_hcall(H_CLEAR_MOD, 0, 0) == H_FUNCTION)
		brokenkvm = 1;
}

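/*
 * Per-CPU startup: flush the SLB (slbia leaves entry 0 alone, so clear it
 * explicitly with slbie) and reinstall the kernel SLB entries cached in
 * the per-CPU data.
 */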
static void
mphyp_cpu_bootstrap(int ap)
{
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
	int i;

	/*
	 * Install kernel SLB entries
	 */

        __asm __volatile ("slbia");
        __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
}

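/*
 * Read a PVO's PTE back via H_READ and return its REF|CHG bits, or -1 if
 * the slot no longer holds a valid mapping for this PVO.
 */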
static int64_t
mphyp_pte_synch(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;

	__asm __volatile("ptesync");
	phyp_pft_hcall(H_READ, 0, pvo->pvo_pte.slot, 0, 0, &pte.pte_hi,
	    &pte.pte_lo, &junk);
	if ((pte.pte_hi & LPTE_AVPN_MASK) !=
	    ((pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
	    LPTE_AVPN_MASK))
		return (-1);
	if (!(pte.pte_hi & LPTE_VALID))
		return (-1);

	return (pte.pte_lo & (LPTE_CHG | LPTE_REF));
}

static int64_t
mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
	struct rm_priotracker track;
	int64_t refchg;
	uint64_t ptelo, junk;
	int err __diagused;

	/*
	 * This involves two steps (synch and clear) so we need the entry
	 * not to change in the middle. We are protected against deliberate
	 * unset by virtue of holding the pmap lock. Protection against
	 * incidental unset (page table eviction) comes from holding the
	 * shared eviction lock.
	 */
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rm_rlock(&mphyp_eviction_lock, &track);

	refchg = mphyp_pte_synch(pvo);
	if (refchg < 0) {
		rm_runlock(&mphyp_eviction_lock, &track);
		return (refchg);
	}

	if (brokenkvm) {
		/*
		 * No way to clear either bit, which is total madness.
		 * Pessimistically claim that, once modified, it stays so
		 * forever and that it is never referenced.
		 */
		rm_runlock(&mphyp_eviction_lock, &track);
		return (refchg & ~LPTE_REF);
	}

	if (ptebit & LPTE_CHG) {
		err = phyp_pft_hcall(H_CLEAR_MOD, 0, pvo->pvo_pte.slot, 0, 0,
		    &ptelo, &junk, &junk);
		KASSERT(err == H_SUCCESS,
		    ("Error clearing page change bit: %d", err));
		refchg |= (ptelo & LPTE_CHG);
	}
	if (ptebit & LPTE_REF) {
		err = phyp_pft_hcall(H_CLEAR_REF, 0, pvo->pvo_pte.slot, 0, 0,
		    &ptelo, &junk, &junk);
		KASSERT(err == H_SUCCESS,
		    ("Error clearing page reference bit: %d", err));
		refchg |= (ptelo & LPTE_REF);
	}

	rm_runlock(&mphyp_eviction_lock, &track);

	return (refchg);
}

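/*
 * Remove a PVO's PTE with H_REMOVE, matching on the AVPN, and return the
 * REF|CHG bits of the removed entry (-1 if it had already been evicted).
 */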
static int64_t
mphyp_pte_unset(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;
	int err;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &pte);

	err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot,
	    pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
	    &junk);
	KASSERT(err == H_SUCCESS || err == H_NOT_FOUND,
	    ("Error removing page: %d", err));

	if (err == H_NOT_FOUND) {
		STAT_MOEA64(moea64_pte_overflow--);
		return (-1);
	}

	return (pte.pte_lo & (LPTE_REF | LPTE_CHG));
}

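/*
 * Pick an evictable slot in the PTEG starting at ptegbase: wired and
 * large-page entries are skipped, and slots that are invalid or have not
 * been referenced recently are preferred.  Returns -1 if every slot in
 * the group is pinned.
 */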
static uintptr_t
mphyp_pte_spillable_ident(uintptr_t ptegbase, struct lpte *to_evict)
{
	uint64_t slot, junk, k;
	struct lpte pt;
	int     i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		slot = ptegbase + (i + j) % 8;
		phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pt.pte_hi,
		    &pt.pte_lo, &junk);

		if ((pt.pte_hi & (LPTE_WIRED | LPTE_BIG)) != 0)
			continue;

		/* This is a candidate, so remember it */
		k = slot;

		/* Try to get a page that has not been used lately */
		if (!(pt.pte_hi & LPTE_VALID) || !(pt.pte_lo & LPTE_REF)) {
			memcpy(to_evict, &pt, sizeof(struct lpte));
			return (k);
		}
	}

	if (k == -1)
		return (k);

	phyp_pft_hcall(H_READ, 0, k, 0, 0, &to_evict->pte_hi,
	    &to_evict->pte_lo, &junk);
	return (k);
}

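/*
 * Try to enter a PTE with H_ENTER, first in its primary and then in its
 * secondary hash group.  Returns 0 on success and -1 if both PTEGs are
 * full, in which case the caller must evict an entry.
 */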
static __inline int64_t
mphyp_pte_insert_locked(struct pvo_entry *pvo, struct lpte *pte)
{
	struct lpte evicted;
	uint64_t index, junk;
	int64_t result;

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7UL; /* Base slot address */
	result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot, pte->pte_hi,
	    pte->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS) {
		pvo->pvo_pte.slot = index;
		return (0);
	}
	KASSERT(result == H_PTEG_FULL, ("Page insertion error: %ld "
	    "(ptegidx: %#zx/%#lx, PTE %#lx/%#lx)", result, pvo->pvo_pte.slot,
	    moea64_pteg_count, pte->pte_hi, pte->pte_lo));

	/*
	 * Next try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	pte->pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);

	result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot,
	    pte->pte_hi, pte->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS) {
		pvo->pvo_pte.slot = index;
		return (0);
	}
	KASSERT(result == H_PTEG_FULL, ("Secondary page insertion error: %ld",
	    result));

	return (-1);
}

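/*
 * Called with the eviction lock held exclusively: pick a victim slot in
 * either hash group, remove its PTE, and install the new PTE in the
 * freed slot with H_EXACT.  Panics if no slot in either group can be
 * evicted.
 */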
static __inline int64_t
mphyp_pte_evict_and_insert_locked(struct pvo_entry *pvo, struct lpte *pte)
{
	struct lpte evicted;
	uint64_t index, junk, lastptelo;
	int64_t result;

	evicted.pte_hi = 0;

	index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted);
	if (index == -1L) {
		/* Try other hash table? */
		pvo->pvo_vaddr ^= PVO_HID;
		pte->pte_hi ^= LPTE_HID;
		pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
		index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted);
	}

	if (index == -1L) {
		/* No freeable slots in either PTEG? We're hosed. */
		rm_wunlock(&mphyp_eviction_lock);
		panic("mphyp_pte_insert: overflow");
		return (-1);
	}

	/* Victim acquired: update page before waving goodbye */
	if (evicted.pte_hi & LPTE_VALID) {
		result = phyp_pft_hcall(H_REMOVE, H_AVPN, index,
		    evicted.pte_hi & LPTE_AVPN_MASK, 0, &junk, &lastptelo,
		    &junk);
		STAT_MOEA64(moea64_pte_overflow++);
		KASSERT(result == H_SUCCESS || result == H_NOT_FOUND,
		    ("Error evicting page: %d", (int)result));
	}

	/*
	 * Set the new PTE.
	 */
	result = phyp_pft_hcall(H_ENTER, H_EXACT, index, pte->pte_hi,
	    pte->pte_lo, &index, &evicted.pte_lo, &junk);

	pvo->pvo_pte.slot = index;
	if (result == H_SUCCESS)
		return (0);

	rm_wunlock(&mphyp_eviction_lock);
	panic("Page replacement error: %ld", result);
	return (result);
}

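/*
 * Insert a PTE, taking the eviction lock shared for the common case and
 * retrying under the exclusive lock only when both PTEGs turn out to be
 * full.
 */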
static int64_t
mphyp_pte_insert(struct pvo_entry *pvo)
{
	struct rm_priotracker track;
	int64_t ret;
	struct lpte pte;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &pte);

	/* Make sure further insertion is locked out during evictions */
	rm_rlock(&mphyp_eviction_lock, &track);

	ret = mphyp_pte_insert_locked(pvo, &pte);
	rm_runlock(&mphyp_eviction_lock, &track);

	if (ret == -1) {
		/*
		 * Out of luck. Find a PTE to sacrifice.
		 */

		/* Lock out all insertions for a bit */
		rm_wlock(&mphyp_eviction_lock);
		ret = mphyp_pte_evict_and_insert_locked(pvo, &pte);
		rm_wunlock(&mphyp_eviction_lock); /* All clear */
	}

	return (ret);
}

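/*
 * Kernel dump support: copy one block's worth of PTEs out of the
 * hypervisor-managed hash table via H_READ.  Returns NULL once the
 * requested PTE range has been exhausted.
 */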
static void *
mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	struct lpte p, *pbuf;
	int bufidx;
	uint64_t junk;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	pbuf = (struct lpte *)buf;
	bufidx = 0;
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);

	if (*nbytes == 0)
		return (NULL);

	for (; ptex < ptex_end; ptex++) {
		phyp_pft_hcall(H_READ, 0, ptex, 0, 0,
			&p.pte_hi, &p.pte_lo, &junk);
		pbuf[bufidx++] = p;
	}

	dctx->ptex = ptex;
	return (buf);
}

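/*
 * Superpage variants: a superpage is backed by a run of base-page PVOs
 * covering HPT_SP_SIZE, so walk the pmap's PVO tree across that range
 * and apply the base-page operation to each entry.
 */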
static int64_t
mphyp_pte_unset_sp(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk, refchg;
	int err;
	vm_offset_t eva;
	pmap_t pm __diagused;

	pm = pvo->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	refchg = 0;
	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		moea64_pte_from_pvo(pvo, &pte);

		err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot,
		    pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
		    &junk);
		KASSERT(err == H_SUCCESS || err == H_NOT_FOUND,
		    ("Error removing page: %d", err));

		if (err == H_NOT_FOUND)
			STAT_MOEA64(moea64_pte_overflow--);
		refchg |= pte.pte_lo & (LPTE_REF | LPTE_CHG);
	}

	return (refchg);
}

static int64_t
mphyp_pte_insert_sp(struct pvo_entry *pvo)
{
	struct rm_priotracker track;
	int64_t ret;
	struct lpte pte;
	vm_offset_t eva;
	pmap_t pm __diagused;

	pm = pvo->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	/* Make sure further insertion is locked out during evictions */
	rm_rlock(&mphyp_eviction_lock, &track);

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		/* Initialize PTE */
		moea64_pte_from_pvo(pvo, &pte);

		ret = mphyp_pte_insert_locked(pvo, &pte);
		if (ret == -1) {
			/*
			 * Out of luck. Find a PTE to sacrifice.
			 */

			/* Lock out all insertions for a bit */
			rm_runlock(&mphyp_eviction_lock, &track);
			rm_wlock(&mphyp_eviction_lock);
			mphyp_pte_evict_and_insert_locked(pvo, &pte);
			rm_wunlock(&mphyp_eviction_lock); /* All clear */
			rm_rlock(&mphyp_eviction_lock, &track);
		}
	}

	rm_runlock(&mphyp_eviction_lock, &track);
	return (0);
}

static int64_t
mphyp_pte_replace_sp(struct pvo_entry *pvo)
{
	int64_t refchg;

	refchg = mphyp_pte_unset_sp(pvo);
	mphyp_pte_insert_sp(pvo);
	return (refchg);
}