xref: /freebsd/sys/powerpc/aim/mmu_oea64.c (revision a66ffea41d7ea4e39a49bc146e6f6decb4fbd02c)
1 /*-
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*-
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 /*
97  * Manages physical address maps.
98  *
99  * Since the information managed by this module is also stored by the
100  * logical address mapping module, this module may throw away valid virtual
101  * to physical mappings at almost any time.  However, invalidations of
102  * mappings must be done as requested.
103  *
104  * In order to cope with hardware architectures which make virtual to
105  * physical map invalidates expensive, this module may delay invalidate
106  * reduced protection operations until such time as they are actually
107  * necessary.  This module is given full information as to which processors
108  * are currently using which maps, and to when physical maps must be made
109  * correct.
110  */
111 
112 #include "opt_compat.h"
113 #include "opt_kstack_pages.h"
114 
115 #include <sys/param.h>
116 #include <sys/kernel.h>
117 #include <sys/queue.h>
118 #include <sys/cpuset.h>
119 #include <sys/ktr.h>
120 #include <sys/lock.h>
121 #include <sys/msgbuf.h>
122 #include <sys/malloc.h>
123 #include <sys/mutex.h>
124 #include <sys/proc.h>
125 #include <sys/rwlock.h>
126 #include <sys/sched.h>
127 #include <sys/sysctl.h>
128 #include <sys/systm.h>
129 #include <sys/vmmeter.h>
130 
131 #include <sys/kdb.h>
132 
133 #include <dev/ofw/openfirm.h>
134 
135 #include <vm/vm.h>
136 #include <vm/vm_param.h>
137 #include <vm/vm_kern.h>
138 #include <vm/vm_page.h>
139 #include <vm/vm_map.h>
140 #include <vm/vm_object.h>
141 #include <vm/vm_extern.h>
142 #include <vm/vm_pageout.h>
143 #include <vm/uma.h>
144 
145 #include <machine/_inttypes.h>
146 #include <machine/cpu.h>
147 #include <machine/platform.h>
148 #include <machine/frame.h>
149 #include <machine/md_var.h>
150 #include <machine/psl.h>
151 #include <machine/bat.h>
152 #include <machine/hid.h>
153 #include <machine/pte.h>
154 #include <machine/sr.h>
155 #include <machine/trap.h>
156 #include <machine/mmuvar.h>
157 
158 #include "mmu_oea64.h"
159 #include "mmu_if.h"
160 #include "moea64_if.h"
161 
162 void moea64_release_vsid(uint64_t vsid);
163 uintptr_t moea64_get_unique_vsid(void);
164 
165 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
166 #define ENABLE_TRANS(msr)	mtmsr(msr)
167 
168 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
169 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
170 #define	VSID_HASH_MASK		0x0000007fffffffffULL
171 
172 /*
173  * Locking semantics:
174  * -- Read lock: if no modifications are being made to either the PVO lists
175  *    or page table or if any modifications being made result in internal
176  *    changes (e.g. wiring, protection) such that the existence of the PVOs
177  *    is unchanged and they remain associated with the same pmap (in which
178  *    case the changes should be protected by the pmap lock)
179  * -- Write lock: required if PTEs/PVOs are being inserted or removed.
180  */
181 
182 #define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
183 #define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
184 #define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
185 #define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
186 
187 struct ofw_map {
188 	cell_t	om_va;
189 	cell_t	om_len;
190 	cell_t	om_pa_hi;
191 	cell_t	om_pa_lo;
192 	cell_t	om_mode;
193 };
194 
195 /*
196  * Map of physical memory regions.
197  */
198 static struct	mem_region *regions;
199 static struct	mem_region *pregions;
200 static u_int	phys_avail_count;
201 static int	regions_sz, pregions_sz;
202 
203 extern void bs_remap_earlyboot(void);
204 
205 /*
206  * Lock for the pteg and pvo tables.
207  */
208 struct rwlock	moea64_table_lock;
209 struct mtx	moea64_slb_mutex;
210 
211 /*
212  * PTEG data.
213  */
214 u_int		moea64_pteg_count;
215 u_int		moea64_pteg_mask;
216 
217 /*
218  * PVO data.
219  */
220 struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
221 
222 uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
223 uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */
224 
225 #define	BPVO_POOL_SIZE	327680
226 static struct	pvo_entry *moea64_bpvo_pool;
227 static int	moea64_bpvo_pool_index = 0;
228 
229 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
230 #ifdef __powerpc64__
231 #define	NVSIDS		(NPMAPS * 16)
232 #define VSID_HASHMASK	0xffffffffUL
233 #else
234 #define NVSIDS		NPMAPS
235 #define VSID_HASHMASK	0xfffffUL
236 #endif
237 static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
238 
239 static boolean_t moea64_initialized = FALSE;
240 
241 /*
242  * Statistics.
243  */
244 u_int	moea64_pte_valid = 0;
245 u_int	moea64_pte_overflow = 0;
246 u_int	moea64_pvo_entries = 0;
247 u_int	moea64_pvo_enter_calls = 0;
248 u_int	moea64_pvo_remove_calls = 0;
249 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
250     &moea64_pte_valid, 0, "");
251 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
252     &moea64_pte_overflow, 0, "");
253 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
254     &moea64_pvo_entries, 0, "");
255 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
256     &moea64_pvo_enter_calls, 0, "");
257 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
258     &moea64_pvo_remove_calls, 0, "");
259 
260 vm_offset_t	moea64_scratchpage_va[2];
261 struct pvo_entry *moea64_scratchpage_pvo[2];
262 uintptr_t	moea64_scratchpage_pte[2];
263 struct	mtx	moea64_scratchpage_mtx;
264 
265 uint64_t 	moea64_large_page_mask = 0;
266 int		moea64_large_page_size = 0;
267 int		moea64_large_page_shift = 0;
268 
269 /*
270  * PVO calls.
271  */
272 static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
273 		    vm_offset_t, vm_offset_t, uint64_t, int);
274 static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
275 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
276 
277 /*
278  * Utility routines.
279  */
280 static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
281 static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
282 static void		moea64_kremove(mmu_t, vm_offset_t);
283 static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
284 			    vm_offset_t pa, vm_size_t sz);
285 
286 /*
287  * Kernel MMU interface
288  */
289 void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
290 void moea64_clear_modify(mmu_t, vm_page_t);
291 void moea64_clear_reference(mmu_t, vm_page_t);
292 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
293 void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
294 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
295     vm_prot_t);
296 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
297 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
298 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
299 void moea64_init(mmu_t);
300 boolean_t moea64_is_modified(mmu_t, vm_page_t);
301 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
302 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
303 int moea64_ts_referenced(mmu_t, vm_page_t);
304 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
305 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
306 int moea64_page_wired_mappings(mmu_t, vm_page_t);
307 void moea64_pinit(mmu_t, pmap_t);
308 void moea64_pinit0(mmu_t, pmap_t);
309 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
310 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
311 void moea64_qremove(mmu_t, vm_offset_t, int);
312 void moea64_release(mmu_t, pmap_t);
313 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
314 void moea64_remove_pages(mmu_t, pmap_t);
315 void moea64_remove_all(mmu_t, vm_page_t);
316 void moea64_remove_write(mmu_t, vm_page_t);
317 void moea64_zero_page(mmu_t, vm_page_t);
318 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
319 void moea64_zero_page_idle(mmu_t, vm_page_t);
320 void moea64_activate(mmu_t, struct thread *);
321 void moea64_deactivate(mmu_t, struct thread *);
322 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
323 void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
324 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
325 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
326 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
327 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
328 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
329 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
330 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
331 
332 static mmu_method_t moea64_methods[] = {
333 	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
334 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
335 	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
336 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
337 	MMUMETHOD(mmu_enter,		moea64_enter),
338 	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
339 	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
340 	MMUMETHOD(mmu_extract,		moea64_extract),
341 	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
342 	MMUMETHOD(mmu_init,		moea64_init),
343 	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
344 	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
345 	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
346 	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
347 	MMUMETHOD(mmu_map,     		moea64_map),
348 	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
349 	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
350 	MMUMETHOD(mmu_pinit,		moea64_pinit),
351 	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
352 	MMUMETHOD(mmu_protect,		moea64_protect),
353 	MMUMETHOD(mmu_qenter,		moea64_qenter),
354 	MMUMETHOD(mmu_qremove,		moea64_qremove),
355 	MMUMETHOD(mmu_release,		moea64_release),
356 	MMUMETHOD(mmu_remove,		moea64_remove),
357 	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
358 	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
359 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
360 	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
361 	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
362 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
363 	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
364 	MMUMETHOD(mmu_activate,		moea64_activate),
365 	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
366 	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
367 
368 	/* Internal interfaces */
369 	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
370 	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
371 	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
372 	MMUMETHOD(mmu_kextract,		moea64_kextract),
373 	MMUMETHOD(mmu_kenter,		moea64_kenter),
374 	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
375 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
376 
377 	{ 0, 0 }
378 };
379 
380 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
381 
382 static __inline u_int
383 va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
384 {
385 	uint64_t hash;
386 	int shift;
387 
388 	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
389 	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
390 	    shift);
391 	return (hash & moea64_pteg_mask);
392 }
393 
394 static __inline struct pvo_head *
395 vm_page_to_pvoh(vm_page_t m)
396 {
397 
398 	return (&m->md.mdpg_pvoh);
399 }
400 
401 static __inline void
402 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
403     uint64_t pte_lo, int flags)
404 {
405 
406 	/*
407 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
408 	 * set when the real pte is set in memory.
409 	 *
410 	 * Note: Don't set the valid bit for correct operation of tlb update.
411 	 */
412 	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
413 	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
414 
415 	if (flags & PVO_LARGE)
416 		pt->pte_hi |= LPTE_BIG;
417 
418 	pt->pte_lo = pte_lo;
419 }
420 
421 static __inline uint64_t
422 moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
423 {
424 	uint64_t pte_lo;
425 	int i;
426 
427 	if (ma != VM_MEMATTR_DEFAULT) {
428 		switch (ma) {
429 		case VM_MEMATTR_UNCACHEABLE:
430 			return (LPTE_I | LPTE_G);
431 		case VM_MEMATTR_WRITE_COMBINING:
432 		case VM_MEMATTR_WRITE_BACK:
433 		case VM_MEMATTR_PREFETCHABLE:
434 			return (LPTE_I);
435 		case VM_MEMATTR_WRITE_THROUGH:
436 			return (LPTE_W | LPTE_M);
437 		}
438 	}
439 
440 	/*
441 	 * Assume the page is cache inhibited and access is guarded unless
442 	 * it's in our available memory array.
443 	 */
444 	pte_lo = LPTE_I | LPTE_G;
445 	for (i = 0; i < pregions_sz; i++) {
446 		if ((pa >= pregions[i].mr_start) &&
447 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
448 			pte_lo &= ~(LPTE_I | LPTE_G);
449 			pte_lo |= LPTE_M;
450 			break;
451 		}
452 	}
453 
454 	return pte_lo;
455 }
456 
457 /*
458  * Quick sort callout for comparing memory regions.
459  */
460 static int	om_cmp(const void *a, const void *b);
461 
462 static int
463 om_cmp(const void *a, const void *b)
464 {
465 	const struct	ofw_map *mapa;
466 	const struct	ofw_map *mapb;
467 
468 	mapa = a;
469 	mapb = b;
470 	if (mapa->om_pa_hi < mapb->om_pa_hi)
471 		return (-1);
472 	else if (mapa->om_pa_hi > mapb->om_pa_hi)
473 		return (1);
474 	else if (mapa->om_pa_lo < mapb->om_pa_lo)
475 		return (-1);
476 	else if (mapa->om_pa_lo > mapb->om_pa_lo)
477 		return (1);
478 	else
479 		return (0);
480 }
481 
482 static void
483 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
484 {
485 	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
486 	register_t	msr;
487 	vm_offset_t	off;
488 	vm_paddr_t	pa_base;
489 	int		i;
490 
491 	bzero(translations, sz);
492 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
493 		panic("moea64_bootstrap: can't get ofw translations");
494 
495 	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
496 	sz /= sizeof(*translations);
497 	qsort(translations, sz, sizeof (*translations), om_cmp);
498 
499 	for (i = 0; i < sz; i++) {
500 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
501 		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
502 		    translations[i].om_len);
503 
504 		if (translations[i].om_pa_lo % PAGE_SIZE)
505 			panic("OFW translation not page-aligned!");
506 
507 		pa_base = translations[i].om_pa_lo;
508 
509 	      #ifdef __powerpc64__
510 		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
511 	      #else
512 		if (translations[i].om_pa_hi)
513 			panic("OFW translations above 32-bit boundary!");
514 	      #endif
515 
516 		/* Now enter the pages for this mapping */
517 
518 		DISABLE_TRANS(msr);
519 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
520 			if (moea64_pvo_find_va(kernel_pmap,
521 			    translations[i].om_va + off) != NULL)
522 				continue;
523 
524 			moea64_kenter(mmup, translations[i].om_va + off,
525 			    pa_base + off);
526 		}
527 		ENABLE_TRANS(msr);
528 	}
529 }
530 
531 #ifdef __powerpc64__
532 static void
533 moea64_probe_large_page(void)
534 {
535 	uint16_t pvr = mfpvr() >> 16;
536 
537 	switch (pvr) {
538 	case IBM970:
539 	case IBM970FX:
540 	case IBM970MP:
541 		powerpc_sync(); isync();
542 		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
543 		powerpc_sync(); isync();
544 
545 		/* FALLTHROUGH */
546 	case IBMCELLBE:
547 		moea64_large_page_size = 0x1000000; /* 16 MB */
548 		moea64_large_page_shift = 24;
549 		break;
550 	default:
551 		moea64_large_page_size = 0;
552 	}
553 
554 	moea64_large_page_mask = moea64_large_page_size - 1;
555 }
556 
557 static void
558 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
559 {
560 	struct slb *cache;
561 	struct slb entry;
562 	uint64_t esid, slbe;
563 	uint64_t i;
564 
565 	cache = PCPU_GET(slb);
566 	esid = va >> ADDR_SR_SHFT;
567 	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
568 
569 	for (i = 0; i < 64; i++) {
570 		if (cache[i].slbe == (slbe | i))
571 			return;
572 	}
573 
574 	entry.slbe = slbe;
575 	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
576 	if (large)
577 		entry.slbv |= SLBV_L;
578 
579 	slb_insert_kernel(entry.slbe, entry.slbv);
580 }
581 #endif
582 
583 static void
584 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
585     vm_offset_t kernelend)
586 {
587 	register_t msr;
588 	vm_paddr_t pa;
589 	vm_offset_t size, off;
590 	uint64_t pte_lo;
591 	int i;
592 
593 	if (moea64_large_page_size == 0)
594 		hw_direct_map = 0;
595 
596 	DISABLE_TRANS(msr);
597 	if (hw_direct_map) {
598 		LOCK_TABLE_WR();
599 		PMAP_LOCK(kernel_pmap);
600 		for (i = 0; i < pregions_sz; i++) {
601 		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
602 		     pregions[i].mr_size; pa += moea64_large_page_size) {
603 			pte_lo = LPTE_M;
604 
605 			/*
606 			 * Set memory access as guarded if prefetch within
607 			 * the page could exit the available physmem area.
608 			 */
609 			if (pa & moea64_large_page_mask) {
610 				pa &= moea64_large_page_mask;
611 				pte_lo |= LPTE_G;
612 			}
613 			if (pa + moea64_large_page_size >
614 			    pregions[i].mr_start + pregions[i].mr_size)
615 				pte_lo |= LPTE_G;
616 
617 			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
618 				    NULL, pa, pa, pte_lo,
619 				    PVO_WIRED | PVO_LARGE);
620 		  }
621 		}
622 		PMAP_UNLOCK(kernel_pmap);
623 		UNLOCK_TABLE_WR();
624 	} else {
625 		size = sizeof(struct pvo_head) * moea64_pteg_count;
626 		off = (vm_offset_t)(moea64_pvo_table);
627 		for (pa = off; pa < off + size; pa += PAGE_SIZE)
628 			moea64_kenter(mmup, pa, pa);
629 		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
630 		off = (vm_offset_t)(moea64_bpvo_pool);
631 		for (pa = off; pa < off + size; pa += PAGE_SIZE)
632 		moea64_kenter(mmup, pa, pa);
633 
634 		/*
635 		 * Map certain important things, like ourselves.
636 		 *
637 		 * NOTE: We do not map the exception vector space. That code is
638 		 * used only in real mode, and leaving it unmapped allows us to
639 		 * catch NULL pointer deferences, instead of making NULL a valid
640 		 * address.
641 		 */
642 
643 		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
644 		    pa += PAGE_SIZE)
645 			moea64_kenter(mmup, pa, pa);
646 	}
647 	ENABLE_TRANS(msr);
648 }
649 
650 void
651 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
652 {
653 	int		i, j;
654 	vm_size_t	physsz, hwphyssz;
655 
656 #ifndef __powerpc64__
657 	/* We don't have a direct map since there is no BAT */
658 	hw_direct_map = 0;
659 
660 	/* Make sure battable is zero, since we have no BAT */
661 	for (i = 0; i < 16; i++) {
662 		battable[i].batu = 0;
663 		battable[i].batl = 0;
664 	}
665 #else
666 	moea64_probe_large_page();
667 
668 	/* Use a direct map if we have large page support */
669 	if (moea64_large_page_size > 0)
670 		hw_direct_map = 1;
671 	else
672 		hw_direct_map = 0;
673 #endif
674 
675 	/* Get physical memory regions from firmware */
676 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
677 	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
678 
679 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
680 		panic("moea64_bootstrap: phys_avail too small");
681 
682 	phys_avail_count = 0;
683 	physsz = 0;
684 	hwphyssz = 0;
685 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
686 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
687 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
688 		    regions[i].mr_start + regions[i].mr_size,
689 		    regions[i].mr_size);
690 		if (hwphyssz != 0 &&
691 		    (physsz + regions[i].mr_size) >= hwphyssz) {
692 			if (physsz < hwphyssz) {
693 				phys_avail[j] = regions[i].mr_start;
694 				phys_avail[j + 1] = regions[i].mr_start +
695 				    hwphyssz - physsz;
696 				physsz = hwphyssz;
697 				phys_avail_count++;
698 			}
699 			break;
700 		}
701 		phys_avail[j] = regions[i].mr_start;
702 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
703 		phys_avail_count++;
704 		physsz += regions[i].mr_size;
705 	}
706 
707 	/* Check for overlap with the kernel and exception vectors */
708 	for (j = 0; j < 2*phys_avail_count; j+=2) {
709 		if (phys_avail[j] < EXC_LAST)
710 			phys_avail[j] += EXC_LAST;
711 
712 		if (kernelstart >= phys_avail[j] &&
713 		    kernelstart < phys_avail[j+1]) {
714 			if (kernelend < phys_avail[j+1]) {
715 				phys_avail[2*phys_avail_count] =
716 				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
717 				phys_avail[2*phys_avail_count + 1] =
718 				    phys_avail[j+1];
719 				phys_avail_count++;
720 			}
721 
722 			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
723 		}
724 
725 		if (kernelend >= phys_avail[j] &&
726 		    kernelend < phys_avail[j+1]) {
727 			if (kernelstart > phys_avail[j]) {
728 				phys_avail[2*phys_avail_count] = phys_avail[j];
729 				phys_avail[2*phys_avail_count + 1] =
730 				    kernelstart & ~PAGE_MASK;
731 				phys_avail_count++;
732 			}
733 
734 			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
735 		}
736 	}
737 
738 	physmem = btoc(physsz);
739 
740 #ifdef PTEGCOUNT
741 	moea64_pteg_count = PTEGCOUNT;
742 #else
743 	moea64_pteg_count = 0x1000;
744 
745 	while (moea64_pteg_count < physmem)
746 		moea64_pteg_count <<= 1;
747 
748 	moea64_pteg_count >>= 1;
749 #endif /* PTEGCOUNT */
750 }
751 
752 void
753 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
754 {
755 	vm_size_t	size;
756 	register_t	msr;
757 	int		i;
758 
759 	/*
760 	 * Set PTEG mask
761 	 */
762 	moea64_pteg_mask = moea64_pteg_count - 1;
763 
764 	/*
765 	 * Allocate pv/overflow lists.
766 	 */
767 	size = sizeof(struct pvo_head) * moea64_pteg_count;
768 
769 	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
770 	    PAGE_SIZE);
771 	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
772 
773 	DISABLE_TRANS(msr);
774 	for (i = 0; i < moea64_pteg_count; i++)
775 		LIST_INIT(&moea64_pvo_table[i]);
776 	ENABLE_TRANS(msr);
777 
778 	/*
779 	 * Initialize the lock that synchronizes access to the pteg and pvo
780 	 * tables.
781 	 */
782 	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
783 	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
784 
785 	/*
786 	 * Initialise the unmanaged pvo pool.
787 	 */
788 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
789 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
790 	moea64_bpvo_pool_index = 0;
791 
792 	/*
793 	 * Make sure kernel vsid is allocated as well as VSID 0.
794 	 */
795 	#ifndef __powerpc64__
796 	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
797 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
798 	moea64_vsid_bitmap[0] |= 1;
799 	#endif
800 
801 	/*
802 	 * Initialize the kernel pmap (which is statically allocated).
803 	 */
804 	#ifdef __powerpc64__
805 	for (i = 0; i < 64; i++) {
806 		pcpup->pc_slb[i].slbv = 0;
807 		pcpup->pc_slb[i].slbe = 0;
808 	}
809 	#else
810 	for (i = 0; i < 16; i++)
811 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
812 	#endif
813 
814 	kernel_pmap->pmap_phys = kernel_pmap;
815 	CPU_FILL(&kernel_pmap->pm_active);
816 	RB_INIT(&kernel_pmap->pmap_pvo);
817 
818 	PMAP_LOCK_INIT(kernel_pmap);
819 
820 	/*
821 	 * Now map in all the other buffers we allocated earlier
822 	 */
823 
824 	moea64_setup_direct_map(mmup, kernelstart, kernelend);
825 }
826 
827 void
828 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
829 {
830 	ihandle_t	mmui;
831 	phandle_t	chosen;
832 	phandle_t	mmu;
833 	size_t		sz;
834 	int		i;
835 	vm_offset_t	pa, va;
836 	void		*dpcpu;
837 
838 	/*
839 	 * Set up the Open Firmware pmap and add its mappings if not in real
840 	 * mode.
841 	 */
842 
843 	chosen = OF_finddevice("/chosen");
844 	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
845 	    mmu = OF_instance_to_package(mmui);
846 	    if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
847 		sz = 0;
848 	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
849 		panic("moea64_bootstrap: too many ofw translations");
850 
851 	    if (sz > 0)
852 		moea64_add_ofw_mappings(mmup, mmu, sz);
853 	}
854 
855 	/*
856 	 * Calculate the last available physical address.
857 	 */
858 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
859 		;
860 	Maxmem = powerpc_btop(phys_avail[i + 1]);
861 
862 	/*
863 	 * Initialize MMU and remap early physical mappings
864 	 */
865 	MMU_CPU_BOOTSTRAP(mmup,0);
866 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
867 	pmap_bootstrapped++;
868 	bs_remap_earlyboot();
869 
870 	/*
871 	 * Set the start and end of kva.
872 	 */
873 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
874 	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
875 
876 	/*
877 	 * Map the entire KVA range into the SLB. We must not fault there.
878 	 */
879 	#ifdef __powerpc64__
880 	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
881 		moea64_bootstrap_slb_prefault(va, 0);
882 	#endif
883 
884 	/*
885 	 * Figure out how far we can extend virtual_end into segment 16
886 	 * without running into existing mappings. Segment 16 is guaranteed
887 	 * to contain neither RAM nor devices (at least on Apple hardware),
888 	 * but will generally contain some OFW mappings we should not
889 	 * step on.
890 	 */
891 
892 	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
893 	PMAP_LOCK(kernel_pmap);
894 	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
895 	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
896 		virtual_end += PAGE_SIZE;
897 	PMAP_UNLOCK(kernel_pmap);
898 	#endif
899 
900 	/*
901 	 * Allocate a kernel stack with a guard page for thread0 and map it
902 	 * into the kernel page map.
903 	 */
904 	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
905 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
906 	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
907 	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
908 	thread0.td_kstack = va;
909 	thread0.td_kstack_pages = KSTACK_PAGES;
910 	for (i = 0; i < KSTACK_PAGES; i++) {
911 		moea64_kenter(mmup, va, pa);
912 		pa += PAGE_SIZE;
913 		va += PAGE_SIZE;
914 	}
915 
916 	/*
917 	 * Allocate virtual address space for the message buffer.
918 	 */
919 	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
920 	msgbufp = (struct msgbuf *)virtual_avail;
921 	va = virtual_avail;
922 	virtual_avail += round_page(msgbufsize);
923 	while (va < virtual_avail) {
924 		moea64_kenter(mmup, va, pa);
925 		pa += PAGE_SIZE;
926 		va += PAGE_SIZE;
927 	}
928 
929 	/*
930 	 * Allocate virtual address space for the dynamic percpu area.
931 	 */
932 	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
933 	dpcpu = (void *)virtual_avail;
934 	va = virtual_avail;
935 	virtual_avail += DPCPU_SIZE;
936 	while (va < virtual_avail) {
937 		moea64_kenter(mmup, va, pa);
938 		pa += PAGE_SIZE;
939 		va += PAGE_SIZE;
940 	}
941 	dpcpu_init(dpcpu, 0);
942 
943 	/*
944 	 * Allocate some things for page zeroing. We put this directly
945 	 * in the page table, marked with LPTE_LOCKED, to avoid any
946 	 * of the PVO book-keeping or other parts of the VM system
947 	 * from even knowing that this hack exists.
948 	 */
949 
950 	if (!hw_direct_map) {
951 		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
952 		    MTX_DEF);
953 		for (i = 0; i < 2; i++) {
954 			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
955 			virtual_end -= PAGE_SIZE;
956 
957 			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
958 
959 			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
960 			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
961 			LOCK_TABLE_RD();
962 			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
963 			    mmup, moea64_scratchpage_pvo[i]);
964 			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
965 			    |= LPTE_LOCKED;
966 			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
967 			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
968 			    moea64_scratchpage_pvo[i]->pvo_vpn);
969 			UNLOCK_TABLE_RD();
970 		}
971 	}
972 }
973 
974 /*
975  * Activate a user pmap.  The pmap must be activated before its address
976  * space can be accessed in any way.
977  */
978 void
979 moea64_activate(mmu_t mmu, struct thread *td)
980 {
981 	pmap_t	pm;
982 
983 	pm = &td->td_proc->p_vmspace->vm_pmap;
984 	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
985 
986 	#ifdef __powerpc64__
987 	PCPU_SET(userslb, pm->pm_slb);
988 	#else
989 	PCPU_SET(curpmap, pm->pmap_phys);
990 	#endif
991 }
992 
993 void
994 moea64_deactivate(mmu_t mmu, struct thread *td)
995 {
996 	pmap_t	pm;
997 
998 	pm = &td->td_proc->p_vmspace->vm_pmap;
999 	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1000 	#ifdef __powerpc64__
1001 	PCPU_SET(userslb, NULL);
1002 	#else
1003 	PCPU_SET(curpmap, NULL);
1004 	#endif
1005 }
1006 
1007 void
1008 moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
1009 {
1010 	struct	pvo_entry *pvo;
1011 	uintptr_t pt;
1012 	uint64_t vsid;
1013 	int	i, ptegidx;
1014 
1015 	LOCK_TABLE_WR();
1016 	PMAP_LOCK(pm);
1017 	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
1018 
1019 	if (pvo != NULL) {
1020 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1021 
1022 		if (wired) {
1023 			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1024 				pm->pm_stats.wired_count++;
1025 			pvo->pvo_vaddr |= PVO_WIRED;
1026 			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
1027 		} else {
1028 			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1029 				pm->pm_stats.wired_count--;
1030 			pvo->pvo_vaddr &= ~PVO_WIRED;
1031 			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
1032 		}
1033 
1034 		if (pt != -1) {
1035 			/* Update wiring flag in page table. */
1036 			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1037 			    pvo->pvo_vpn);
1038 		} else if (wired) {
1039 			/*
1040 			 * If we are wiring the page, and it wasn't in the
1041 			 * page table before, add it.
1042 			 */
1043 			vsid = PVO_VSID(pvo);
1044 			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
1045 			    pvo->pvo_vaddr & PVO_LARGE);
1046 
1047 			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
1048 
1049 			if (i >= 0) {
1050 				PVO_PTEGIDX_CLR(pvo);
1051 				PVO_PTEGIDX_SET(pvo, i);
1052 			}
1053 		}
1054 
1055 	}
1056 	UNLOCK_TABLE_WR();
1057 	PMAP_UNLOCK(pm);
1058 }
1059 
1060 /*
1061  * This goes through and sets the physical address of our
1062  * special scratch PTE to the PA we want to zero or copy. Because
1063  * of locking issues (this can get called in pvo_enter() by
1064  * the UMA allocator), we can't use most other utility functions here
1065  */
1066 
1067 static __inline
1068 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
1069 
1070 	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1071 	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1072 
1073 	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
1074 	    ~(LPTE_WIMG | LPTE_RPGN);
1075 	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
1076 	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1077 	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
1078 	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
1079 	    moea64_scratchpage_pvo[which]->pvo_vpn);
1080 	isync();
1081 }
1082 
1083 void
1084 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1085 {
1086 	vm_offset_t	dst;
1087 	vm_offset_t	src;
1088 
1089 	dst = VM_PAGE_TO_PHYS(mdst);
1090 	src = VM_PAGE_TO_PHYS(msrc);
1091 
1092 	if (hw_direct_map) {
1093 		bcopy((void *)src, (void *)dst, PAGE_SIZE);
1094 	} else {
1095 		mtx_lock(&moea64_scratchpage_mtx);
1096 
1097 		moea64_set_scratchpage_pa(mmu, 0, src);
1098 		moea64_set_scratchpage_pa(mmu, 1, dst);
1099 
1100 		bcopy((void *)moea64_scratchpage_va[0],
1101 		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1102 
1103 		mtx_unlock(&moea64_scratchpage_mtx);
1104 	}
1105 }
1106 
1107 void
1108 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1109 {
1110 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1111 
1112 	if (size + off > PAGE_SIZE)
1113 		panic("moea64_zero_page: size + off > PAGE_SIZE");
1114 
1115 	if (hw_direct_map) {
1116 		bzero((caddr_t)pa + off, size);
1117 	} else {
1118 		mtx_lock(&moea64_scratchpage_mtx);
1119 		moea64_set_scratchpage_pa(mmu, 0, pa);
1120 		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1121 		mtx_unlock(&moea64_scratchpage_mtx);
1122 	}
1123 }
1124 
1125 /*
1126  * Zero a page of physical memory by temporarily mapping it
1127  */
1128 void
1129 moea64_zero_page(mmu_t mmu, vm_page_t m)
1130 {
1131 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1132 	vm_offset_t va, off;
1133 
1134 	if (!hw_direct_map) {
1135 		mtx_lock(&moea64_scratchpage_mtx);
1136 
1137 		moea64_set_scratchpage_pa(mmu, 0, pa);
1138 		va = moea64_scratchpage_va[0];
1139 	} else {
1140 		va = pa;
1141 	}
1142 
1143 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1144 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1145 
1146 	if (!hw_direct_map)
1147 		mtx_unlock(&moea64_scratchpage_mtx);
1148 }
1149 
1150 void
1151 moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1152 {
1153 
1154 	moea64_zero_page(mmu, m);
1155 }
1156 
1157 /*
1158  * Map the given physical page at the specified virtual address in the
1159  * target pmap with the protection requested.  If specified the page
1160  * will be wired down.
1161  */
1162 
1163 void
1164 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1165     vm_prot_t prot, boolean_t wired)
1166 {
1167 	struct		pvo_head *pvo_head;
1168 	uma_zone_t	zone;
1169 	vm_page_t	pg;
1170 	uint64_t	pte_lo;
1171 	u_int		pvo_flags;
1172 	int		error;
1173 
1174 	if (!moea64_initialized) {
1175 		pvo_head = NULL;
1176 		pg = NULL;
1177 		zone = moea64_upvo_zone;
1178 		pvo_flags = 0;
1179 	} else {
1180 		pvo_head = vm_page_to_pvoh(m);
1181 		pg = m;
1182 		zone = moea64_mpvo_zone;
1183 		pvo_flags = PVO_MANAGED;
1184 	}
1185 
1186 	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
1187 		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1188 
1189 	/* XXX change the pvo head for fake pages */
1190 	if ((m->oflags & VPO_UNMANAGED) != 0) {
1191 		pvo_flags &= ~PVO_MANAGED;
1192 		pvo_head = NULL;
1193 		zone = moea64_upvo_zone;
1194 	}
1195 
1196 	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1197 
1198 	if (prot & VM_PROT_WRITE) {
1199 		pte_lo |= LPTE_BW;
1200 		if (pmap_bootstrapped &&
1201 		    (m->oflags & VPO_UNMANAGED) == 0)
1202 			vm_page_aflag_set(m, PGA_WRITEABLE);
1203 	} else
1204 		pte_lo |= LPTE_BR;
1205 
1206 	if ((prot & VM_PROT_EXECUTE) == 0)
1207 		pte_lo |= LPTE_NOEXEC;
1208 
1209 	if (wired)
1210 		pvo_flags |= PVO_WIRED;
1211 
1212 	LOCK_TABLE_WR();
1213 	PMAP_LOCK(pmap);
1214 	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
1215 	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
1216 	PMAP_UNLOCK(pmap);
1217 	UNLOCK_TABLE_WR();
1218 
1219 	/*
1220 	 * Flush the page from the instruction cache if this page is
1221 	 * mapped executable and cacheable.
1222 	 */
1223 	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1224 	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1225 		vm_page_aflag_set(m, PGA_EXECUTABLE);
1226 		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1227 	}
1228 }
1229 
1230 static void
1231 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1232     vm_size_t sz)
1233 {
1234 
1235 	/*
1236 	 * This is much trickier than on older systems because
1237 	 * we can't sync the icache on physical addresses directly
1238 	 * without a direct map. Instead we check a couple of cases
1239 	 * where the memory is already mapped in and, failing that,
1240 	 * use the same trick we use for page zeroing to create
1241 	 * a temporary mapping for this physical address.
1242 	 */
1243 
1244 	if (!pmap_bootstrapped) {
1245 		/*
1246 		 * If PMAP is not bootstrapped, we are likely to be
1247 		 * in real mode.
1248 		 */
1249 		__syncicache((void *)pa, sz);
1250 	} else if (pmap == kernel_pmap) {
1251 		__syncicache((void *)va, sz);
1252 	} else if (hw_direct_map) {
1253 		__syncicache((void *)pa, sz);
1254 	} else {
1255 		/* Use the scratch page to set up a temp mapping */
1256 
1257 		mtx_lock(&moea64_scratchpage_mtx);
1258 
1259 		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1260 		__syncicache((void *)(moea64_scratchpage_va[1] +
1261 		    (va & ADDR_POFF)), sz);
1262 
1263 		mtx_unlock(&moea64_scratchpage_mtx);
1264 	}
1265 }
1266 
1267 /*
1268  * Maps a sequence of resident pages belonging to the same object.
1269  * The sequence begins with the given page m_start.  This page is
1270  * mapped at the given virtual address start.  Each subsequent page is
1271  * mapped at a virtual address that is offset from start by the same
1272  * amount as the page is offset from m_start within the object.  The
1273  * last page in the sequence is the page with the largest offset from
1274  * m_start that can be mapped at a virtual address less than the given
1275  * virtual address end.  Not every virtual page between start and end
1276  * is mapped; only those for which a resident page exists with the
1277  * corresponding offset from m_start are mapped.
1278  */
1279 void
1280 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1281     vm_page_t m_start, vm_prot_t prot)
1282 {
1283 	vm_page_t m;
1284 	vm_pindex_t diff, psize;
1285 
1286 	psize = atop(end - start);
1287 	m = m_start;
1288 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1289 		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1290 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1291 		m = TAILQ_NEXT(m, listq);
1292 	}
1293 }
1294 
1295 void
1296 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1297     vm_prot_t prot)
1298 {
1299 
1300 	moea64_enter(mmu, pm, va, m,
1301 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1302 }
1303 
1304 vm_paddr_t
1305 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1306 {
1307 	struct	pvo_entry *pvo;
1308 	vm_paddr_t pa;
1309 
1310 	PMAP_LOCK(pm);
1311 	pvo = moea64_pvo_find_va(pm, va);
1312 	if (pvo == NULL)
1313 		pa = 0;
1314 	else
1315 		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1316 		    (va - PVO_VADDR(pvo));
1317 	PMAP_UNLOCK(pm);
1318 	return (pa);
1319 }
1320 
1321 /*
1322  * Atomically extract and hold the physical page with the given
1323  * pmap and virtual address pair if that mapping permits the given
1324  * protection.
1325  */
1326 vm_page_t
1327 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1328 {
1329 	struct	pvo_entry *pvo;
1330 	vm_page_t m;
1331         vm_paddr_t pa;
1332 
1333 	m = NULL;
1334 	pa = 0;
1335 	PMAP_LOCK(pmap);
1336 retry:
1337 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1338 	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1339 	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1340 	     (prot & VM_PROT_WRITE) == 0)) {
1341 		if (vm_page_pa_tryrelock(pmap,
1342 			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1343 			goto retry;
1344 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1345 		vm_page_hold(m);
1346 	}
1347 	PA_UNLOCK_COND(pa);
1348 	PMAP_UNLOCK(pmap);
1349 	return (m);
1350 }
1351 
1352 static mmu_t installed_mmu;
1353 
1354 static void *
1355 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1356 {
1357 	/*
1358 	 * This entire routine is a horrible hack to avoid bothering kmem
1359 	 * for new KVA addresses. Because this can get called from inside
1360 	 * kmem allocation routines, calling kmem for a new address here
1361 	 * can lead to multiply locking non-recursive mutexes.
1362 	 */
1363         vm_offset_t va;
1364 
1365         vm_page_t m;
1366         int pflags, needed_lock;
1367 
1368 	*flags = UMA_SLAB_PRIV;
1369 	needed_lock = !PMAP_LOCKED(kernel_pmap);
1370 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
1371 
1372         for (;;) {
1373                 m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1374                 if (m == NULL) {
1375                         if (wait & M_NOWAIT)
1376                                 return (NULL);
1377                         VM_WAIT;
1378                 } else
1379                         break;
1380         }
1381 
1382 	va = VM_PAGE_TO_PHYS(m);
1383 
1384 	LOCK_TABLE_WR();
1385 	if (needed_lock)
1386 		PMAP_LOCK(kernel_pmap);
1387 
1388 	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1389 	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
1390 
1391 	if (needed_lock)
1392 		PMAP_UNLOCK(kernel_pmap);
1393 	UNLOCK_TABLE_WR();
1394 
1395 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1396                 bzero((void *)va, PAGE_SIZE);
1397 
1398 	return (void *)va;
1399 }
1400 
1401 extern int elf32_nxstack;
1402 
1403 void
1404 moea64_init(mmu_t mmu)
1405 {
1406 
1407 	CTR0(KTR_PMAP, "moea64_init");
1408 
1409 	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1410 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1411 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1412 	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1413 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1414 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1415 
1416 	if (!hw_direct_map) {
1417 		installed_mmu = mmu;
1418 		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1419 		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1420 	}
1421 
1422 #ifdef COMPAT_FREEBSD32
1423 	elf32_nxstack = 1;
1424 #endif
1425 
1426 	moea64_initialized = TRUE;
1427 }
1428 
1429 boolean_t
1430 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1431 {
1432 
1433 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1434 	    ("moea64_is_referenced: page %p is not managed", m));
1435 	return (moea64_query_bit(mmu, m, PTE_REF));
1436 }
1437 
1438 boolean_t
1439 moea64_is_modified(mmu_t mmu, vm_page_t m)
1440 {
1441 
1442 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1443 	    ("moea64_is_modified: page %p is not managed", m));
1444 
1445 	/*
1446 	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
1447 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1448 	 * is clear, no PTEs can have LPTE_CHG set.
1449 	 */
1450 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1451 	if ((m->oflags & VPO_BUSY) == 0 &&
1452 	    (m->aflags & PGA_WRITEABLE) == 0)
1453 		return (FALSE);
1454 	return (moea64_query_bit(mmu, m, LPTE_CHG));
1455 }
1456 
1457 boolean_t
1458 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1459 {
1460 	struct pvo_entry *pvo;
1461 	boolean_t rv;
1462 
1463 	PMAP_LOCK(pmap);
1464 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1465 	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1466 	PMAP_UNLOCK(pmap);
1467 	return (rv);
1468 }
1469 
1470 void
1471 moea64_clear_reference(mmu_t mmu, vm_page_t m)
1472 {
1473 
1474 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1475 	    ("moea64_clear_reference: page %p is not managed", m));
1476 	moea64_clear_bit(mmu, m, LPTE_REF);
1477 }
1478 
1479 void
1480 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1481 {
1482 
1483 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1484 	    ("moea64_clear_modify: page %p is not managed", m));
1485 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1486 	KASSERT((m->oflags & VPO_BUSY) == 0,
1487 	    ("moea64_clear_modify: page %p is busy", m));
1488 
1489 	/*
1490 	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1491 	 * set.  If the object containing the page is locked and the page is
1492 	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
1493 	 */
1494 	if ((m->aflags & PGA_WRITEABLE) == 0)
1495 		return;
1496 	moea64_clear_bit(mmu, m, LPTE_CHG);
1497 }
1498 
1499 /*
1500  * Clear the write and modified bits in each of the given page's mappings.
1501  */
1502 void
1503 moea64_remove_write(mmu_t mmu, vm_page_t m)
1504 {
1505 	struct	pvo_entry *pvo;
1506 	uintptr_t pt;
1507 	pmap_t	pmap;
1508 	uint64_t lo = 0;
1509 
1510 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1511 	    ("moea64_remove_write: page %p is not managed", m));
1512 
1513 	/*
1514 	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
1515 	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
1516 	 * is clear, no page table entries need updating.
1517 	 */
1518 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1519 	if ((m->oflags & VPO_BUSY) == 0 &&
1520 	    (m->aflags & PGA_WRITEABLE) == 0)
1521 		return;
1522 	powerpc_sync();
1523 	LOCK_TABLE_RD();
1524 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1525 		pmap = pvo->pvo_pmap;
1526 		PMAP_LOCK(pmap);
1527 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1528 			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1529 			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1530 			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1531 			if (pt != -1) {
1532 				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1533 				lo |= pvo->pvo_pte.lpte.pte_lo;
1534 				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1535 				MOEA64_PTE_CHANGE(mmu, pt,
1536 				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1537 				if (pvo->pvo_pmap == kernel_pmap)
1538 					isync();
1539 			}
1540 		}
1541 		if ((lo & LPTE_CHG) != 0)
1542 			vm_page_dirty(m);
1543 		PMAP_UNLOCK(pmap);
1544 	}
1545 	UNLOCK_TABLE_RD();
1546 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1547 }
1548 
1549 /*
1550  *	moea64_ts_referenced:
1551  *
1552  *	Return a count of reference bits for a page, clearing those bits.
1553  *	It is not necessary for every reference bit to be cleared, but it
1554  *	is necessary that 0 only be returned when there are truly no
1555  *	reference bits set.
1556  *
1557  *	XXX: The exact number of bits to check and clear is a matter that
1558  *	should be tested and standardized at some point in the future for
1559  *	optimal aging of shared pages.
1560  */
1561 int
1562 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1563 {
1564 
1565 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1566 	    ("moea64_ts_referenced: page %p is not managed", m));
1567 	return (moea64_clear_bit(mmu, m, LPTE_REF));
1568 }
1569 
1570 /*
1571  * Modify the WIMG settings of all mappings for a page.
1572  */
1573 void
1574 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1575 {
1576 	struct	pvo_entry *pvo;
1577 	struct  pvo_head *pvo_head;
1578 	uintptr_t pt;
1579 	pmap_t	pmap;
1580 	uint64_t lo;
1581 
1582 	if ((m->oflags & VPO_UNMANAGED) != 0) {
1583 		m->md.mdpg_cache_attrs = ma;
1584 		return;
1585 	}
1586 
1587 	pvo_head = vm_page_to_pvoh(m);
1588 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1589 	LOCK_TABLE_RD();
1590 	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1591 		pmap = pvo->pvo_pmap;
1592 		PMAP_LOCK(pmap);
1593 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1594 		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1595 		pvo->pvo_pte.lpte.pte_lo |= lo;
1596 		if (pt != -1) {
1597 			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1598 			    pvo->pvo_vpn);
1599 			if (pvo->pvo_pmap == kernel_pmap)
1600 				isync();
1601 		}
1602 		PMAP_UNLOCK(pmap);
1603 	}
1604 	UNLOCK_TABLE_RD();
1605 	m->md.mdpg_cache_attrs = ma;
1606 }
1607 
1608 /*
1609  * Map a wired page into kernel virtual address space.
1610  */
1611 void
1612 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1613 {
1614 	uint64_t	pte_lo;
1615 	int		error;
1616 
1617 	pte_lo = moea64_calc_wimg(pa, ma);
1618 
1619 	LOCK_TABLE_WR();
1620 	PMAP_LOCK(kernel_pmap);
1621 	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1622 	    NULL, va, pa, pte_lo, PVO_WIRED);
1623 	PMAP_UNLOCK(kernel_pmap);
1624 	UNLOCK_TABLE_WR();
1625 
1626 	if (error != 0 && error != ENOENT)
1627 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1628 		    pa, error);
1629 }
1630 
1631 void
1632 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1633 {
1634 
1635 	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1636 }
1637 
1638 /*
1639  * Extract the physical page address associated with the given kernel virtual
1640  * address.
1641  */
1642 vm_paddr_t
1643 moea64_kextract(mmu_t mmu, vm_offset_t va)
1644 {
1645 	struct		pvo_entry *pvo;
1646 	vm_paddr_t pa;
1647 
1648 	/*
1649 	 * Shortcut the direct-mapped case when applicable.  We never put
1650 	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1651 	 */
1652 	if (va < VM_MIN_KERNEL_ADDRESS)
1653 		return (va);
1654 
1655 	PMAP_LOCK(kernel_pmap);
1656 	pvo = moea64_pvo_find_va(kernel_pmap, va);
1657 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1658 	    va));
1659 	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1660 	PMAP_UNLOCK(kernel_pmap);
1661 	return (pa);
1662 }
1663 
1664 /*
1665  * Remove a wired page from kernel virtual address space.
1666  */
1667 void
1668 moea64_kremove(mmu_t mmu, vm_offset_t va)
1669 {
1670 	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1671 }
1672 
1673 /*
1674  * Map a range of physical addresses into kernel virtual address space.
1675  *
1676  * The value passed in *virt is a suggested virtual address for the mapping.
1677  * Architectures which can support a direct-mapped physical to virtual region
1678  * can return the appropriate address within that region, leaving '*virt'
1679  * unchanged.  We cannot and therefore do not; *virt is updated with the
1680  * first usable address after the mapped region.
1681  */
1682 vm_offset_t
1683 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1684     vm_paddr_t pa_end, int prot)
1685 {
1686 	vm_offset_t	sva, va;
1687 
1688 	sva = *virt;
1689 	va = sva;
1690 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1691 		moea64_kenter(mmu, va, pa_start);
1692 	*virt = va;
1693 
1694 	return (sva);
1695 }
1696 
1697 /*
1698  * Returns true if the pmap's pv is one of the first
1699  * 16 pvs linked to from this page.  This count may
1700  * be changed upwards or downwards in the future; it
1701  * is only necessary that true be returned for a small
1702  * subset of pmaps for proper page aging.
1703  */
1704 boolean_t
1705 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1706 {
1707         int loops;
1708 	struct pvo_entry *pvo;
1709 	boolean_t rv;
1710 
1711 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1712 	    ("moea64_page_exists_quick: page %p is not managed", m));
1713 	loops = 0;
1714 	rv = FALSE;
1715 	LOCK_TABLE_RD();
1716 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1717 		if (pvo->pvo_pmap == pmap) {
1718 			rv = TRUE;
1719 			break;
1720 		}
1721 		if (++loops >= 16)
1722 			break;
1723 	}
1724 	UNLOCK_TABLE_RD();
1725 	return (rv);
1726 }
1727 
1728 /*
1729  * Return the number of managed mappings to the given physical page
1730  * that are wired.
1731  */
1732 int
1733 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1734 {
1735 	struct pvo_entry *pvo;
1736 	int count;
1737 
1738 	count = 0;
1739 	if ((m->oflags & VPO_UNMANAGED) != 0)
1740 		return (count);
1741 	LOCK_TABLE_RD();
1742 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1743 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1744 			count++;
1745 	UNLOCK_TABLE_RD();
1746 	return (count);
1747 }
1748 
1749 static uintptr_t	moea64_vsidcontext;
1750 
1751 uintptr_t
1752 moea64_get_unique_vsid(void) {
1753 	u_int entropy;
1754 	register_t hash;
1755 	uint32_t mask;
1756 	int i;
1757 
1758 	entropy = 0;
1759 	__asm __volatile("mftb %0" : "=r"(entropy));
1760 
1761 	mtx_lock(&moea64_slb_mutex);
1762 	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1763 		u_int	n;
1764 
1765 		/*
1766 		 * Create a new value by mutiplying by a prime and adding in
1767 		 * entropy from the timebase register.  This is to make the
1768 		 * VSID more random so that the PT hash function collides
1769 		 * less often.  (Note that the prime casues gcc to do shifts
1770 		 * instead of a multiply.)
1771 		 */
1772 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1773 		hash = moea64_vsidcontext & (NVSIDS - 1);
1774 		if (hash == 0)		/* 0 is special, avoid it */
1775 			continue;
1776 		n = hash >> 5;
1777 		mask = 1 << (hash & (VSID_NBPW - 1));
1778 		hash = (moea64_vsidcontext & VSID_HASHMASK);
1779 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1780 			/* anything free in this bucket? */
1781 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1782 				entropy = (moea64_vsidcontext >> 20);
1783 				continue;
1784 			}
1785 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1786 			mask = 1 << i;
1787 			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1788 			hash |= i;
1789 		}
1790 		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1791 		    ("Allocating in-use VSID %#zx\n", hash));
1792 		moea64_vsid_bitmap[n] |= mask;
1793 		mtx_unlock(&moea64_slb_mutex);
1794 		return (hash);
1795 	}
1796 
1797 	mtx_unlock(&moea64_slb_mutex);
1798 	panic("%s: out of segments",__func__);
1799 }
1800 
1801 #ifdef __powerpc64__
1802 void
1803 moea64_pinit(mmu_t mmu, pmap_t pmap)
1804 {
1805 	PMAP_LOCK_INIT(pmap);
1806 	RB_INIT(&pmap->pmap_pvo);
1807 
1808 	pmap->pm_slb_tree_root = slb_alloc_tree();
1809 	pmap->pm_slb = slb_alloc_user_cache();
1810 	pmap->pm_slb_len = 0;
1811 }
1812 #else
1813 void
1814 moea64_pinit(mmu_t mmu, pmap_t pmap)
1815 {
1816 	int	i;
1817 	uint32_t hash;
1818 
1819 	PMAP_LOCK_INIT(pmap);
1820 	RB_INIT(&pmap->pmap_pvo);
1821 
1822 	if (pmap_bootstrapped)
1823 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1824 		    (vm_offset_t)pmap);
1825 	else
1826 		pmap->pmap_phys = pmap;
1827 
1828 	/*
1829 	 * Allocate some segment registers for this pmap.
1830 	 */
1831 	hash = moea64_get_unique_vsid();
1832 
1833 	for (i = 0; i < 16; i++)
1834 		pmap->pm_sr[i] = VSID_MAKE(i, hash);
1835 
1836 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1837 }
1838 #endif
1839 
1840 /*
1841  * Initialize the pmap associated with process 0.
1842  */
1843 void
1844 moea64_pinit0(mmu_t mmu, pmap_t pm)
1845 {
1846 	moea64_pinit(mmu, pm);
1847 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1848 }
1849 
1850 /*
1851  * Set the physical protection on the specified range of this map as requested.
1852  */
1853 static void
1854 moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
1855 {
1856 	uintptr_t pt;
1857 	struct	vm_page *pg;
1858 	uint64_t oldlo;
1859 
1860 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
1861 
1862 	/*
1863 	 * Grab the PTE pointer before we diddle with the cached PTE
1864 	 * copy.
1865 	 */
1866 	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1867 
1868 	/*
1869 	 * Change the protection of the page.
1870 	 */
1871 	oldlo = pvo->pvo_pte.lpte.pte_lo;
1872 	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1873 	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1874 	if ((prot & VM_PROT_EXECUTE) == 0)
1875 		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1876 	if (prot & VM_PROT_WRITE)
1877 		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
1878 	else
1879 		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1880 
1881 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1882 
1883 	/*
1884 	 * If the PVO is in the page table, update that pte as well.
1885 	 */
1886 	if (pt != -1)
1887 		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1888 		    pvo->pvo_vpn);
1889 	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
1890 	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1891 		if ((pg->oflags & VPO_UNMANAGED) == 0)
1892 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
1893 		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
1894 		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
1895 	}
1896 
1897 	/*
1898 	 * Update vm about the REF/CHG bits if the page is managed and we have
1899 	 * removed write access.
1900 	 */
1901 	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
1902 	    (oldlo & LPTE_PP) != LPTE_BR && !(prot && VM_PROT_WRITE)) {
1903 		if (pg != NULL) {
1904 			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
1905 				vm_page_dirty(pg);
1906 			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
1907 				vm_page_aflag_set(pg, PGA_REFERENCED);
1908 		}
1909 	}
1910 }
1911 
1912 void
1913 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1914     vm_prot_t prot)
1915 {
1916 	struct	pvo_entry *pvo, *tpvo, key;
1917 
1918 	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
1919 	    sva, eva, prot);
1920 
1921 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1922 	    ("moea64_protect: non current pmap"));
1923 
1924 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1925 		moea64_remove(mmu, pm, sva, eva);
1926 		return;
1927 	}
1928 
1929 	LOCK_TABLE_RD();
1930 	PMAP_LOCK(pm);
1931 	key.pvo_vaddr = sva;
1932 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1933 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
1934 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
1935 		moea64_pvo_protect(mmu, pm, pvo, prot);
1936 	}
1937 	UNLOCK_TABLE_RD();
1938 	PMAP_UNLOCK(pm);
1939 }
1940 
1941 /*
1942  * Map a list of wired pages into kernel virtual address space.  This is
1943  * intended for temporary mappings which do not need page modification or
1944  * references recorded.  Existing mappings in the region are overwritten.
1945  */
1946 void
1947 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1948 {
1949 	while (count-- > 0) {
1950 		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1951 		va += PAGE_SIZE;
1952 		m++;
1953 	}
1954 }
1955 
1956 /*
1957  * Remove page mappings from kernel virtual address space.  Intended for
1958  * temporary mappings entered by moea64_qenter.
1959  */
1960 void
1961 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1962 {
1963 	while (count-- > 0) {
1964 		moea64_kremove(mmu, va);
1965 		va += PAGE_SIZE;
1966 	}
1967 }
1968 
1969 void
1970 moea64_release_vsid(uint64_t vsid)
1971 {
1972 	int idx, mask;
1973 
1974 	mtx_lock(&moea64_slb_mutex);
1975 	idx = vsid & (NVSIDS-1);
1976 	mask = 1 << (idx % VSID_NBPW);
1977 	idx /= VSID_NBPW;
1978 	KASSERT(moea64_vsid_bitmap[idx] & mask,
1979 	    ("Freeing unallocated VSID %#jx", vsid));
1980 	moea64_vsid_bitmap[idx] &= ~mask;
1981 	mtx_unlock(&moea64_slb_mutex);
1982 }
1983 
1984 
1985 void
1986 moea64_release(mmu_t mmu, pmap_t pmap)
1987 {
1988 
1989 	/*
1990 	 * Free segment registers' VSIDs
1991 	 */
1992     #ifdef __powerpc64__
1993 	slb_free_tree(pmap);
1994 	slb_free_user_cache(pmap->pm_slb);
1995     #else
1996 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
1997 
1998 	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
1999     #endif
2000 
2001 	PMAP_LOCK_DESTROY(pmap);
2002 }
2003 
2004 /*
2005  * Remove all pages mapped by the specified pmap
2006  */
2007 void
2008 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2009 {
2010 	struct	pvo_entry *pvo, *tpvo;
2011 
2012 	LOCK_TABLE_WR();
2013 	PMAP_LOCK(pm);
2014 	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2015 		if (!(pvo->pvo_vaddr & PVO_WIRED))
2016 			moea64_pvo_remove(mmu, pvo);
2017 	}
2018 	UNLOCK_TABLE_WR();
2019 	PMAP_UNLOCK(pm);
2020 }
2021 
2022 /*
2023  * Remove the given range of addresses from the specified map.
2024  */
2025 void
2026 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2027 {
2028 	struct	pvo_entry *pvo, *tpvo, key;
2029 
2030 	/*
2031 	 * Perform an unsynchronized read.  This is, however, safe.
2032 	 */
2033 	if (pm->pm_stats.resident_count == 0)
2034 		return;
2035 
2036 	LOCK_TABLE_WR();
2037 	PMAP_LOCK(pm);
2038 	key.pvo_vaddr = sva;
2039 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2040 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2041 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2042 		moea64_pvo_remove(mmu, pvo);
2043 	}
2044 	UNLOCK_TABLE_WR();
2045 	PMAP_UNLOCK(pm);
2046 }
2047 
2048 /*
2049  * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2050  * will reflect changes in pte's back to the vm_page.
2051  */
2052 void
2053 moea64_remove_all(mmu_t mmu, vm_page_t m)
2054 {
2055 	struct	pvo_entry *pvo, *next_pvo;
2056 	pmap_t	pmap;
2057 
2058 	LOCK_TABLE_WR();
2059 	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2060 		pmap = pvo->pvo_pmap;
2061 		PMAP_LOCK(pmap);
2062 		moea64_pvo_remove(mmu, pvo);
2063 		PMAP_UNLOCK(pmap);
2064 	}
2065 	UNLOCK_TABLE_WR();
2066 	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
2067 		vm_page_dirty(m);
2068 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2069 	vm_page_aflag_clear(m, PGA_EXECUTABLE);
2070 }
2071 
2072 /*
2073  * Allocate a physical page of memory directly from the phys_avail map.
2074  * Can only be called from moea64_bootstrap before avail start and end are
2075  * calculated.
2076  */
2077 vm_offset_t
2078 moea64_bootstrap_alloc(vm_size_t size, u_int align)
2079 {
2080 	vm_offset_t	s, e;
2081 	int		i, j;
2082 
2083 	size = round_page(size);
2084 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2085 		if (align != 0)
2086 			s = (phys_avail[i] + align - 1) & ~(align - 1);
2087 		else
2088 			s = phys_avail[i];
2089 		e = s + size;
2090 
2091 		if (s < phys_avail[i] || e > phys_avail[i + 1])
2092 			continue;
2093 
2094 		if (s + size > platform_real_maxaddr())
2095 			continue;
2096 
2097 		if (s == phys_avail[i]) {
2098 			phys_avail[i] += size;
2099 		} else if (e == phys_avail[i + 1]) {
2100 			phys_avail[i + 1] -= size;
2101 		} else {
2102 			for (j = phys_avail_count * 2; j > i; j -= 2) {
2103 				phys_avail[j] = phys_avail[j - 2];
2104 				phys_avail[j + 1] = phys_avail[j - 1];
2105 			}
2106 
2107 			phys_avail[i + 3] = phys_avail[i + 1];
2108 			phys_avail[i + 1] = s;
2109 			phys_avail[i + 2] = e;
2110 			phys_avail_count++;
2111 		}
2112 
2113 		return (s);
2114 	}
2115 	panic("moea64_bootstrap_alloc: could not allocate memory");
2116 }
2117 
2118 static int
2119 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2120     struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2121     uint64_t pte_lo, int flags)
2122 {
2123 	struct	 pvo_entry *pvo;
2124 	uint64_t vsid;
2125 	int	 first;
2126 	u_int	 ptegidx;
2127 	int	 i;
2128 	int      bootstrap;
2129 
2130 	/*
2131 	 * One nasty thing that can happen here is that the UMA calls to
2132 	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2133 	 * which calls UMA...
2134 	 *
2135 	 * We break the loop by detecting recursion and allocating out of
2136 	 * the bootstrap pool.
2137 	 */
2138 
2139 	first = 0;
2140 	bootstrap = (flags & PVO_BOOTSTRAP);
2141 
2142 	if (!moea64_initialized)
2143 		bootstrap = 1;
2144 
2145 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2146 	rw_assert(&moea64_table_lock, RA_WLOCKED);
2147 
2148 	/*
2149 	 * Compute the PTE Group index.
2150 	 */
2151 	va &= ~ADDR_POFF;
2152 	vsid = va_to_vsid(pm, va);
2153 	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2154 
2155 	/*
2156 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
2157 	 * there is a mapping.
2158 	 */
2159 	moea64_pvo_enter_calls++;
2160 
2161 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2162 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2163 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2164 			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2165 			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2166 			    	if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2167 					/* Re-insert if spilled */
2168 					i = MOEA64_PTE_INSERT(mmu, ptegidx,
2169 					    &pvo->pvo_pte.lpte);
2170 					if (i >= 0)
2171 						PVO_PTEGIDX_SET(pvo, i);
2172 					moea64_pte_overflow--;
2173 				}
2174 				return (0);
2175 			}
2176 			moea64_pvo_remove(mmu, pvo);
2177 			break;
2178 		}
2179 	}
2180 
2181 	/*
2182 	 * If we aren't overwriting a mapping, try to allocate.
2183 	 */
2184 	if (bootstrap) {
2185 		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2186 			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
2187 			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2188 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2189 		}
2190 		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2191 		moea64_bpvo_pool_index++;
2192 		bootstrap = 1;
2193 	} else {
2194 		pvo = uma_zalloc(zone, M_NOWAIT);
2195 	}
2196 
2197 	if (pvo == NULL)
2198 		return (ENOMEM);
2199 
2200 	moea64_pvo_entries++;
2201 	pvo->pvo_vaddr = va;
2202 	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2203 	    | (vsid << 16);
2204 	pvo->pvo_pmap = pm;
2205 	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2206 	pvo->pvo_vaddr &= ~ADDR_POFF;
2207 
2208 	if (flags & PVO_WIRED)
2209 		pvo->pvo_vaddr |= PVO_WIRED;
2210 	if (pvo_head != NULL)
2211 		pvo->pvo_vaddr |= PVO_MANAGED;
2212 	if (bootstrap)
2213 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2214 	if (flags & PVO_LARGE)
2215 		pvo->pvo_vaddr |= PVO_LARGE;
2216 
2217 	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2218 	    (uint64_t)(pa) | pte_lo, flags);
2219 
2220 	/*
2221 	 * Add to pmap list
2222 	 */
2223 	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2224 
2225 	/*
2226 	 * Remember if the list was empty and therefore will be the first
2227 	 * item.
2228 	 */
2229 	if (pvo_head != NULL) {
2230 		if (LIST_FIRST(pvo_head) == NULL)
2231 			first = 1;
2232 		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2233 	}
2234 
2235 	if (pvo->pvo_vaddr & PVO_WIRED) {
2236 		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2237 		pm->pm_stats.wired_count++;
2238 	}
2239 	pm->pm_stats.resident_count++;
2240 
2241 	/*
2242 	 * We hope this succeeds but it isn't required.
2243 	 */
2244 	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2245 	if (i >= 0) {
2246 		PVO_PTEGIDX_SET(pvo, i);
2247 	} else {
2248 		panic("moea64_pvo_enter: overflow");
2249 		moea64_pte_overflow++;
2250 	}
2251 
2252 	if (pm == kernel_pmap)
2253 		isync();
2254 
2255 #ifdef __powerpc64__
2256 	/*
2257 	 * Make sure all our bootstrap mappings are in the SLB as soon
2258 	 * as virtual memory is switched on.
2259 	 */
2260 	if (!pmap_bootstrapped)
2261 		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2262 #endif
2263 
2264 	return (first ? ENOENT : 0);
2265 }
2266 
2267 static void
2268 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2269 {
2270 	struct	vm_page *pg;
2271 	uintptr_t pt;
2272 
2273 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2274 	rw_assert(&moea64_table_lock, RA_WLOCKED);
2275 
2276 	/*
2277 	 * If there is an active pte entry, we need to deactivate it (and
2278 	 * save the ref & cfg bits).
2279 	 */
2280 	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2281 	if (pt != -1) {
2282 		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2283 		PVO_PTEGIDX_CLR(pvo);
2284 	} else {
2285 		moea64_pte_overflow--;
2286 	}
2287 
2288 	/*
2289 	 * Update our statistics.
2290 	 */
2291 	pvo->pvo_pmap->pm_stats.resident_count--;
2292 	if (pvo->pvo_vaddr & PVO_WIRED)
2293 		pvo->pvo_pmap->pm_stats.wired_count--;
2294 
2295 	/*
2296 	 * Remove this PVO from the pmap list.
2297 	 */
2298 	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2299 
2300 	/*
2301 	 * Remove this from the overflow list and return it to the pool
2302 	 * if we aren't going to reuse it.
2303 	 */
2304 	LIST_REMOVE(pvo, pvo_olink);
2305 
2306 	/*
2307 	 * Update vm about the REF/CHG bits if the page is managed.
2308 	 */
2309 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2310 
2311 	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
2312 		LIST_REMOVE(pvo, pvo_vlink);
2313 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
2314 			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
2315 				vm_page_dirty(pg);
2316 			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
2317 				vm_page_aflag_set(pg, PGA_REFERENCED);
2318 			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2319 				vm_page_aflag_clear(pg, PGA_WRITEABLE);
2320 		}
2321 		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2322 			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
2323 	}
2324 
2325 	moea64_pvo_entries--;
2326 	moea64_pvo_remove_calls++;
2327 
2328 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2329 		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2330 		    moea64_upvo_zone, pvo);
2331 }
2332 
2333 static struct pvo_entry *
2334 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2335 {
2336 	struct pvo_entry key;
2337 
2338 	key.pvo_vaddr = va & ~ADDR_POFF;
2339 	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2340 }
2341 
2342 static boolean_t
2343 moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2344 {
2345 	struct	pvo_entry *pvo;
2346 	uintptr_t pt;
2347 
2348 	LOCK_TABLE_RD();
2349 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2350 		/*
2351 		 * See if we saved the bit off.  If so, return success.
2352 		 */
2353 		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2354 			UNLOCK_TABLE_RD();
2355 			return (TRUE);
2356 		}
2357 	}
2358 
2359 	/*
2360 	 * No luck, now go through the hard part of looking at the PTEs
2361 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2362 	 * the PTEs.
2363 	 */
2364 	powerpc_sync();
2365 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2366 
2367 		/*
2368 		 * See if this pvo has a valid PTE.  if so, fetch the
2369 		 * REF/CHG bits from the valid PTE.  If the appropriate
2370 		 * ptebit is set, return success.
2371 		 */
2372 		PMAP_LOCK(pvo->pvo_pmap);
2373 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2374 		if (pt != -1) {
2375 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2376 			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2377 				PMAP_UNLOCK(pvo->pvo_pmap);
2378 				UNLOCK_TABLE_RD();
2379 				return (TRUE);
2380 			}
2381 		}
2382 		PMAP_UNLOCK(pvo->pvo_pmap);
2383 	}
2384 
2385 	UNLOCK_TABLE_RD();
2386 	return (FALSE);
2387 }
2388 
2389 static u_int
2390 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2391 {
2392 	u_int	count;
2393 	struct	pvo_entry *pvo;
2394 	uintptr_t pt;
2395 
2396 	/*
2397 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2398 	 * we can reset the right ones).  note that since the pvo entries and
2399 	 * list heads are accessed via BAT0 and are never placed in the page
2400 	 * table, we don't have to worry about further accesses setting the
2401 	 * REF/CHG bits.
2402 	 */
2403 	powerpc_sync();
2404 
2405 	/*
2406 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2407 	 * valid pte clear the ptebit from the valid pte.
2408 	 */
2409 	count = 0;
2410 	LOCK_TABLE_RD();
2411 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2412 		PMAP_LOCK(pvo->pvo_pmap);
2413 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2414 		if (pt != -1) {
2415 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2416 			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2417 				count++;
2418 				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
2419 				    pvo->pvo_vpn, ptebit);
2420 			}
2421 		}
2422 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2423 		PMAP_UNLOCK(pvo->pvo_pmap);
2424 	}
2425 
2426 	UNLOCK_TABLE_RD();
2427 	return (count);
2428 }
2429 
2430 boolean_t
2431 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2432 {
2433 	struct pvo_entry *pvo, key;
2434 	vm_offset_t ppa;
2435 	int error = 0;
2436 
2437 	PMAP_LOCK(kernel_pmap);
2438 	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
2439 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2440 	    ppa < pa + size; ppa += PAGE_SIZE,
2441 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2442 		if (pvo == NULL ||
2443 		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2444 			error = EFAULT;
2445 			break;
2446 		}
2447 	}
2448 	PMAP_UNLOCK(kernel_pmap);
2449 
2450 	return (error);
2451 }
2452 
2453 /*
2454  * Map a set of physical memory pages into the kernel virtual
2455  * address space. Return a pointer to where it is mapped. This
2456  * routine is intended to be used for mapping device memory,
2457  * NOT real memory.
2458  */
2459 void *
2460 moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2461 {
2462 	vm_offset_t va, tmpva, ppa, offset;
2463 
2464 	ppa = trunc_page(pa);
2465 	offset = pa & PAGE_MASK;
2466 	size = roundup2(offset + size, PAGE_SIZE);
2467 
2468 	va = kmem_alloc_nofault(kernel_map, size);
2469 
2470 	if (!va)
2471 		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2472 
2473 	for (tmpva = va; size > 0;) {
2474 		moea64_kenter_attr(mmu, tmpva, ppa, ma);
2475 		size -= PAGE_SIZE;
2476 		tmpva += PAGE_SIZE;
2477 		ppa += PAGE_SIZE;
2478 	}
2479 
2480 	return ((void *)(va + offset));
2481 }
2482 
2483 void *
2484 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2485 {
2486 
2487 	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
2488 }
2489 
2490 void
2491 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2492 {
2493 	vm_offset_t base, offset;
2494 
2495 	base = trunc_page(va);
2496 	offset = va & PAGE_MASK;
2497 	size = roundup2(offset + size, PAGE_SIZE);
2498 
2499 	kmem_free(kernel_map, base, size);
2500 }
2501 
2502 void
2503 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2504 {
2505 	struct pvo_entry *pvo;
2506 	vm_offset_t lim;
2507 	vm_paddr_t pa;
2508 	vm_size_t len;
2509 
2510 	PMAP_LOCK(pm);
2511 	while (sz > 0) {
2512 		lim = round_page(va);
2513 		len = MIN(lim - va, sz);
2514 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2515 		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
2516 			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2517 			    (va & ADDR_POFF);
2518 			moea64_syncicache(mmu, pm, va, pa, len);
2519 		}
2520 		va += len;
2521 		sz -= len;
2522 	}
2523 	PMAP_UNLOCK(pm);
2524 }
2525