/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

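/*
 * DISABLE_TRANS() turns off data relocation (MSR[DR]) so that subsequent
 * memory accesses use physical addresses directly; ENABLE_TRANS() restores
 * the MSR saved by DISABLE_TRANS(). These are used while touching bootstrap
 * data structures whose mappings do not exist yet.
 */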
#define DISABLE_TRANS(msr)	do { msr = mfmsr(); mtmsr(msr & ~PSL_DR); } while (0)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
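/*
 * Example: VSID_MAKE(0, 0x12345) yields 0x123450, and
 * VSID_TO_HASH(0x123450) recovers the original hash 0x12345.
 */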

#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex)
#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};
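/*
 * Each element of the OFW "translations" property is one such record;
 * the 64-bit physical address is split across om_pa_hi and om_pa_lo and
 * is reassembled in moea64_add_ofw_mappings() below.
 */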

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea64_table_mutex;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea64_pvo_kunmanaged =	/* list of unmanaged pages */
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static void		moea64_enter_locked(mmu_t, pmap_t, vm_offset_t,
			    vm_page_t, vm_prot_t, boolean_t);
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

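/*
 * Compute the PTEG index for a (VSID, VA) pair: XOR the VSID hash with
 * the page index within the segment, then mask to the size of the PTEG
 * table. Large pages hash on the large-page index instead.
 */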
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline u_int64_t
moea64_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea64_attr_save(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	ASSERT_TABLE_LOCK();

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

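/*
 * Compute the WIMG (write-through, cache-inhibited, memory-coherent,
 * guarded) storage attributes for a physical address. Explicit memory
 * attributes map directly to LPTE bits; otherwise, ordinary RAM is
 * mapped coherent (M) and anything outside the physical memory regions
 * is treated as I/O space (I + G).
 */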
static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return (pte_lo);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	      #ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	      #else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
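/*
 * Probe for large (16 MB) page support. The 970 family disables large
 * pages by default through a HID4 bit, which is cleared here; Cell
 * needs no such step. Other CPUs are assumed to lack support.
 */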
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

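/*
 * Pre-install a kernel SLB entry for the segment containing 'va' so the
 * address can be touched during bootstrap without an SLB fault. Does
 * nothing if the per-CPU SLB cache already holds such an entry.
 */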
static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= ~moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
				    &moea64_pvo_kunmanaged, pa, pa,
				    pte_lo, PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a valid
		 * address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

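	/*
	 * Size the PTEG table at roughly one group per two pages of
	 * physical memory: grow to the first power of two >= physmem,
	 * then halve.
	 */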
	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	LIST_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
	    mmu = OF_instance_to_package(mmui);
	    if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
		sz = 0;
	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
		panic("moea64_bootstrap: too many ofw translations");

	    if (sz > 0)
		moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

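/*
 * Set or clear the wired attribute on a mapping, adjusting the pmap's
 * wired count and, if the mapping is in the page table, the PTE itself.
 */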
void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

		UNLOCK_TABLE();
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here.
 */

static __inline void
moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa)
{

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		kcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		kcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea64_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */

static void
moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = &moea64_pvo_kunmanaged;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea64_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea64_pvo_kunmanaged;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map. Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	moea64_enter_locked(mmu, pm, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

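/*
 * Extract the physical address mapped at the given virtual address,
 * or 0 if there is no mapping.
 */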
vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);

	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
	    PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

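/*
 * Finish initializing the pmap module: create the UMA zones for PVO
 * entries. Without a direct map, the zones use the page allocator above
 * so that allocating a PVO entry never recurses into kmem for fresh KVA.
 */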
void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, LPTE_REF));
1477 }
1478 
1479 boolean_t
1480 moea64_is_modified(mmu_t mmu, vm_page_t m)
1481 {
1482 
1483 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1484 	    ("moea64_is_modified: page %p is not managed", m));
1485 
1486 	/*
1487 	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
1488 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1489 	 * is clear, no PTEs can have LPTE_CHG set.
1490 	 */
1491 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1492 	if ((m->oflags & VPO_BUSY) == 0 &&
1493 	    (m->aflags & PGA_WRITEABLE) == 0)
1494 		return (FALSE);
1495 	return (moea64_query_bit(mmu, m, LPTE_CHG));
1496 }
1497 
1498 boolean_t
1499 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1500 {
1501 	struct pvo_entry *pvo;
1502 	boolean_t rv;
1503 
1504 	PMAP_LOCK(pmap);
1505 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1506 	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1507 	PMAP_UNLOCK(pmap);
1508 	return (rv);
1509 }
1510 
1511 void
1512 moea64_clear_reference(mmu_t mmu, vm_page_t m)
1513 {
1514 
1515 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1516 	    ("moea64_clear_reference: page %p is not managed", m));
1517 	moea64_clear_bit(mmu, m, LPTE_REF);
1518 }
1519 
1520 void
1521 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1522 {
1523 
1524 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1525 	    ("moea64_clear_modify: page %p is not managed", m));
1526 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1527 	KASSERT((m->oflags & VPO_BUSY) == 0,
1528 	    ("moea64_clear_modify: page %p is busy", m));
1529 
1530 	/*
1531 	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1532 	 * set.  If the object containing the page is locked and the page is
1533 	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
1534 	 */
1535 	if ((m->aflags & PGA_WRITEABLE) == 0)
1536 		return;
1537 	moea64_clear_bit(mmu, m, LPTE_CHG);
1538 }
1539 
1540 /*
1541  * Clear the write and modified bits in each of the given page's mappings.
1542  */
1543 void
1544 moea64_remove_write(mmu_t mmu, vm_page_t m)
1545 {
1546 	struct	pvo_entry *pvo;
1547 	uintptr_t pt;
1548 	pmap_t	pmap;
1549 	uint64_t lo;
1550 
1551 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1552 	    ("moea64_remove_write: page %p is not managed", m));
1553 
1554 	/*
1555 	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
1556 	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
1557 	 * is clear, no page table entries need updating.
1558 	 */
1559 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1560 	if ((m->oflags & VPO_BUSY) == 0 &&
1561 	    (m->aflags & PGA_WRITEABLE) == 0)
1562 		return;
1563 	vm_page_lock_queues();
1564 	lo = moea64_attr_fetch(m);
1565 	powerpc_sync();
1566 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1567 		pmap = pvo->pvo_pmap;
1568 		PMAP_LOCK(pmap);
1569 		LOCK_TABLE();
1570 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1571 			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1572 			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1573 			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1574 			if (pt != -1) {
1575 				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1576 				lo |= pvo->pvo_pte.lpte.pte_lo;
1577 				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1578 				MOEA64_PTE_CHANGE(mmu, pt,
1579 				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1580 				if (pvo->pvo_pmap == kernel_pmap)
1581 					isync();
1582 			}
1583 		}
1584 		UNLOCK_TABLE();
1585 		PMAP_UNLOCK(pmap);
1586 	}
1587 	if ((lo & LPTE_CHG) != 0) {
1588 		moea64_attr_clear(m, LPTE_CHG);
1589 		vm_page_dirty(m);
1590 	}
1591 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1592 	vm_page_unlock_queues();
1593 }
1594 
1595 /*
1596  *	moea64_ts_referenced:
1597  *
1598  *	Return a count of reference bits for a page, clearing those bits.
1599  *	It is not necessary for every reference bit to be cleared, but it
1600  *	is necessary that 0 only be returned when there are truly no
1601  *	reference bits set.
1602  *
1603  *	XXX: The exact number of bits to check and clear is a matter that
1604  *	should be tested and standardized at some point in the future for
1605  *	optimal aging of shared pages.
1606  */
1607 boolean_t
1608 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1609 {
1610 
1611 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1612 	    ("moea64_ts_referenced: page %p is not managed", m));
1613 	return (moea64_clear_bit(mmu, m, LPTE_REF));
1614 }
1615 
1616 /*
1617  * Modify the WIMG settings of all mappings for a page.
1618  */
1619 void
1620 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1621 {
1622 	struct	pvo_entry *pvo;
1623 	struct  pvo_head *pvo_head;
1624 	uintptr_t pt;
1625 	pmap_t	pmap;
1626 	uint64_t lo;
1627 
1628 	if ((m->oflags & VPO_UNMANAGED) != 0) {
1629 		m->md.mdpg_cache_attrs = ma;
1630 		return;
1631 	}
1632 
1633 	vm_page_lock_queues();
1634 	pvo_head = vm_page_to_pvoh(m);
1635 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1636 	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1637 		pmap = pvo->pvo_pmap;
1638 		PMAP_LOCK(pmap);
1639 		LOCK_TABLE();
1640 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1641 		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1642 		pvo->pvo_pte.lpte.pte_lo |= lo;
1643 		if (pt != -1) {
1644 			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1645 			    pvo->pvo_vpn);
1646 			if (pvo->pvo_pmap == kernel_pmap)
1647 				isync();
1648 		}
1649 		UNLOCK_TABLE();
1650 		PMAP_UNLOCK(pmap);
1651 	}
1652 	m->md.mdpg_cache_attrs = ma;
1653 	vm_page_unlock_queues();
1654 }
1655 
1656 /*
1657  * Map a wired page into kernel virtual address space.
1658  */
1659 void
1660 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1661 {
1662 	uint64_t	pte_lo;
1663 	int		error;
1664 
1665 	pte_lo = moea64_calc_wimg(pa, ma);
1666 
1667 	PMAP_LOCK(kernel_pmap);
1668 	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1669 	    &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1670 
1671 	if (error != 0 && error != ENOENT)
1672 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1673 		    pa, error);
1674 
1675 	/*
1676 	 * Flush the memory from the instruction cache.
1677 	 */
1678 	if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
1679 		__syncicache((void *)va, PAGE_SIZE);
1680 	PMAP_UNLOCK(kernel_pmap);
1681 }
1682 
1683 void
1684 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1685 {
1686 
1687 	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1688 }
1689 
1690 /*
1691  * Extract the physical page address associated with the given kernel virtual
1692  * address.
1693  */
1694 vm_offset_t
1695 moea64_kextract(mmu_t mmu, vm_offset_t va)
1696 {
1697 	struct		pvo_entry *pvo;
1698 	vm_paddr_t pa;
1699 
1700 	/*
1701 	 * Shortcut the direct-mapped case when applicable.  We never put
1702 	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1703 	 */
1704 	if (va < VM_MIN_KERNEL_ADDRESS)
1705 		return (va);
1706 
1707 	PMAP_LOCK(kernel_pmap);
1708 	pvo = moea64_pvo_find_va(kernel_pmap, va);
1709 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1710 	    va));
1711 	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1712 	PMAP_UNLOCK(kernel_pmap);
1713 	return (pa);
1714 }
1715 
1716 /*
1717  * Remove a wired page from kernel virtual address space.
1718  */
1719 void
1720 moea64_kremove(mmu_t mmu, vm_offset_t va)
1721 {
1722 	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1723 }
1724 
1725 /*
1726  * Map a range of physical addresses into kernel virtual address space.
1727  *
1728  * The value passed in *virt is a suggested virtual address for the mapping.
1729  * Architectures which can support a direct-mapped physical to virtual region
1730  * can return the appropriate address within that region, leaving '*virt'
1731  * unchanged.  We cannot and therefore do not; *virt is updated with the
1732  * first usable address after the mapped region.
1733  */
1734 vm_offset_t
1735 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1736     vm_offset_t pa_end, int prot)
1737 {
1738 	vm_offset_t	sva, va;
1739 
1740 	sva = *virt;
1741 	va = sva;
1742 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1743 		moea64_kenter(mmu, va, pa_start);
1744 	*virt = va;
1745 
1746 	return (sva);
1747 }
1748 
1749 /*
1750  * Returns true if the pmap's pv is one of the first
1751  * 16 pvs linked to from this page.  This count may
1752  * be changed upwards or downwards in the future; it
1753  * is only necessary that true be returned for a small
1754  * subset of pmaps for proper page aging.
1755  */
1756 boolean_t
1757 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1758 {
	int loops;
1760 	struct pvo_entry *pvo;
1761 	boolean_t rv;
1762 
1763 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1764 	    ("moea64_page_exists_quick: page %p is not managed", m));
1765 	loops = 0;
1766 	rv = FALSE;
1767 	vm_page_lock_queues();
1768 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1769 		if (pvo->pvo_pmap == pmap) {
1770 			rv = TRUE;
1771 			break;
1772 		}
1773 		if (++loops >= 16)
1774 			break;
1775 	}
1776 	vm_page_unlock_queues();
1777 	return (rv);
1778 }
1779 
1780 /*
1781  * Return the number of managed mappings to the given physical page
1782  * that are wired.
1783  */
1784 int
1785 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1786 {
1787 	struct pvo_entry *pvo;
1788 	int count;
1789 
1790 	count = 0;
1791 	if ((m->oflags & VPO_UNMANAGED) != 0)
1792 		return (count);
1793 	vm_page_lock_queues();
1794 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1795 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1796 			count++;
1797 	vm_page_unlock_queues();
1798 	return (count);
1799 }
1800 
1801 static uintptr_t	moea64_vsidcontext;
1802 
1803 uintptr_t
moea64_get_unique_vsid(void)
{
1805 	u_int entropy;
1806 	register_t hash;
1807 	uint32_t mask;
1808 	int i;
1809 
1810 	entropy = 0;
1811 	__asm __volatile("mftb %0" : "=r"(entropy));
1812 
1813 	mtx_lock(&moea64_slb_mutex);
1814 	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1815 		u_int	n;
1816 
1817 		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
1822 		 * instead of a multiply.)
1823 		 */
1824 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1825 		hash = moea64_vsidcontext & (NVSIDS - 1);
1826 		if (hash == 0)		/* 0 is special, avoid it */
1827 			continue;
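		/*
		 * Locate this VSID's bit in the allocation bitmap: one
		 * bit per VSID, VSID_NBPW bits per word.  (The shift by
		 * 5 assumes 32-bit bitmap words.)
		 */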
1828 		n = hash >> 5;
1829 		mask = 1 << (hash & (VSID_NBPW - 1));
1830 		hash = (moea64_vsidcontext & VSID_HASHMASK);
1831 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1832 			/* anything free in this bucket? */
1833 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1834 				entropy = (moea64_vsidcontext >> 20);
1835 				continue;
1836 			}
1837 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1838 			mask = 1 << i;
1839 			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1840 			hash |= i;
1841 		}
1842 		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1843 		    ("Allocating in-use VSID %#zx\n", hash));
1844 		moea64_vsid_bitmap[n] |= mask;
1845 		mtx_unlock(&moea64_slb_mutex);
1846 		return (hash);
1847 	}
1848 
1849 	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments", __func__);
1851 }
1852 
1853 #ifdef __powerpc64__
1854 void
1855 moea64_pinit(mmu_t mmu, pmap_t pmap)
1856 {
1857 	PMAP_LOCK_INIT(pmap);
1858 	LIST_INIT(&pmap->pmap_pvo);
1859 
1860 	pmap->pm_slb_tree_root = slb_alloc_tree();
1861 	pmap->pm_slb = slb_alloc_user_cache();
1862 	pmap->pm_slb_len = 0;
1863 }
1864 #else
1865 void
1866 moea64_pinit(mmu_t mmu, pmap_t pmap)
1867 {
1868 	int	i;
1869 	uint32_t hash;
1870 
1871 	PMAP_LOCK_INIT(pmap);
1872 	LIST_INIT(&pmap->pmap_pvo);
1873 
1874 	if (pmap_bootstrapped)
1875 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1876 		    (vm_offset_t)pmap);
1877 	else
1878 		pmap->pmap_phys = pmap;
1879 
1880 	/*
1881 	 * Allocate some segment registers for this pmap.
1882 	 */
1883 	hash = moea64_get_unique_vsid();
1884 
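	/* Derive the 16 per-segment VSIDs from the one allocated hash. */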
1885 	for (i = 0; i < 16; i++)
1886 		pmap->pm_sr[i] = VSID_MAKE(i, hash);
1887 
1888 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1889 }
1890 #endif
1891 
1892 /*
1893  * Initialize the pmap associated with process 0.
1894  */
1895 void
1896 moea64_pinit0(mmu_t mmu, pmap_t pm)
1897 {
1898 	moea64_pinit(mmu, pm);
1899 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1900 }
1901 
1902 /*
1903  * Set the physical protection on the specified range of this map as requested.
1904  */
1905 void
1906 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1907     vm_prot_t prot)
1908 {
1909 	struct	pvo_entry *pvo;
1910 	uintptr_t pt;
1911 
1912 	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1913 	    eva, prot);
1914 
1916 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1917 	    ("moea64_protect: non current pmap"));
1918 
1919 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1920 		moea64_remove(mmu, pm, sva, eva);
1921 		return;
1922 	}
1923 
1924 	vm_page_lock_queues();
1925 	PMAP_LOCK(pm);
1926 	for (; sva < eva; sva += PAGE_SIZE) {
1927 		pvo = moea64_pvo_find_va(pm, sva);
1928 		if (pvo == NULL)
1929 			continue;
1930 
1931 		/*
1932 		 * Grab the PTE pointer before we diddle with the cached PTE
1933 		 * copy.
1934 		 */
1935 		LOCK_TABLE();
1936 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1937 
		/*
		 * Change the protection of the page.  The mapping is made
		 * read-only unconditionally: pmap_protect() is never used
		 * to grant permissions, and a still-permitted write will
		 * fault and re-enter the mapping with write access restored.
		 */
1941 		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1942 		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1943 		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1944 		if ((prot & VM_PROT_EXECUTE) == 0)
1945 			pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1946 
1947 		/*
1948 		 * If the PVO is in the page table, update that pte as well.
1949 		 */
1950 		if (pt != -1) {
1951 			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1952 			    pvo->pvo_vpn);
1953 			if ((pvo->pvo_pte.lpte.pte_lo &
1954 			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1955 				moea64_syncicache(mmu, pm, sva,
1956 				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
1957 				    PAGE_SIZE);
1958 			}
1959 		}
1960 		UNLOCK_TABLE();
1961 	}
1962 	vm_page_unlock_queues();
1963 	PMAP_UNLOCK(pm);
1964 }
1965 
1966 /*
1967  * Map a list of wired pages into kernel virtual address space.  This is
1968  * intended for temporary mappings which do not need page modification or
1969  * references recorded.  Existing mappings in the region are overwritten.
1970  */
1971 void
1972 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1973 {
1974 	while (count-- > 0) {
1975 		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1976 		va += PAGE_SIZE;
1977 		m++;
1978 	}
1979 }
1980 
1981 /*
1982  * Remove page mappings from kernel virtual address space.  Intended for
1983  * temporary mappings entered by moea64_qenter.
1984  */
1985 void
1986 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1987 {
1988 	while (count-- > 0) {
1989 		moea64_kremove(mmu, va);
1990 		va += PAGE_SIZE;
1991 	}
1992 }
1993 
1994 void
1995 moea64_release_vsid(uint64_t vsid)
1996 {
1997 	int idx, mask;
1998 
1999 	mtx_lock(&moea64_slb_mutex);
2000 	idx = vsid & (NVSIDS-1);
2001 	mask = 1 << (idx % VSID_NBPW);
2002 	idx /= VSID_NBPW;
2003 	KASSERT(moea64_vsid_bitmap[idx] & mask,
2004 	    ("Freeing unallocated VSID %#jx", vsid));
2005 	moea64_vsid_bitmap[idx] &= ~mask;
2006 	mtx_unlock(&moea64_slb_mutex);
2007 }
2008 
2010 void
2011 moea64_release(mmu_t mmu, pmap_t pmap)
2012 {
2013 
2014 	/*
2015 	 * Free segment registers' VSIDs
2016 	 */
#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif
2025 
2026 	PMAP_LOCK_DESTROY(pmap);
2027 }
2028 
2029 /*
2030  * Remove the given range of addresses from the specified map.
2031  */
2032 void
2033 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2034 {
2035 	struct	pvo_entry *pvo;
2036 
2037 	vm_page_lock_queues();
2038 	PMAP_LOCK(pm);
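	/*
	 * For short ranges, look each page up individually; for larger
	 * ones it is cheaper to walk the pmap's PVO list once.
	 */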
	if ((eva - sva) / PAGE_SIZE < 10) {
2040 		for (; sva < eva; sva += PAGE_SIZE) {
2041 			pvo = moea64_pvo_find_va(pm, sva);
2042 			if (pvo != NULL)
2043 				moea64_pvo_remove(mmu, pvo);
2044 		}
2045 	} else {
2046 		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
2047 			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
2048 				continue;
2049 			moea64_pvo_remove(mmu, pvo);
2050 		}
2051 	}
2052 	vm_page_unlock_queues();
2053 	PMAP_UNLOCK(pm);
2054 }
2055 
2056 /*
2057  * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2058  * will reflect changes in pte's back to the vm_page.
2059  */
2060 void
2061 moea64_remove_all(mmu_t mmu, vm_page_t m)
2062 {
2063 	struct  pvo_head *pvo_head;
2064 	struct	pvo_entry *pvo, *next_pvo;
2065 	pmap_t	pmap;
2066 
2067 	vm_page_lock_queues();
2068 	pvo_head = vm_page_to_pvoh(m);
2069 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2070 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
2071 
2072 		pmap = pvo->pvo_pmap;
2073 		PMAP_LOCK(pmap);
2074 		moea64_pvo_remove(mmu, pvo);
2075 		PMAP_UNLOCK(pmap);
2076 	}
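	/*
	 * All mappings are gone; fold any modified bit saved during
	 * removal back into the vm_page before clearing PGA_WRITEABLE.
	 */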
2077 	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
2078 		moea64_attr_clear(m, LPTE_CHG);
2079 		vm_page_dirty(m);
2080 	}
2081 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2082 	vm_page_unlock_queues();
2083 }
2084 
2085 /*
2086  * Allocate a physical page of memory directly from the phys_avail map.
2087  * Can only be called from moea64_bootstrap before avail start and end are
2088  * calculated.
2089  */
2090 vm_offset_t
2091 moea64_bootstrap_alloc(vm_size_t size, u_int align)
2092 {
2093 	vm_offset_t	s, e;
2094 	int		i, j;
2095 
2096 	size = round_page(size);
2097 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2098 		if (align != 0)
2099 			s = (phys_avail[i] + align - 1) & ~(align - 1);
2100 		else
2101 			s = phys_avail[i];
2102 		e = s + size;
2103 
2104 		if (s < phys_avail[i] || e > phys_avail[i + 1])
2105 			continue;
2106 
2107 		if (s + size > platform_real_maxaddr())
2108 			continue;
2109 
2110 		if (s == phys_avail[i]) {
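			/* The allocation abuts the region's start. */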
2111 			phys_avail[i] += size;
2112 		} else if (e == phys_avail[i + 1]) {
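			/* The allocation abuts the region's end. */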
2113 			phys_avail[i + 1] -= size;
2114 		} else {
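			/*
			 * The allocation splits the region in two; shift
			 * the tail of phys_avail[] up by one pair to make
			 * room for the new entry.
			 */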
2115 			for (j = phys_avail_count * 2; j > i; j -= 2) {
2116 				phys_avail[j] = phys_avail[j - 2];
2117 				phys_avail[j + 1] = phys_avail[j - 1];
2118 			}
2119 
2120 			phys_avail[i + 3] = phys_avail[i + 1];
2121 			phys_avail[i + 1] = s;
2122 			phys_avail[i + 2] = e;
2123 			phys_avail_count++;
2124 		}
2125 
2126 		return (s);
2127 	}
2128 	panic("moea64_bootstrap_alloc: could not allocate memory");
2129 }
2130 
2131 static int
2132 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2133     struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2134     uint64_t pte_lo, int flags)
2135 {
2136 	struct	 pvo_entry *pvo;
2137 	uint64_t vsid;
2138 	int	 first;
2139 	u_int	 ptegidx;
2140 	int	 i;
2141 	int      bootstrap;
2142 
2143 	/*
2144 	 * One nasty thing that can happen here is that the UMA calls to
2145 	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2146 	 * which calls UMA...
2147 	 *
2148 	 * We break the loop by detecting recursion and allocating out of
2149 	 * the bootstrap pool.
2150 	 */
2151 
2152 	first = 0;
2153 	bootstrap = (flags & PVO_BOOTSTRAP);
2154 
2155 	if (!moea64_initialized)
2156 		bootstrap = 1;
2157 
2158 	/*
2159 	 * Compute the PTE Group index.
2160 	 */
2161 	va &= ~ADDR_POFF;
2162 	vsid = va_to_vsid(pm, va);
2163 	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2164 
2165 	/*
2166 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
2167 	 * there is a mapping.
2168 	 */
2169 	LOCK_TABLE();
2170 
2171 	moea64_pvo_enter_calls++;
2172 
2173 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2174 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2175 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2176 			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2177 			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2179 					/* Re-insert if spilled */
2180 					i = MOEA64_PTE_INSERT(mmu, ptegidx,
2181 					    &pvo->pvo_pte.lpte);
2182 					if (i >= 0)
2183 						PVO_PTEGIDX_SET(pvo, i);
2184 					moea64_pte_overflow--;
2185 				}
2186 				UNLOCK_TABLE();
2187 				return (0);
2188 			}
2189 			moea64_pvo_remove(mmu, pvo);
2190 			break;
2191 		}
2192 	}
2193 
2194 	/*
2195 	 * If we aren't overwriting a mapping, try to allocate.
2196 	 */
2197 	if (bootstrap) {
2198 		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_pvo_enter: bpvo pool exhausted, %d, %d, %zd",
2200 			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2201 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2202 		}
2203 		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2204 		moea64_bpvo_pool_index++;
2205 		bootstrap = 1;
2206 	} else {
2207 		/*
2208 		 * Note: drop the table lock around the UMA allocation in
2209 		 * case the UMA allocator needs to manipulate the page
2210 		 * table. The mapping we are working with is already
2211 		 * protected by the PMAP lock.
2212 		 */
2213 		UNLOCK_TABLE();
2214 		pvo = uma_zalloc(zone, M_NOWAIT);
2215 		LOCK_TABLE();
2216 	}
2217 
2218 	if (pvo == NULL) {
2219 		UNLOCK_TABLE();
2220 		return (ENOMEM);
2221 	}
2222 
2223 	moea64_pvo_entries++;
2224 	pvo->pvo_vaddr = va;
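	/*
	 * Record the virtual page number; the PTE maintenance routines
	 * use it to target TLB invalidations.
	 */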
2225 	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2226 	    | (vsid << 16);
2227 	pvo->pvo_pmap = pm;
2228 	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2229 	pvo->pvo_vaddr &= ~ADDR_POFF;
2230 
2231 	if (flags & PVO_WIRED)
2232 		pvo->pvo_vaddr |= PVO_WIRED;
2233 	if (pvo_head != &moea64_pvo_kunmanaged)
2234 		pvo->pvo_vaddr |= PVO_MANAGED;
2235 	if (bootstrap)
2236 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2237 	if (flags & PVO_LARGE)
2238 		pvo->pvo_vaddr |= PVO_LARGE;
2239 
2240 	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2241 	    (uint64_t)(pa) | pte_lo, flags);
2242 
2243 	/*
2244 	 * Add to pmap list
2245 	 */
2246 	LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
2247 
2248 	/*
2249 	 * Remember if the list was empty and therefore will be the first
2250 	 * item.
2251 	 */
2252 	if (LIST_FIRST(pvo_head) == NULL)
2253 		first = 1;
2254 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2255 
2256 	if (pvo->pvo_vaddr & PVO_WIRED) {
2257 		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2258 		pm->pm_stats.wired_count++;
2259 	}
2260 	pm->pm_stats.resident_count++;
2261 
2262 	/*
2263 	 * We hope this succeeds but it isn't required.
2264 	 */
2265 	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2266 	if (i >= 0) {
2267 		PVO_PTEGIDX_SET(pvo, i);
	} else {
		/*
		 * The PTEG is full.  Account for the mapping as an
		 * overflow entry; it is re-inserted if this PVO is
		 * entered again (the spill case above).
		 */
		moea64_pte_overflow++;
	}
2272 
2273 	if (pm == kernel_pmap)
2274 		isync();
2275 
2276 	UNLOCK_TABLE();
2277 
2278 #ifdef __powerpc64__
2279 	/*
2280 	 * Make sure all our bootstrap mappings are in the SLB as soon
2281 	 * as virtual memory is switched on.
2282 	 */
2283 	if (!pmap_bootstrapped)
2284 		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2285 #endif
2286 
2287 	return (first ? ENOENT : 0);
2288 }
2289 
2290 static void
2291 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2292 {
2293 	uintptr_t pt;
2294 
2295 	/*
2296 	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
2298 	 */
2299 	LOCK_TABLE();
2300 	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2301 	if (pt != -1) {
2302 		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2303 		PVO_PTEGIDX_CLR(pvo);
2304 	} else {
2305 		moea64_pte_overflow--;
2306 	}
2307 
2308 	/*
2309 	 * Update our statistics.
2310 	 */
2311 	pvo->pvo_pmap->pm_stats.resident_count--;
2312 	if (pvo->pvo_vaddr & PVO_WIRED)
2313 		pvo->pvo_pmap->pm_stats.wired_count--;
2314 
2315 	/*
2316 	 * Save the REF/CHG bits into their cache if the page is managed.
2317 	 */
2318 	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
2319 		struct	vm_page *pg;
2320 
2321 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2322 		if (pg != NULL) {
2323 			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
2324 			    (LPTE_REF | LPTE_CHG));
2325 		}
2326 	}
2327 
2328 	/*
2329 	 * Remove this PVO from the PV and pmap lists.
2330 	 */
2331 	LIST_REMOVE(pvo, pvo_vlink);
2332 	LIST_REMOVE(pvo, pvo_plink);
2333 
2334 	/*
2335 	 * Remove this from the overflow list and return it to the pool
2336 	 * if we aren't going to reuse it.
2337 	 */
2338 	LIST_REMOVE(pvo, pvo_olink);
2339 
2340 	moea64_pvo_entries--;
2341 	moea64_pvo_remove_calls++;
2342 
2343 	UNLOCK_TABLE();
2344 
2345 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2346 		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2347 		    moea64_upvo_zone, pvo);
2348 }
2349 
2350 static struct pvo_entry *
2351 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2352 {
2353 	struct		pvo_entry *pvo;
2354 	int		ptegidx;
2355 	uint64_t	vsid;
#ifdef __powerpc64__
2357 	uint64_t	slbv;
2358 
2359 	if (pm == kernel_pmap) {
2360 		slbv = kernel_va_to_slbv(va);
2361 	} else {
2362 		struct slb *slb;
2363 		slb = user_va_to_slb_entry(pm, va);
2364 		/* The page is not mapped if the segment isn't */
2365 		if (slb == NULL)
			return (NULL);
2367 		slbv = slb->slbv;
2368 	}
2369 
2370 	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
2371 	if (slbv & SLBV_L)
2372 		va &= ~moea64_large_page_mask;
2373 	else
2374 		va &= ~ADDR_POFF;
2375 	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
#else
2377 	va &= ~ADDR_POFF;
2378 	vsid = va_to_vsid(pm, va);
2379 	ptegidx = va_to_pteg(vsid, va, 0);
#endif
2381 
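	/*
	 * Search this PTEG's overflow list for a PVO matching both the
	 * pmap and the page-aligned VA.
	 */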
2382 	LOCK_TABLE();
2383 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2384 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
2385 			break;
2386 	}
2387 	UNLOCK_TABLE();
2388 
2389 	return (pvo);
2390 }
2391 
2392 static boolean_t
2393 moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2394 {
2395 	struct	pvo_entry *pvo;
2396 	uintptr_t pt;
2397 
2398 	if (moea64_attr_fetch(m) & ptebit)
2399 		return (TRUE);
2400 
2401 	vm_page_lock_queues();
2402 
2403 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2404 
2405 		/*
2406 		 * See if we saved the bit off.  If so, cache it and return
2407 		 * success.
2408 		 */
2409 		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2410 			moea64_attr_save(m, ptebit);
2411 			vm_page_unlock_queues();
2412 			return (TRUE);
2413 		}
2414 	}
2415 
2416 	/*
2417 	 * No luck, now go through the hard part of looking at the PTEs
2418 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2419 	 * the PTEs.
2420 	 */
2421 	powerpc_sync();
2422 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2423 
2424 		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
2426 		 * REF/CHG bits from the valid PTE.  If the appropriate
2427 		 * ptebit is set, cache it and return success.
2428 		 */
2429 		LOCK_TABLE();
2430 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2431 		if (pt != -1) {
2432 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2433 			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2434 				UNLOCK_TABLE();
2435 
2436 				moea64_attr_save(m, ptebit);
2437 				vm_page_unlock_queues();
2438 				return (TRUE);
2439 			}
2440 		}
2441 		UNLOCK_TABLE();
2442 	}
2443 
2444 	vm_page_unlock_queues();
2445 	return (FALSE);
2446 }
2447 
2448 static u_int
2449 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2450 {
2451 	u_int	count;
2452 	struct	pvo_entry *pvo;
2453 	uintptr_t pt;
2454 
2455 	vm_page_lock_queues();
2456 
2457 	/*
2458 	 * Clear the cached value.
2459 	 */
2460 	moea64_attr_clear(m, ptebit);
2461 
2462 	/*
2463 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that the pvo entries and
	 * list heads are never mapped through this page's PTEs, so our
	 * own accesses to them cannot set the REF/CHG bits being
	 * examined here.
2468 	 */
2469 	powerpc_sync();
2470 
2471 	/*
2472 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2473 	 * valid pte clear the ptebit from the valid pte.
2474 	 */
2475 	count = 0;
2476 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2477 
2478 		LOCK_TABLE();
2479 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2480 		if (pt != -1) {
2481 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2482 			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2483 				count++;
2484 				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
2485 				    pvo->pvo_vpn, ptebit);
2486 			}
2487 		}
2488 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2489 		UNLOCK_TABLE();
2490 	}
2491 
2492 	vm_page_unlock_queues();
2493 	return (count);
2494 }
2495 
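/*
 * Check whether every page of [pa, pa + size) has a 1:1 mapping in the
 * kernel pmap.  Returns 0 if so and EFAULT if any page is unmapped or
 * mapped at a different physical address.
 */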
2496 boolean_t
2497 moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2498 {
2499 	struct pvo_entry *pvo;
2500 	vm_offset_t ppa;
2501 	int error = 0;
2502 
2503 	PMAP_LOCK(kernel_pmap);
2504 	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
2505 		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
2506 		if (pvo == NULL ||
2507 		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2508 			error = EFAULT;
2509 			break;
2510 		}
2511 	}
2512 	PMAP_UNLOCK(kernel_pmap);
2513 
2514 	return (error);
2515 }
2516 
2517 /*
2518  * Map a set of physical memory pages into the kernel virtual
2519  * address space. Return a pointer to where it is mapped. This
2520  * routine is intended to be used for mapping device memory,
2521  * NOT real memory.
2522  */
2523 void *
2524 moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2525 {
2526 	vm_offset_t va, tmpva, ppa, offset;
2527 
2528 	ppa = trunc_page(pa);
2529 	offset = pa & PAGE_MASK;
2530 	size = roundup(offset + size, PAGE_SIZE);
2531 
2532 	va = kmem_alloc_nofault(kernel_map, size);
2533 
2534 	if (!va)
2535 		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2536 
2537 	for (tmpva = va; size > 0;) {
2538 		moea64_kenter_attr(mmu, tmpva, ppa, ma);
2539 		size -= PAGE_SIZE;
2540 		tmpva += PAGE_SIZE;
2541 		ppa += PAGE_SIZE;
2542 	}
2543 
2544 	return ((void *)(va + offset));
2545 }
2546 
2547 void *
2548 moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2549 {
2550 
	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2552 }
2553 
2554 void
2555 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2556 {
2557 	vm_offset_t base, offset;
2558 
2559 	base = trunc_page(va);
2560 	offset = va & PAGE_MASK;
2561 	size = roundup(offset + size, PAGE_SIZE);
2562 
2563 	kmem_free(kernel_map, base, size);
2564 }
2565 
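/*
 * Make the instruction cache coherent with recent writes to a range of
 * virtual addresses, one page fragment at a time, skipping pages that
 * are unmapped or mapped cache-inhibited.
 */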
2566 void
2567 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2568 {
2569 	struct pvo_entry *pvo;
2570 	vm_offset_t lim;
2571 	vm_paddr_t pa;
2572 	vm_size_t len;
2573 
2574 	PMAP_LOCK(pm);
2575 	while (sz > 0) {
2576 		lim = round_page(va);
2577 		len = MIN(lim - va, sz);
2578 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2579 		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
2580 			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2581 			    (va & ADDR_POFF);
2582 			moea64_syncicache(mmu, pm, va, pa, len);
2583 		}
2584 		va += len;
2585 		sz -= len;
2586 	}
2587 	PMAP_UNLOCK(pm);
2588 }
2589