/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008-2015 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
#include <sys/reboot.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);
#define	DISABLE_TRANS(msr)						\
	do { (msr) = mfmsr(); mtmsr((msr) & ~PSL_DR); } while (0)
#define	ENABLE_TRANS(msr)	mtmsr(msr)
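
/*
 * Illustrative usage of the translation-toggle macros above (a sketch
 * mirroring moea64_setup_direct_map() and moea64_add_ofw_mappings() in
 * this file, not a new interface):
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	... work on physical addresses with data translation off ...
 *	ENABLE_TRANS(msr);
 */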

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
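
/*
 * Worked example (illustrative only): VSID_MAKE(3, 0x12345) evaluates to
 * (0x12345 << 4) | 3 == 0x123453, and VSID_TO_HASH(0x123453) recovers
 * 0x12345; the two macros round-trip the 20-bit hash around the 4-bit
 * segment register number kept in the low bits.
 */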

/*
 * Locking semantics:
 *
 * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The
 * contents of all PVO entries are protected by the locks of their respective
 * pmaps. The pmap of any PVO is guaranteed not to change so long as the PVO
 * is linked into any list.
 */

#define PV_LOCK_COUNT	PA_LOCK_COUNT
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];

/*
 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
 * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with
 * the index at (N << 45).
 */
#ifdef __powerpc64__
#define PV_LOCK_IDX(pa)	((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT)
#else
#define PV_LOCK_IDX(pa)	(pa_index(pa) % PV_LOCK_COUNT)
#endif
#define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
#define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
#define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
#define PV_LOCKASSERT(pa)	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
#define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
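
/*
 * Lock-ordering sketch (hedged: this mirrors the pattern used by
 * moea64_enter() and moea64_remove_write() below; it adds no new
 * interface). Page locks are taken before pmap locks and released in
 * reverse order:
 *
 *	PV_PAGE_LOCK(m);
 *	PMAP_LOCK(pmap);
 *	... manipulate the PVO lists for (pmap, m) ...
 *	PMAP_UNLOCK(pmap);
 *	PV_PAGE_UNLOCK(m);
 */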

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern void *slbtrap, *slbtrapend;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static struct	numa_mem_region *numa_pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz, numapregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the SLB tables.
 */
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_long		moea64_pteg_count;
u_long		moea64_pteg_mask;

/*
 * PVO data.
 */

uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */

static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
static int	moea64_bpvo_pool_size = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
    &moea64_bpvo_pool_index, 0, "");

#define	BPVO_POOL_SIZE	327680 /* Sensible historical default value */
#define	BPVO_POOL_EXPANSION_FACTOR	3
#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
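
/*
 * Illustrative note (an assumption consistent with the KERNEL_VSIDBITS
 * reservation in moea64_mid_bootstrap() below): VSID hash value n is
 * tracked as bit (n % VSID_NBPW) of moea64_vsid_bitmap[n / VSID_NBPW],
 * e.g. with 32-bit words, n = 70 maps to bit 6 of word 2.
 */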

static boolean_t moea64_initialized = FALSE;

#ifdef MOEA64_STATS
/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");
#endif

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
		    struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page_locked(mmu_t mmu,
		    struct pvo_entry *pvo, vm_page_t m);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_paddr_t pa, vm_size_t sz);
static void		moea64_pmap_init_qpages(void);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea64_page_init(mmu_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
    void **va);
void moea64_scan_init(mmu_t mmu);
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
boolean_t moea64_page_is_mapped(mmu_t mmu, vm_page_t m);
static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
    int *is_user, vm_offset_t *decoded_addr);
static size_t moea64_scan_pmap(mmu_t mmu);
static void *moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs);
#ifdef __powerpc64__
static void moea64_page_array_startup(mmu_t, long);
#endif

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick, moea64_page_exists_quick),
	MMUMETHOD(mmu_page_init,	moea64_page_init),
	MMUMETHOD(mmu_page_wired_mappings, moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
	MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
	MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
	MMUMETHOD(mmu_page_is_mapped,	moea64_page_is_mapped),
#ifdef __powerpc64__
	MMUMETHOD(mmu_page_array_startup,	moea64_page_array_startup),
#endif

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped, moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
	MMUMETHOD(mmu_scan_pmap,	moea64_scan_pmap),
	MMUMETHOD(mmu_dump_pmap_init,	moea64_dump_pmap_init),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
	MMUMETHOD(mmu_map_user_ptr,	moea64_map_user_ptr),
	MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}

static struct pvo_entry *
alloc_pvo_entry(int bootstrap)
{
	struct pvo_entry *pvo;

	if (!moea64_initialized || bootstrap) {
		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
			panic("%s: bpvo pool exhausted, index=%d, size=%d, bytes=%zu. "
			    "Try setting the machdep.moea64_bpvo_pool_size tunable",
			    __func__, moea64_bpvo_pool_index,
			    moea64_bpvo_pool_size,
			    moea64_bpvo_pool_size * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[
		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
		bzero(pvo, sizeof(*pvo));
		pvo->pvo_vaddr = PVO_BOOTSTRAP;
	} else
		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT | M_ZERO);

	return (pvo);
}

static void
init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
{
	uint64_t vsid;
	uint64_t hash;
	int shift;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo->pvo_pmap = pmap;
	va &= ~ADDR_POFF;
	pvo->pvo_vaddr |= va;
	vsid = va_to_vsid(pmap, va);
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);

	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
	    ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
}
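
/*
 * Note on the slot computation above (illustrative; the constant comes
 * from the PowerPC hashed page table layout, where each PTEG holds 8
 * PTEs): the PTEG index (hash & moea64_pteg_mask) is shifted left by 3 to
 * yield the slot of the group's first PTE. For example, with
 * moea64_pteg_mask == 0xfff and hash == 0x1005, the PTEG index is 0x5 and
 * the base slot is 0x28.
 */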

static void
free_pvo_entry(struct pvo_entry *pvo)
{

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(moea64_pvo_zone, pvo);
}

void
moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
{

	lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo);
	lpte->pte_hi |= LPTE_VALID;

	if (pvo->pvo_vaddr & PVO_LARGE)
		lpte->pte_hi |= LPTE_BIG;
	if (pvo->pvo_vaddr & PVO_WIRED)
		lpte->pte_hi |= LPTE_WIRED;
	if (pvo->pvo_vaddr & PVO_HID)
		lpte->pte_hi |= LPTE_HID;

	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
		lpte->pte_lo |= LPTE_BW;
	else
		lpte->pte_lo |= LPTE_BR;

	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
		lpte->pte_lo |= LPTE_NOEXEC;
}

static __inline uint64_t
moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_CACHEABLE:
			return (LPTE_M);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return (pte_lo);
}
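
/*
 * Illustrative usage (hedged: this mirrors existing callers such as
 * moea64_enter() and moea64_quick_enter_page(); it is not a new
 * interface). The WIMG bits are computed once per mapping and folded into
 * the low word next to the physical address:
 *
 *	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) |
 *	    moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 */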

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz / (4 * sizeof(cell_t))]; /* >= 4 cells per */
	pcell_t		acells, trans_cells[sz / sizeof(cell_t)];
	struct pvo_entry *pvo;
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof(*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
	#ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
	#endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			/* If this address is direct-mapped, skip remapping */
			if (hw_direct_map &&
			    translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
			    == LPTE_M)
				continue;

			PMAP_LOCK(kernel_pmap);
			pvo = moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off);
			PMAP_UNLOCK(kernel_pmap);
			if (pvo != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		if (moea64_large_page_size == 0) {
			moea64_large_page_size = 0x1000000; /* 16 MB */
			moea64_large_page_shift = 24;
		}
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(aim.slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static int
moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr,
    int bootstrap)
{
	struct pvo_entry *pvo;
	uint64_t pte_lo;
	int error;

	pte_lo = LPTE_M;
	pte_lo |= attr;

	pvo = alloc_pvo_entry(bootstrap);
	pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
	init_pvo_entry(pvo, kernel_pmap, va);

	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
	    VM_PROT_EXECUTE;
	pvo->pvo_pte.pa = pa | pte_lo;
	error = moea64_pvo_enter(mmup, pvo, NULL, NULL);
	if (error != 0)
		panic("Error %d inserting large page\n", error);
	return (0);
}
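
/*
 * Usage note (hedged: this describes the existing caller below, nothing
 * new): moea64_setup_direct_map() calls moea64_kenter_large() once per
 * large page of physical memory to build the direct map, passing
 * bootstrap = 1 so the PVOs come from the static bpvo pool rather than
 * the (not yet initialized) UMA zone.
 */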

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa, pkernelstart, pkernelend;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
			for (pa = pregions[i].mr_start;
			    pa < pregions[i].mr_start + pregions[i].mr_size;
			    pa += moea64_large_page_size) {
				pte_lo = LPTE_M;
				if (pa & moea64_large_page_mask) {
					pa &= moea64_large_page_mask;
					pte_lo |= LPTE_G;
				}
				if (pa + moea64_large_page_size >
				    pregions[i].mr_start +
				    pregions[i].mr_size)
					pte_lo |= LPTE_G;

				moea64_kenter_large(mmup, PHYS_TO_DMAP(pa),
				    pa, pte_lo, 1);
			}
		}
		PMAP_UNLOCK(kernel_pmap);
	}

	/*
	 * Make sure the kernel and BPVO pool stay mapped on systems either
	 * without a direct map or on which the kernel is not already executing
	 * out of the direct-mapped region.
	 */
	if (kernelstart < DMAP_BASE_ADDRESS) {
		/*
		 * For pre-dmap execution, we need to use identity mapping
		 * because we will be operating with the mmu on but in the
		 * wrong address configuration until we __restartkernel().
		 */
		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	} else if (!hw_direct_map) {
		pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
		pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
		for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
	}

	if (!hw_direct_map) {
		size = moea64_bpvo_pool_size * sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/* Map exception vectors */
		for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

/* Quick sort callout for comparing physical addresses. */
static int
pa_cmp(const void *a, const void *b)
{
	const vm_paddr_t *pa = a, *pb = b;

	if (*pa < *pb)
		return (-1);
	else if (*pa > *pb)
		return (1);
	else
		return (0);
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;
	vm_paddr_t	kernelphysstart, kernelphysend;
	int		rm_pavail;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;

	/* Install trap handlers for SLBs */
	bcopy(&slbtrap, (void *)EXC_DSE,
	    (size_t)&slbtrapend - (size_t)&slbtrap);
	bcopy(&slbtrap, (void *)EXC_ISE,
	    (size_t)&slbtrapend - (size_t)&slbtrap);
	__syncicache((void *)EXC_DSE, 0x80);
	__syncicache((void *)EXC_ISE, 0x80);
#endif

	kernelphysstart = kernelstart & ~DMAP_BASE_ADDRESS;
	kernelphysend = kernelend & ~DMAP_BASE_ADDRESS;

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (PHYS_AVAIL_ENTRIES < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *)&hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
				dump_avail[j] = phys_avail[j];
				dump_avail[j + 1] = phys_avail[j + 1];
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
		dump_avail[j] = phys_avail[j];
		dump_avail[j + 1] = phys_avail[j + 1];
	}

	/* Check for overlap with the kernel and exception vectors */
	rm_pavail = 0;
	for (j = 0; j < 2 * phys_avail_count; j += 2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (phys_avail[j] >= kernelphysstart &&
		    phys_avail[j + 1] <= kernelphysend) {
			phys_avail[j] = phys_avail[j + 1] = ~0;
			rm_pavail++;
			continue;
		}

		if (kernelphysstart >= phys_avail[j] &&
		    kernelphysstart < phys_avail[j + 1]) {
			if (kernelphysend < phys_avail[j + 1]) {
				phys_avail[2 * phys_avail_count] =
				    (kernelphysend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2 * phys_avail_count + 1] =
				    phys_avail[j + 1];
				phys_avail_count++;
			}

			phys_avail[j + 1] = kernelphysstart & ~PAGE_MASK;
		}

		if (kernelphysend >= phys_avail[j] &&
		    kernelphysend < phys_avail[j + 1]) {
			if (kernelphysstart > phys_avail[j]) {
				phys_avail[2 * phys_avail_count] =
				    phys_avail[j];
				phys_avail[2 * phys_avail_count + 1] =
				    kernelphysstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelphysend & ~PAGE_MASK) +
			    PAGE_SIZE;
		}
	}

	/* Remove physical available regions marked for removal (~0) */
	if (rm_pavail) {
		qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]),
		    pa_cmp);
		phys_avail_count -= rm_pavail;
		for (i = 2 * phys_avail_count;
		    i < 2 * (phys_avail_count + rm_pavail); i += 2)
			phys_avail[i] = phys_avail[i + 1] = 0;
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Initialize SLB table lock and page locks
	 */
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
	for (i = 0; i < PV_LOCK_COUNT; i++)
		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);

	/*
	 * Initialise the bootstrap pvo pool.
	 */
	TUNABLE_INT_FETCH("machdep.moea64_bpvo_pool_size",
	    &moea64_bpvo_pool_size);
	if (moea64_bpvo_pool_size == 0) {
		if (!hw_direct_map)
			moea64_bpvo_pool_size = ((ptoa((uintmax_t)physmem) *
			    sizeof(struct vm_page)) /
			    (PAGE_SIZE * PAGE_SIZE)) *
			    BPVO_POOL_EXPANSION_FACTOR;
		else
			moea64_bpvo_pool_size = BPVO_POOL_SIZE;
	}

	if (boothowto & RB_VERBOSE) {
		printf("mmu_oea64: bpvo pool entries = %d, bpvo pool size = %zu MB\n",
		    moea64_bpvo_pool_size,
		    moea64_bpvo_pool_size * sizeof(struct pvo_entry) /
		    1048576);
	}

	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
	    moea64_bpvo_pool_size * sizeof(struct pvo_entry), PAGE_SIZE);
	moea64_bpvo_pool_index = 0;

	/* Place at address usable through the direct map */
	if (hw_direct_map)
		moea64_bpvo_pool = (struct pvo_entry *)
		    PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_aim.slb[i].slbv = 0;
		pcpup->pc_aim.slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	ssize_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	Maxmem = 0;
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));

	/*
	 * Initialize MMU.
	 */
	MMU_CPU_BOOTSTRAP(mmup, 0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Remap any early IO mappings (console framebuffer, etc.)
	 */
	bs_remap_earlyboot();

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end + 1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + kstack_pages * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = kstack_pages;
	for (i = 0; i < kstack_pages; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, curcpu);

	crashdumpmap = (caddr_t)virtual_avail;
	virtual_avail += MAXDUMPPGS * PAGE_SIZE;

	/*
	 * Allocate some things for page zeroing. We put these directly
	 * in the page table and use MOEA64_PTE_REPLACE to keep the PVO
	 * book-keeping and the rest of the VM system from even knowing
	 * that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end + 1) -
			    PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			PMAP_LOCK(kernel_pmap);
			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			PMAP_UNLOCK(kernel_pmap);
		}
	}

	numa_mem_regions(&numa_pregions, &numapregions_sz);
}

static void
moea64_pmap_init_qpages(void)
{
	struct pcpu *pc;
	int i;

	if (hw_direct_map)
		return;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
		if (pc->pc_qmap_addr == 0)
			panic("pmap_init_qpages: unable to allocate KVA");
		PMAP_LOCK(kernel_pmap);
		pc->pc_aim.qmap_pvo =
		    moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
		PMAP_UNLOCK(kernel_pmap);
		mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF);
	}
}

SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);

/*
 * Activate a user pmap.  This mostly involves setting some non-CPU
 * state.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(aim.userslb, pm->pm_slb);
	__asm __volatile("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(aim.userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;
	vm_page_t m;
	int64_t	refchg;

	key.pvo_vaddr = sva;
	PMAP_LOCK(pm);
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
			if (refchg < 0)
				refchg = LPTE_CHG;
			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
			if (refchg & LPTE_CHG)
				vm_page_dirty(m);
			if (refchg & LPTE_REF)
				vm_page_aflag_set(m, PGA_REFERENCED);
		}
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here.
 */
static __inline void
moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
{
	struct pvo_entry *pvo;

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	pvo = moea64_scratchpage_pvo[which];
	PMAP_LOCK(pvo->pvo_pmap);
	pvo->pvo_pte.pa =
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE);
	PMAP_UNLOCK(pvo->pvo_pmap);
	isync();
}
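
/*
 * Usage note (hedged: this only points at existing callers): consumers
 * such as moea64_copy_page() and moea64_zero_page() below take
 * moea64_scratchpage_mtx, retarget scratch mapping 0 (and 1 for copies)
 * with moea64_set_scratchpage_pa(), and then access the frame through
 * moea64_scratchpage_va[].
 */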

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
		    PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = PHYS_TO_DMAP(pa);
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

vm_offset_t
moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);

	if (hw_direct_map)
		return (PHYS_TO_DMAP(pa));

	/*
	 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
	 * a critical section and access the PCPU data like on i386.
	 * Instead, pin the thread and grab the PCPU lock to prevent
	 * a preempting thread from using the same PCPU data.
	 */
	sched_pin();

	mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED);
	pvo = PCPU_GET(aim.qmap_pvo);

	mtx_lock(PCPU_PTR(aim.qmap_lock));
	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
	    (uint64_t)pa;
	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
	isync();

	return (PCPU_GET(qmap_addr));
}

void
moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
	if (hw_direct_map)
		return;

	mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
	KASSERT(PCPU_GET(qmap_addr) == addr,
	    ("moea64_quick_remove_page: invalid address"));
	mtx_unlock(PCPU_PTR(aim.qmap_lock));
	sched_unpin();
}
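
/*
 * Illustrative use of the quick-map pair above (a sketch of the generic
 * pmap_quick_enter_page()/pmap_quick_remove_page() pattern; no new
 * consumer is added here):
 *
 *	vm_offset_t qva;
 *
 *	qva = pmap_quick_enter_page(m);
 *	bcopy(buf, (void *)qva, PAGE_SIZE);
 *	pmap_quick_remove_page(qva);
 *
 * On direct-map systems the enter call simply returns the page's
 * direct-map address and the remove call is a no-op.
 */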

boolean_t
moea64_page_is_mapped(mmu_t mmu, vm_page_t m)
{
	return (!LIST_EMPTY(&m->md.mdpg_pvoh));
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

int
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	struct		pvo_entry *pvo, *oldpvo;
	struct		pvo_head *pvo_head;
	uint64_t	pte_lo;
	int		error;

	if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
			VM_PAGE_OBJECT_BUSY_ASSERT(m);
		else
			VM_OBJECT_ASSERT_LOCKED(m->object);
	}

	pvo = alloc_pvo_entry(0);
	if (pvo == NULL)
		return (KERN_RESOURCE_SHORTAGE);
	pvo->pvo_pmap = NULL; /* to be filled in later */
	pvo->pvo_pte.prot = prot;

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo->pvo_vaddr |= PVO_WIRED;

	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
		pvo_head = NULL;
	} else {
		pvo_head = &m->md.mdpg_pvoh;
		pvo->pvo_vaddr |= PVO_MANAGED;
	}

	PV_PAGE_LOCK(m);
	PMAP_LOCK(pmap);
	if (pvo->pvo_pmap == NULL)
		init_pvo_entry(pvo, pmap, va);
	if (prot & VM_PROT_WRITE)
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);

	error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo);
	if (error == EEXIST) {
		if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
		    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
		    oldpvo->pvo_pte.prot == prot) {
			/* Identical mapping already exists */
			error = 0;

			/* If not in page table, reinsert it */
			if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
				STAT_MOEA64(moea64_pte_overflow--);
				MOEA64_PTE_INSERT(mmu, oldpvo);
			}

			/* Then just clean up and go home */
			PV_PAGE_UNLOCK(m);
			PMAP_UNLOCK(pmap);
			free_pvo_entry(pvo);
			goto out;
		} else {
			/* Otherwise, need to kill it first */
			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
			    "mapping does not match new mapping"));
			moea64_pvo_remove_from_pmap(mmu, oldpvo);
			moea64_pvo_enter(mmu, pvo, pvo_head, NULL);
		}
	}
	PMAP_UNLOCK(pmap);
	PV_PAGE_UNLOCK(m);

	/* Free any dead pages */
	if (error == EEXIST) {
		moea64_pvo_remove_from_page(mmu, oldpvo);
		free_pvo_entry(oldpvo);
	}

out:
	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
	return (KERN_SUCCESS);
}

static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map. Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)(uintptr_t)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
		    PMAP_ENTER_QUICK_LOCKED, 0);
		m = TAILQ_NEXT(m, listq);
	}
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);

	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
		if (!vm_page_wire_mapped(m))
			m = NULL;
	}
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *flags, int wait)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	vm_page_t m;
	int needed_lock;

	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);

	m = vm_page_alloc_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
	if (m == NULL)
		return (NULL);

	va = VM_PAGE_TO_PHYS(m);

	pvo = alloc_pvo_entry(1 /* bootstrap */);

	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;

	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	init_pvo_entry(pvo, kernel_pmap, va);
	pvo->pvo_vaddr |= PVO_WIRED;

	moea64_pvo_enter(installed_mmu, pvo, NULL, NULL);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return ((void *)va);
}

extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));

	return (moea64_query_bit(mmu, m, LPTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not busied then this check is racy.
	 */
	if (!pmap_page_is_write_mapped(m))
		return (FALSE);

	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv = TRUE;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL)
		rv = FALSE;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	int64_t	refchg, ret;
	pmap_t	pmap;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));
	vm_page_assert_busied(m);

1791 	if (!pmap_page_is_write_mapped(m))
		return;
1793 
1794 	powerpc_sync();
1795 	PV_PAGE_LOCK(m);
1796 	refchg = 0;
1797 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1798 		pmap = pvo->pvo_pmap;
1799 		PMAP_LOCK(pmap);
1800 		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1801 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1802 			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1803 			ret = MOEA64_PTE_REPLACE(mmu, pvo,
1804 			    MOEA64_PTE_PROT_UPDATE);
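			/*
			 * A negative return means the PTE was evicted from
			 * the page table; be pessimistic and assume the
			 * page was dirtied.
			 */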
1805 			if (ret < 0)
1806 				ret = LPTE_CHG;
1807 			refchg |= ret;
1808 			if (pvo->pvo_pmap == kernel_pmap)
1809 				isync();
1810 		}
1811 		PMAP_UNLOCK(pmap);
1812 	}
1813 	if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1814 		vm_page_dirty(m);
1815 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1816 	PV_PAGE_UNLOCK(m);
1817 }
1818 
1819 /*
1820  *	moea64_ts_referenced:
1821  *
1822  *	Return a count of reference bits for a page, clearing those bits.
1823  *	It is not necessary for every reference bit to be cleared, but it
1824  *	is necessary that 0 only be returned when there are truly no
1825  *	reference bits set.
1826  *
1827  *	XXX: The exact number of bits to check and clear is a matter that
1828  *	should be tested and standardized at some point in the future for
1829  *	optimal aging of shared pages.
1830  */
1831 int
1832 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1833 {
1834 
1835 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1836 	    ("moea64_ts_referenced: page %p is not managed", m));
1837 	return (moea64_clear_bit(mmu, m, LPTE_REF));
1838 }
1839 
1840 /*
1841  * Modify the WIMG settings of all mappings for a page.
1842  */
1843 void
1844 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1845 {
1846 	struct	pvo_entry *pvo;
1847 	int64_t	refchg;
1848 	pmap_t	pmap;
1849 	uint64_t lo;
1850 
1851 	if ((m->oflags & VPO_UNMANAGED) != 0) {
1852 		m->md.mdpg_cache_attrs = ma;
1853 		return;
1854 	}
1855 
1856 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1857 
1858 	PV_PAGE_LOCK(m);
1859 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1860 		pmap = pvo->pvo_pmap;
1861 		PMAP_LOCK(pmap);
1862 		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1863 			pvo->pvo_pte.pa &= ~LPTE_WIMG;
1864 			pvo->pvo_pte.pa |= lo;
1865 			refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1866 			    MOEA64_PTE_INVALIDATE);
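			/* Evicted from the page table: assume dirty if writable. */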
1867 			if (refchg < 0)
1868 				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1869 				    LPTE_CHG : 0;
1870 			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1871 			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1872 				refchg |=
1873 				    atomic_readandclear_32(&m->md.mdpg_attrs);
1874 				if (refchg & LPTE_CHG)
1875 					vm_page_dirty(m);
1876 				if (refchg & LPTE_REF)
1877 					vm_page_aflag_set(m, PGA_REFERENCED);
1878 			}
1879 			if (pvo->pvo_pmap == kernel_pmap)
1880 				isync();
1881 		}
1882 		PMAP_UNLOCK(pmap);
1883 	}
1884 	m->md.mdpg_cache_attrs = ma;
1885 	PV_PAGE_UNLOCK(m);
1886 }
1887 
1888 /*
1889  * Map a wired page into kernel virtual address space.
1890  */
1891 void
1892 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1893 {
1894 	int		error;
1895 	struct pvo_entry *pvo, *oldpvo;
1896 
1897 	do {
1898 		pvo = alloc_pvo_entry(0);
1899 		if (pvo == NULL)
1900 			vm_wait(NULL);
1901 	} while (pvo == NULL);
1902 	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1903 	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1904 	pvo->pvo_vaddr |= PVO_WIRED;
1905 
1906 	PMAP_LOCK(kernel_pmap);
1907 	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1908 	if (oldpvo != NULL)
1909 		moea64_pvo_remove_from_pmap(mmu, oldpvo);
1910 	init_pvo_entry(pvo, kernel_pmap, va);
1911 	error = moea64_pvo_enter(mmu, pvo, NULL, NULL);
1912 	PMAP_UNLOCK(kernel_pmap);
1913 
1914 	/* Free any dead pages */
1915 	if (oldpvo != NULL) {
1916 		moea64_pvo_remove_from_page(mmu, oldpvo);
1917 		free_pvo_entry(oldpvo);
1918 	}
1919 
1920 	if (error != 0)
1921 		panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
1922 		    (uintmax_t)pa, error);
1923 }
1924 
1925 void
1926 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1927 {
1928 
1929 	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1930 }
1931 
1932 /*
1933  * Extract the physical page address associated with the given kernel virtual
1934  * address.
1935  */
1936 vm_paddr_t
1937 moea64_kextract(mmu_t mmu, vm_offset_t va)
1938 {
1939 	struct		pvo_entry *pvo;
1940 	vm_paddr_t pa;
1941 
1942 	/*
1943 	 * Shortcut the direct-mapped case when applicable.  We never put
1944 	 * anything but 1:1 (or 62-bit aliased) mappings below
1945 	 * VM_MIN_KERNEL_ADDRESS.
1946 	 */
1947 	if (va < VM_MIN_KERNEL_ADDRESS)
1948 		return (va & ~DMAP_BASE_ADDRESS);
1949 
1950 	PMAP_LOCK(kernel_pmap);
1951 	pvo = moea64_pvo_find_va(kernel_pmap, va);
1952 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1953 	    va));
1954 	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1955 	PMAP_UNLOCK(kernel_pmap);
1956 	return (pa);
1957 }
1958 
1959 /*
1960  * Remove a wired page from kernel virtual address space.
1961  */
1962 void
1963 moea64_kremove(mmu_t mmu, vm_offset_t va)
1964 {
1965 	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1966 }
1967 
1968 /*
1969  * Provide a kernel pointer corresponding to a given userland pointer.
1970  * The returned pointer is valid until the next time this function is
1971  * called in this thread. This is used internally in copyin/copyout.
1972  */
1973 static int
1974 moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
1975     void **kaddr, size_t ulen, size_t *klen)
1976 {
1977 	size_t l;
1978 #ifdef __powerpc64__
1979 	struct slb *slb;
1980 #endif
1981 	register_t slbv;
1982 
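	/*
	 * The user segment containing uaddr is windowed at USER_ADDR in
	 * the kernel address space; clamp the length so the window does
	 * not run past the end of that segment.
	 */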
1983 	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
1984 	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
1985 	if (l > ulen)
1986 		l = ulen;
1987 	if (klen)
1988 		*klen = l;
1989 	else if (l != ulen)
1990 		return (EFAULT);
1991 
1992 #ifdef __powerpc64__
1993 	/* Try lockless look-up first */
1994 	slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
1995 
1996 	if (slb == NULL) {
1997 		/* If it isn't there, we need to pre-fault the VSID */
1998 		PMAP_LOCK(pm);
1999 		slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
2000 		PMAP_UNLOCK(pm);
2001 	} else {
2002 		slbv = slb->slbv;
2003 	}
2004 
2005 	/* Mark segment no-execute */
2006 	slbv |= SLBV_N;
2007 #else
2008 	slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
2009 
2010 	/* Mark segment no-execute */
2011 	slbv |= SR_N;
2012 #endif
2013 
2014 	/* If we have already set this VSID, we can just return */
2015 	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
2016 		return (0);
2017 
2018 	__asm __volatile("isync");
2019 	curthread->td_pcb->pcb_cpu.aim.usr_segm =
2020 	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
2021 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
2022 #ifdef __powerpc64__
2023 	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
2024 	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
2025 #else
2026 	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
2027 #endif
2028 
2029 	return (0);
2030 }
2031 
2032 /*
2033  * Figure out where a given kernel pointer (usually in a fault) points
2034  * to from the VM's perspective, potentially remapping into userland's
2035  * address space.
2036  */
2037 static int
2038 moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2039     vm_offset_t *decoded_addr)
2040 {
2041 	vm_offset_t user_sr;
2042 
2043 	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
2044 		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
2045 		addr &= ADDR_PIDX | ADDR_POFF;
2046 		addr |= user_sr << ADDR_SR_SHFT;
2047 		*decoded_addr = addr;
2048 		*is_user = 1;
2049 	} else {
2050 		*decoded_addr = addr;
2051 		*is_user = 0;
2052 	}
2053 
2054 	return (0);
2055 }
2056 
2057 /*
2058  * Map a range of physical addresses into kernel virtual address space.
2059  *
2060  * The value passed in *virt is a suggested virtual address for the mapping.
2061  * Architectures which can support a direct-mapped physical to virtual region
2062  * can return the appropriate address within that region, leaving '*virt'
2063  * unchanged.  Other architectures should map the pages starting at '*virt' and
2064  * update '*virt' with the first usable address after the mapped region.
2065  */
2066 vm_offset_t
2067 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2068     vm_paddr_t pa_end, int prot)
2069 {
2070 	vm_offset_t	sva, va;
2071 
2072 	if (hw_direct_map) {
2073 		/*
2074 		 * Check if every page in the region is covered by the direct
2075 		 * map. The direct map covers all of physical memory. Use
2076 		 * moea64_calc_wimg() as a shortcut to see if the page is in
2077 		 * physical memory as a way to see if the direct map covers it.
2078 		 */
2079 		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
2080 			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
2081 				break;
2082 		if (va == pa_end)
2083 			return (PHYS_TO_DMAP(pa_start));
2084 	}
2085 	sva = *virt;
2086 	va = sva;
2087 	/* XXX respect prot argument */
2088 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
2089 		moea64_kenter(mmu, va, pa_start);
2090 	*virt = va;
2091 
2092 	return (sva);
2093 }
2094 
2095 /*
2096  * Returns true if the pmap's pv is one of the first
2097  * 16 pvs linked to from this page.  This count may
2098  * be changed upwards or downwards in the future; it
2099  * is only necessary that true be returned for a small
2100  * subset of pmaps for proper page aging.
2101  */
2102 boolean_t
2103 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2104 {
	int loops;
2106 	struct pvo_entry *pvo;
2107 	boolean_t rv;
2108 
2109 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2110 	    ("moea64_page_exists_quick: page %p is not managed", m));
2111 	loops = 0;
2112 	rv = FALSE;
2113 	PV_PAGE_LOCK(m);
2114 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2115 		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
2116 			rv = TRUE;
2117 			break;
2118 		}
2119 		if (++loops >= 16)
2120 			break;
2121 	}
2122 	PV_PAGE_UNLOCK(m);
2123 	return (rv);
2124 }
2125 
2126 void
2127 moea64_page_init(mmu_t mmu __unused, vm_page_t m)
2128 {
2129 
2130 	m->md.mdpg_attrs = 0;
2131 	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
2132 	LIST_INIT(&m->md.mdpg_pvoh);
2133 }
2134 
2135 /*
2136  * Return the number of managed mappings to the given physical page
2137  * that are wired.
2138  */
2139 int
2140 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
2141 {
2142 	struct pvo_entry *pvo;
2143 	int count;
2144 
2145 	count = 0;
2146 	if ((m->oflags & VPO_UNMANAGED) != 0)
2147 		return (count);
2148 	PV_PAGE_LOCK(m);
2149 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2150 		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
2151 			count++;
2152 	PV_PAGE_UNLOCK(m);
2153 	return (count);
2154 }
2155 
2156 static uintptr_t	moea64_vsidcontext;
2157 
2158 uintptr_t
moea64_get_unique_vsid(void)
{
2160 	u_int entropy;
2161 	register_t hash;
2162 	uint32_t mask;
2163 	int i;
2164 
2165 	entropy = 0;
2166 	__asm __volatile("mftb %0" : "=r"(entropy));
2167 
2168 	mtx_lock(&moea64_slb_mutex);
2169 	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2170 		u_int	n;
2171 
2172 		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
2177 		 * instead of a multiply.)
2178 		 */
2179 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
2180 		hash = moea64_vsidcontext & (NVSIDS - 1);
2181 		if (hash == 0)		/* 0 is special, avoid it */
2182 			continue;
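		/*
		 * One bit per VSID: 'n' selects the 32-bit bitmap word,
		 * 'mask' the bit within it.
		 */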
2183 		n = hash >> 5;
2184 		mask = 1 << (hash & (VSID_NBPW - 1));
2185 		hash = (moea64_vsidcontext & VSID_HASHMASK);
2186 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
2187 			/* anything free in this bucket? */
2188 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
2189 				entropy = (moea64_vsidcontext >> 20);
2190 				continue;
2191 			}
2192 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
2193 			mask = 1 << i;
2194 			hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
2195 			hash |= i;
2196 		}
2197 		if (hash == VSID_VRMA)	/* also special, avoid this too */
2198 			continue;
2199 		KASSERT(!(moea64_vsid_bitmap[n] & mask),
2200 		    ("Allocating in-use VSID %#zx\n", hash));
2201 		moea64_vsid_bitmap[n] |= mask;
2202 		mtx_unlock(&moea64_slb_mutex);
2203 		return (hash);
2204 	}
2205 
2206 	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments", __func__);
2208 }
2209 
2210 #ifdef __powerpc64__
2211 void
2212 moea64_pinit(mmu_t mmu, pmap_t pmap)
2213 {
2214 
2215 	RB_INIT(&pmap->pmap_pvo);
2216 
2217 	pmap->pm_slb_tree_root = slb_alloc_tree();
2218 	pmap->pm_slb = slb_alloc_user_cache();
2219 	pmap->pm_slb_len = 0;
2220 }
2221 #else
2222 void
2223 moea64_pinit(mmu_t mmu, pmap_t pmap)
2224 {
2225 	int	i;
2226 	uint32_t hash;
2227 
2228 	RB_INIT(&pmap->pmap_pvo);
2229 
2230 	if (pmap_bootstrapped)
2231 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2232 		    (vm_offset_t)pmap);
2233 	else
2234 		pmap->pmap_phys = pmap;
2235 
2236 	/*
2237 	 * Allocate some segment registers for this pmap.
2238 	 */
2239 	hash = moea64_get_unique_vsid();
2240 
2241 	for (i = 0; i < 16; i++)
2242 		pmap->pm_sr[i] = VSID_MAKE(i, hash);
2243 
2244 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2245 }
2246 #endif
2247 
2248 /*
2249  * Initialize the pmap associated with process 0.
2250  */
2251 void
2252 moea64_pinit0(mmu_t mmu, pmap_t pm)
2253 {
2254 
2255 	PMAP_LOCK_INIT(pm);
2256 	moea64_pinit(mmu, pm);
2257 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2258 }
2259 
2260 /*
2261  * Set the physical protection on the specified range of this map as requested.
2262  */
2263 static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2265 {
2266 	struct vm_page *pg;
2267 	vm_prot_t oldprot;
2268 	int32_t refchg;
2269 
2270 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2271 
2272 	/*
2273 	 * Change the protection of the page.
2274 	 */
2275 	oldprot = pvo->pvo_pte.prot;
2276 	pvo->pvo_pte.prot = prot;
2277 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2278 
2279 	/*
2280 	 * If the PVO is in the page table, update mapping
2281 	 */
2282 	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2283 	if (refchg < 0)
2284 		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2285 
2286 	if (pm != kernel_pmap && pg != NULL &&
2287 	    (pg->a.flags & PGA_EXECUTABLE) == 0 &&
2288 	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2289 		if ((pg->oflags & VPO_UNMANAGED) == 0)
2290 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
2291 		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2292 		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2293 	}
2294 
2295 	/*
2296 	 * Update vm about the REF/CHG bits if the page is managed and we have
2297 	 * removed write access.
2298 	 */
2299 	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2300 	    (oldprot & VM_PROT_WRITE)) {
2301 		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2302 		if (refchg & LPTE_CHG)
2303 			vm_page_dirty(pg);
2304 		if (refchg & LPTE_REF)
2305 			vm_page_aflag_set(pg, PGA_REFERENCED);
2306 	}
2307 }
2308 
2309 void
2310 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2311     vm_prot_t prot)
2312 {
2313 	struct	pvo_entry *pvo, *tpvo, key;
2314 
2315 	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2316 	    sva, eva, prot);
2317 
2318 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2319 	    ("moea64_protect: non current pmap"));
2320 
2321 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2322 		moea64_remove(mmu, pm, sva, eva);
2323 		return;
2324 	}
2325 
2326 	PMAP_LOCK(pm);
2327 	key.pvo_vaddr = sva;
2328 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2329 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2330 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2331 		moea64_pvo_protect(mmu, pm, pvo, prot);
2332 	}
2333 	PMAP_UNLOCK(pm);
2334 }
2335 
2336 /*
2337  * Map a list of wired pages into kernel virtual address space.  This is
2338  * intended for temporary mappings which do not need page modification or
2339  * references recorded.  Existing mappings in the region are overwritten.
2340  */
2341 void
2342 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2343 {
2344 	while (count-- > 0) {
2345 		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2346 		va += PAGE_SIZE;
2347 		m++;
2348 	}
2349 }
2350 
2351 /*
2352  * Remove page mappings from kernel virtual address space.  Intended for
2353  * temporary mappings entered by moea64_qenter.
2354  */
2355 void
2356 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2357 {
2358 	while (count-- > 0) {
2359 		moea64_kremove(mmu, va);
2360 		va += PAGE_SIZE;
2361 	}
2362 }
2363 
2364 void
2365 moea64_release_vsid(uint64_t vsid)
2366 {
2367 	int idx, mask;
2368 
2369 	mtx_lock(&moea64_slb_mutex);
2370 	idx = vsid & (NVSIDS-1);
2371 	mask = 1 << (idx % VSID_NBPW);
2372 	idx /= VSID_NBPW;
2373 	KASSERT(moea64_vsid_bitmap[idx] & mask,
2374 	    ("Freeing unallocated VSID %#jx", vsid));
2375 	moea64_vsid_bitmap[idx] &= ~mask;
2376 	mtx_unlock(&moea64_slb_mutex);
2377 }
2378 
2380 void
2381 moea64_release(mmu_t mmu, pmap_t pmap)
2382 {
2383 
2384 	/*
2385 	 * Free segment registers' VSIDs
2386 	 */
#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif
2395 }
2396 
2397 /*
2398  * Remove all pages mapped by the specified pmap
2399  */
2400 void
2401 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2402 {
2403 	struct pvo_entry *pvo, *tpvo;
2404 	struct pvo_dlist tofree;
2405 
2406 	SLIST_INIT(&tofree);
2407 
2408 	PMAP_LOCK(pm);
2409 	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2410 		if (pvo->pvo_vaddr & PVO_WIRED)
2411 			continue;
2412 
2413 		/*
2414 		 * For locking reasons, remove this from the page table and
2415 		 * pmap, but save delinking from the vm_page for a second
2416 		 * pass
2417 		 */
2418 		moea64_pvo_remove_from_pmap(mmu, pvo);
2419 		SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2420 	}
2421 	PMAP_UNLOCK(pm);
2422 
2423 	while (!SLIST_EMPTY(&tofree)) {
2424 		pvo = SLIST_FIRST(&tofree);
2425 		SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2426 		moea64_pvo_remove_from_page(mmu, pvo);
2427 		free_pvo_entry(pvo);
2428 	}
2429 }
2430 
2431 /*
2432  * Remove the given range of addresses from the specified map.
2433  */
2434 void
2435 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2436 {
2437 	struct  pvo_entry *pvo, *tpvo, key;
2438 	struct pvo_dlist tofree;
2439 
2440 	/*
2441 	 * Perform an unsynchronized read.  This is, however, safe.
2442 	 */
2443 	if (pm->pm_stats.resident_count == 0)
2444 		return;
2445 
2446 	key.pvo_vaddr = sva;
2447 
2448 	SLIST_INIT(&tofree);
2449 
2450 	PMAP_LOCK(pm);
2451 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2452 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2453 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2454 
2455 		/*
2456 		 * For locking reasons, remove this from the page table and
2457 		 * pmap, but save delinking from the vm_page for a second
2458 		 * pass
2459 		 */
2460 		moea64_pvo_remove_from_pmap(mmu, pvo);
2461 		SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2462 	}
2463 	PMAP_UNLOCK(pm);
2464 
2465 	while (!SLIST_EMPTY(&tofree)) {
2466 		pvo = SLIST_FIRST(&tofree);
2467 		SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2468 		moea64_pvo_remove_from_page(mmu, pvo);
2469 		free_pvo_entry(pvo);
2470 	}
2471 }
2472 
2473 /*
2474  * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2475  * will reflect changes in pte's back to the vm_page.
2476  */
2477 void
2478 moea64_remove_all(mmu_t mmu, vm_page_t m)
2479 {
2480 	struct	pvo_entry *pvo, *next_pvo;
2481 	struct	pvo_head freequeue;
2482 	int	wasdead;
2483 	pmap_t	pmap;
2484 
2485 	LIST_INIT(&freequeue);
2486 
2487 	PV_PAGE_LOCK(m);
2488 	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2489 		pmap = pvo->pvo_pmap;
2490 		PMAP_LOCK(pmap);
2491 		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2492 		if (!wasdead)
2493 			moea64_pvo_remove_from_pmap(mmu, pvo);
2494 		moea64_pvo_remove_from_page_locked(mmu, pvo, m);
2495 		if (!wasdead)
2496 			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2497 		PMAP_UNLOCK(pmap);
2499 	}
2500 	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2501 	KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
2502 	PV_PAGE_UNLOCK(m);
2503 
2504 	/* Clean up UMA allocations */
2505 	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2506 		free_pvo_entry(pvo);
2507 }
2508 
2509 /*
2510  * Allocate a physical page of memory directly from the phys_avail map.
2511  * Can only be called from moea64_bootstrap before avail start and end are
2512  * calculated.
2513  */
2514 vm_offset_t
2515 moea64_bootstrap_alloc(vm_size_t size, vm_size_t align)
2516 {
2517 	vm_offset_t	s, e;
2518 	int		i, j;
2519 
2520 	size = round_page(size);
2521 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2522 		if (align != 0)
2523 			s = roundup2(phys_avail[i], align);
2524 		else
2525 			s = phys_avail[i];
2526 		e = s + size;
2527 
2528 		if (s < phys_avail[i] || e > phys_avail[i + 1])
2529 			continue;
2530 
2531 		if (s + size > platform_real_maxaddr())
2532 			continue;
2533 
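		/*
		 * Carve the allocation out of this phys_avail region:
		 * trim it from below, trim it from above, or split it
		 * into two regions.
		 */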
2534 		if (s == phys_avail[i]) {
2535 			phys_avail[i] += size;
2536 		} else if (e == phys_avail[i + 1]) {
2537 			phys_avail[i + 1] -= size;
2538 		} else {
2539 			for (j = phys_avail_count * 2; j > i; j -= 2) {
2540 				phys_avail[j] = phys_avail[j - 2];
2541 				phys_avail[j + 1] = phys_avail[j - 1];
2542 			}
2543 
2544 			phys_avail[i + 3] = phys_avail[i + 1];
2545 			phys_avail[i + 1] = s;
2546 			phys_avail[i + 2] = e;
2547 			phys_avail_count++;
2548 		}
2549 
2550 		return (s);
2551 	}
2552 	panic("moea64_bootstrap_alloc: could not allocate memory");
2553 }
2554 
2555 static int
2556 moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
2557     struct pvo_entry **oldpvop)
2558 {
2559 	struct pvo_entry *old_pvo;
2560 	int err;
2561 
2562 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2563 
2564 	STAT_MOEA64(moea64_pvo_enter_calls++);
2565 
2566 	/*
2567 	 * Add to pmap list
2568 	 */
2569 	old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2570 
2571 	if (old_pvo != NULL) {
2572 		if (oldpvop != NULL)
2573 			*oldpvop = old_pvo;
2574 		return (EEXIST);
2575 	}
2576 
2577 	if (pvo_head != NULL) {
2578 		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2579 	}
2580 
2581 	if (pvo->pvo_vaddr & PVO_WIRED)
2582 		pvo->pvo_pmap->pm_stats.wired_count++;
2583 	pvo->pvo_pmap->pm_stats.resident_count++;
2584 
2585 	/*
2586 	 * Insert it into the hardware page table
2587 	 */
2588 	err = MOEA64_PTE_INSERT(mmu, pvo);
2589 	if (err != 0) {
2590 		panic("moea64_pvo_enter: overflow");
2591 	}
2592 
2593 	STAT_MOEA64(moea64_pvo_entries++);
2594 
2595 	if (pvo->pvo_pmap == kernel_pmap)
2596 		isync();
2597 
2598 #ifdef __powerpc64__
2599 	/*
2600 	 * Make sure all our bootstrap mappings are in the SLB as soon
2601 	 * as virtual memory is switched on.
2602 	 */
2603 	if (!pmap_bootstrapped)
2604 		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2605 		    pvo->pvo_vaddr & PVO_LARGE);
2606 #endif
2607 
2608 	return (0);
2609 }
2610 
2611 static void
2612 moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2613 {
2614 	struct	vm_page *pg;
2615 	int32_t refchg;
2616 
2617 	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2618 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2619 	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2620 
2621 	/*
2622 	 * If there is an active pte entry, we need to deactivate it
2623 	 */
2624 	refchg = MOEA64_PTE_UNSET(mmu, pvo);
2625 	if (refchg < 0) {
2626 		/*
2627 		 * If it was evicted from the page table, be pessimistic and
2628 		 * dirty the page.
2629 		 */
2630 		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2631 			refchg = LPTE_CHG;
2632 		else
2633 			refchg = 0;
2634 	}
2635 
2636 	/*
2637 	 * Update our statistics.
2638 	 */
2639 	pvo->pvo_pmap->pm_stats.resident_count--;
2640 	if (pvo->pvo_vaddr & PVO_WIRED)
2641 		pvo->pvo_pmap->pm_stats.wired_count--;
2642 
2643 	/*
2644 	 * Remove this PVO from the pmap list.
2645 	 */
2646 	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2647 
2648 	/*
2649 	 * Mark this for the next sweep
2650 	 */
2651 	pvo->pvo_vaddr |= PVO_DEAD;
2652 
2653 	/* Send RC bits to VM */
2654 	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2655 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2656 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2657 		if (pg != NULL) {
2658 			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2659 			if (refchg & LPTE_CHG)
2660 				vm_page_dirty(pg);
2661 			if (refchg & LPTE_REF)
2662 				vm_page_aflag_set(pg, PGA_REFERENCED);
2663 		}
2664 	}
2665 }
2666 
2667 static inline void
2668 moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
2669     vm_page_t m)
2670 {
2671 
2672 	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2673 
2674 	/* Use NULL pmaps as a sentinel for races in page deletion */
2675 	if (pvo->pvo_pmap == NULL)
2676 		return;
2677 	pvo->pvo_pmap = NULL;
2678 
2679 	/*
2680 	 * Update vm about page writeability/executability if managed
2681 	 */
2682 	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
2683 	if (pvo->pvo_vaddr & PVO_MANAGED) {
2684 		if (m != NULL) {
2685 			LIST_REMOVE(pvo, pvo_vlink);
2686 			if (LIST_EMPTY(vm_page_to_pvoh(m)))
2687 				vm_page_aflag_clear(m,
2688 				    PGA_WRITEABLE | PGA_EXECUTABLE);
2689 		}
2690 	}
2691 
2692 	STAT_MOEA64(moea64_pvo_entries--);
2693 	STAT_MOEA64(moea64_pvo_remove_calls++);
2694 }
2695 
2696 static void
2697 moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
2698 {
2699 	vm_page_t pg = NULL;
2700 
2701 	if (pvo->pvo_vaddr & PVO_MANAGED)
2702 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2703 
2704 	PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2705 	moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
2706 	PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2707 }
2708 
2709 static struct pvo_entry *
2710 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2711 {
2712 	struct pvo_entry key;
2713 
2714 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2715 
2716 	key.pvo_vaddr = va & ~ADDR_POFF;
2717 	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2718 }
2719 
2720 static boolean_t
2721 moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2722 {
2723 	struct	pvo_entry *pvo;
2724 	int64_t ret;
2725 	boolean_t rv;
2726 
2727 	/*
2728 	 * See if this bit is stored in the page already.
2729 	 */
2730 	if (m->md.mdpg_attrs & ptebit)
2731 		return (TRUE);
2732 
2733 	/*
2734 	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
2735 	 * flushed to the PTEs.
2736 	 */
2737 	rv = FALSE;
2738 	powerpc_sync();
2739 	PV_PAGE_LOCK(m);
2740 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2741 		ret = 0;
2742 
2743 		/*
2744 		 * See if this pvo has a valid PTE.  if so, fetch the
2745 		 * REF/CHG bits from the valid PTE.  If the appropriate
2746 		 * ptebit is set, return success.
2747 		 */
2748 		PMAP_LOCK(pvo->pvo_pmap);
2749 		if (!(pvo->pvo_vaddr & PVO_DEAD))
2750 			ret = MOEA64_PTE_SYNCH(mmu, pvo);
2751 		PMAP_UNLOCK(pvo->pvo_pmap);
2752 
2753 		if (ret > 0) {
2754 			atomic_set_32(&m->md.mdpg_attrs,
2755 			    ret & (LPTE_CHG | LPTE_REF));
2756 			if (ret & ptebit) {
2757 				rv = TRUE;
2758 				break;
2759 			}
2760 		}
2761 	}
2762 	PV_PAGE_UNLOCK(m);
2763 
2764 	return (rv);
2765 }
2766 
2767 static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2769 {
2770 	u_int	count;
2771 	struct	pvo_entry *pvo;
2772 	int64_t ret;
2773 
2774 	/*
2775 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2776 	 * we can reset the right ones).
2777 	 */
2778 	powerpc_sync();
2779 
2780 	/*
2781 	 * For each pvo entry, clear the pte's ptebit.
2782 	 */
2783 	count = 0;
2784 	PV_PAGE_LOCK(m);
2785 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2786 		ret = 0;
2787 
2788 		PMAP_LOCK(pvo->pvo_pmap);
2789 		if (!(pvo->pvo_vaddr & PVO_DEAD))
2790 			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
2791 		PMAP_UNLOCK(pvo->pvo_pmap);
2792 
2793 		if (ret > 0 && (ret & ptebit))
2794 			count++;
2795 	}
2796 	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
2797 	PV_PAGE_UNLOCK(m);
2798 
2799 	return (count);
2800 }
2801 
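/*
 * Return 0 if the physical range [pa, pa + size) is covered by the direct
 * map, either in hardware or by existing 1:1 kernel mappings, and EFAULT
 * otherwise.
 */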
2802 boolean_t
2803 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2804 {
2805 	struct pvo_entry *pvo, key;
2806 	vm_offset_t ppa;
2807 	int error = 0;
2808 
2809 	if (hw_direct_map && mem_valid(pa, size) == 0)
2810 		return (0);
2811 
2812 	PMAP_LOCK(kernel_pmap);
2813 	ppa = pa & ~ADDR_POFF;
2814 	key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
2815 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2816 	    ppa < pa + size; ppa += PAGE_SIZE,
2817 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2818 		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
2819 			error = EFAULT;
2820 			break;
2821 		}
2822 	}
2823 	PMAP_UNLOCK(kernel_pmap);
2824 
2825 	return (error);
2826 }
2827 
2828 /*
2829  * Map a set of physical memory pages into the kernel virtual
2830  * address space. Return a pointer to where it is mapped. This
2831  * routine is intended to be used for mapping device memory,
2832  * NOT real memory.
2833  */
2834 void *
2835 moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2836 {
2837 	vm_offset_t va, tmpva, ppa, offset;
2838 
2839 	ppa = trunc_page(pa);
2840 	offset = pa & PAGE_MASK;
2841 	size = roundup2(offset + size, PAGE_SIZE);
2842 
2843 	va = kva_alloc(size);
2844 
2845 	if (!va)
2846 		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2847 
2848 	for (tmpva = va; size > 0;) {
2849 		moea64_kenter_attr(mmu, tmpva, ppa, ma);
2850 		size -= PAGE_SIZE;
2851 		tmpva += PAGE_SIZE;
2852 		ppa += PAGE_SIZE;
2853 	}
2854 
2855 	return ((void *)(va + offset));
2856 }
2857 
2858 void *
2859 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2860 {
2861 
2862 	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
2863 }
2864 
2865 void
2866 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2867 {
2868 	vm_offset_t base, offset;
2869 
2870 	base = trunc_page(va);
2871 	offset = va & PAGE_MASK;
2872 	size = roundup2(offset + size, PAGE_SIZE);
2873 
2874 	kva_free(base, size);
2875 }
2876 
2877 void
2878 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2879 {
2880 	struct pvo_entry *pvo;
2881 	vm_offset_t lim;
2882 	vm_paddr_t pa;
2883 	vm_size_t len;
2884 
2885 	if (__predict_false(pm == NULL))
2886 		pm = &curthread->td_proc->p_vmspace->vm_pmap;
2887 
2888 	PMAP_LOCK(pm);
2889 	while (sz > 0) {
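		/* Operate on at most one page per iteration. */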
		lim = round_page(va + 1);
2891 		len = MIN(lim - va, sz);
2892 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2893 		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
2894 			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
2895 			moea64_syncicache(mmu, pm, va, pa, len);
2896 		}
2897 		va += len;
2898 		sz -= len;
2899 	}
2900 	PMAP_UNLOCK(pm);
2901 }
2902 
2903 void
2904 moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2905 {
2906 
2907 	*va = (void *)(uintptr_t)pa;
2908 }
2909 
2910 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2911 
2912 void
2913 moea64_scan_init(mmu_t mmu)
2914 {
2915 	struct pvo_entry *pvo;
2916 	vm_offset_t va;
2917 	int i;
2918 
2919 	if (!do_minidump) {
2920 		/* Initialize phys. segments for dumpsys(). */
2921 		memset(&dump_map, 0, sizeof(dump_map));
2922 		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
2923 		for (i = 0; i < pregions_sz; i++) {
2924 			dump_map[i].pa_start = pregions[i].mr_start;
2925 			dump_map[i].pa_size = pregions[i].mr_size;
2926 		}
2927 		return;
2928 	}
2929 
2930 	/* Virtual segments for minidumps: */
2931 	memset(&dump_map, 0, sizeof(dump_map));
2932 
2933 	/* 1st: kernel .data and .bss. */
2934 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2935 	dump_map[0].pa_size = round_page((uintptr_t)_end) -
2936 	    dump_map[0].pa_start;
2937 
2938 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2939 	dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr;
2940 	dump_map[1].pa_size = round_page(msgbufp->msg_size);
2941 
2942 	/* 3rd: kernel VM. */
2943 	va = dump_map[1].pa_start + dump_map[1].pa_size;
2944 	/* Find start of next chunk (from va). */
2945 	while (va < virtual_end) {
2946 		/* Don't dump the buffer cache. */
2947 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2948 			va = kmi.buffer_eva;
2949 			continue;
2950 		}
2951 		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2952 		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2953 			break;
2954 		va += PAGE_SIZE;
2955 	}
2956 	if (va < virtual_end) {
2957 		dump_map[2].pa_start = va;
2958 		va += PAGE_SIZE;
2959 		/* Find last page in chunk. */
2960 		while (va < virtual_end) {
2961 			/* Don't run into the buffer cache. */
2962 			if (va == kmi.buffer_sva)
2963 				break;
2964 			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2965 			if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
2966 				break;
2967 			va += PAGE_SIZE;
2968 		}
2969 		dump_map[2].pa_size = va - dump_map[2].pa_start;
2970 	}
2971 }
2972 
2973 #ifdef __powerpc64__
2974 
2975 static size_t
2976 moea64_scan_pmap(mmu_t mmu)
2977 {
2978 	struct pvo_entry *pvo;
2979 	vm_paddr_t pa, pa_end;
2980 	vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp;
2981 	uint64_t lpsize;
2982 
2983 	lpsize = moea64_large_page_size;
2984 	kstart = trunc_page((vm_offset_t)_etext);
2985 	kend = round_page((vm_offset_t)_end);
2986 	kstart_lp = kstart & ~moea64_large_page_mask;
2987 	kend_lp = (kend + moea64_large_page_mask) & ~moea64_large_page_mask;
2988 
2989 	CTR4(KTR_PMAP, "moea64_scan_pmap: kstart=0x%016lx, kend=0x%016lx, "
2990 	    "kstart_lp=0x%016lx, kend_lp=0x%016lx",
2991 	    kstart, kend, kstart_lp, kend_lp);
2992 
2993 	PMAP_LOCK(kernel_pmap);
2994 	RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) {
2995 		va = pvo->pvo_vaddr;
2996 
2997 		if (va & PVO_DEAD)
2998 			continue;
2999 
3000 		/* Skip DMAP (except kernel area) */
3001 		if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) {
3002 			if (va & PVO_LARGE) {
3003 				pgva = va & ~moea64_large_page_mask;
3004 				if (pgva < kstart_lp || pgva >= kend_lp)
3005 					continue;
3006 			} else {
3007 				pgva = trunc_page(va);
3008 				if (pgva < kstart || pgva >= kend)
3009 					continue;
3010 			}
3011 		}
3012 
3013 		pa = pvo->pvo_pte.pa & LPTE_RPGN;
3014 
3015 		if (va & PVO_LARGE) {
3016 			pa_end = pa + lpsize;
3017 			for (; pa < pa_end; pa += PAGE_SIZE) {
3018 				if (is_dumpable(pa))
3019 					dump_add_page(pa);
3020 			}
3021 		} else {
3022 			if (is_dumpable(pa))
3023 				dump_add_page(pa);
3024 		}
3025 	}
3026 	PMAP_UNLOCK(kernel_pmap);
3027 
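	/* Also account for the hardware page table itself: 8 PTEs per PTEG. */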
3028 	return (sizeof(struct lpte) * moea64_pteg_count * 8);
3029 }
3030 
3031 static struct dump_context dump_ctx;
3032 
3033 static void *
3034 moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
3035 {
3036 	dump_ctx.ptex = 0;
3037 	dump_ctx.ptex_end = moea64_pteg_count * 8;
3038 	dump_ctx.blksz = blkpgs * PAGE_SIZE;
3039 	return (&dump_ctx);
3040 }
3041 
3042 #else
3043 
3044 static size_t
3045 moea64_scan_pmap(mmu_t mmu)
3046 {
3047 	return (0);
3048 }
3049 
3050 static void *
3051 moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
3052 {
3053 	return (NULL);
3054 }
3055 
3056 #endif
3057 
3058 #ifdef __powerpc64__
3059 static void
3060 moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
3061 {
3062 
3063 	for (; npages > 0; --npages) {
3064 		if (moea64_large_page_size != 0 &&
3065 		    (pa & moea64_large_page_mask) == 0 &&
3066 		    (va & moea64_large_page_mask) == 0 &&
3067 		    npages >= (moea64_large_page_size >> PAGE_SHIFT)) {
3068 			PMAP_LOCK(kernel_pmap);
3069 			moea64_kenter_large(mmu, va, pa, 0, 0);
3070 			PMAP_UNLOCK(kernel_pmap);
3071 			pa += moea64_large_page_size;
3072 			va += moea64_large_page_size;
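			/*
			 * The loop decrement accounts for one page, so
			 * subtract the rest of the large page here.
			 */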
3073 			npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1;
3074 		} else {
3075 			moea64_kenter(mmu, va, pa);
3076 			pa += PAGE_SIZE;
3077 			va += PAGE_SIZE;
3078 		}
3079 	}
3080 }
3081 
3082 static void
3083 moea64_page_array_startup(mmu_t mmu, long pages)
3084 {
3085 	long dom_pages[MAXMEMDOM];
3086 	vm_paddr_t pa;
3087 	vm_offset_t va, vm_page_base;
3088 	vm_size_t needed, size;
3089 	long page;
3090 	int domain;
3091 	int i;
3092 
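	/* Base KVA at which the vm_page array will be mapped. */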
3093 	vm_page_base = 0xd000000000000000ULL;
3094 
3095 	/* Short-circuit single-domain systems. */
3096 	if (vm_ndomains == 1) {
3097 		size = round_page(pages * sizeof(struct vm_page));
3098 		pa = vm_phys_early_alloc(0, size);
3099 		vm_page_base = moea64_map(mmu, &vm_page_base,
3100 		    pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
3101 		vm_page_array_size = pages;
3102 		vm_page_array = (vm_page_t)vm_page_base;
3103 		return;
3104 	}
3105 
3106 	page = 0;
3107 	for (i = 0; i < MAXMEMDOM; i++)
3108 		dom_pages[i] = 0;
3109 
3110 	/* Now get the number of pages required per domain. */
3111 	for (i = 0; i < vm_phys_nsegs; i++) {
3112 		domain = vm_phys_segs[i].domain;
3113 		KASSERT(domain < MAXMEMDOM,
3114 		    ("Invalid vm_phys_segs NUMA domain %d!\n", domain));
3115 		/* Get size of vm_page_array needed for this segment. */
3116 		size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start);
3117 		dom_pages[domain] += size;
3118 	}
3119 
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
3121 		domain = _vm_phys_domain(phys_avail[i]);
3122 		KASSERT(domain < MAXMEMDOM,
3123 		    ("Invalid phys_avail NUMA domain %d!\n", domain));
3124 		size = btoc(phys_avail[i + 1] - phys_avail[i]);
3125 		dom_pages[domain] += size;
3126 	}
3127 
3128 	/*
3129 	 * Map in chunks that can get us all 16MB pages.  There will be some
3130 	 * overlap between domains, but that's acceptable for now.
3131 	 */
3132 	vm_page_array_size = 0;
3133 	va = vm_page_base;
3134 	for (i = 0; i < MAXMEMDOM && vm_page_array_size < pages; i++) {
3135 		if (dom_pages[i] == 0)
3136 			continue;
3137 		size = ulmin(pages - vm_page_array_size, dom_pages[i]);
3138 		size = round_page(size * sizeof(struct vm_page));
3139 		needed = size;
3140 		size = roundup2(size, moea64_large_page_size);
3141 		pa = vm_phys_early_alloc(i, size);
3142 		vm_page_array_size += size / sizeof(struct vm_page);
3143 		moea64_map_range(mmu, va, pa, size >> PAGE_SHIFT);
3144 		/* Scoot up domain 0, to reduce the domain page overlap. */
3145 		if (i == 0)
3146 			vm_page_base += size - needed;
3147 		va += size;
3148 	}
3149 	vm_page_array = (vm_page_t)vm_page_base;
3150 	vm_page_array_size = pages;
3151 }
3152 #endif
3153