1 /*-
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*-
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 /*
97  * Manages physical address maps.
98  *
99  * In addition to hardware address maps, this module is called upon to
100  * provide software-use-only maps which may or may not be stored in the
101  * same form as hardware maps.  These pseudo-maps are used to store
102  * intermediate results from copy operations to and from address spaces.
103  *
104  * Since the information managed by this module is also stored by the
105  * logical address mapping module, this module may throw away valid virtual
106  * to physical mappings at almost any time.  However, invalidations of
107  * mappings must be done as requested.
108  *
109  * In order to cope with hardware architectures which make virtual to
110  * physical map invalidates expensive, this module may delay invalidate
111  * reduced protection operations until such time as they are actually
112  * necessary.  This module is given full information as to which processors
113  * are currently using which maps, and to when physical maps must be made
114  * correct.
115  */
116 
117 #include "opt_kstack_pages.h"
118 
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
129 
130 #include <dev/ofw/openfirm.h>
131 
132 #include <vm/vm.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/uma.h>
142 
143 #include <machine/cpu.h>
144 #include <machine/powerpc.h>
145 #include <machine/bat.h>
146 #include <machine/frame.h>
147 #include <machine/md_var.h>
148 #include <machine/psl.h>
149 #include <machine/pte.h>
150 #include <machine/sr.h>
151 #include <machine/mmuvar.h>
152 
153 #include "mmu_if.h"
154 
155 #define	MOEA_DEBUG
156 
157 #define TODO	panic("%s: not implemented", __func__);
158 
159 #define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
160 #define	TLBSYNC()	__asm __volatile("tlbsync");
161 #define	SYNC()		__asm __volatile("sync");
162 #define	EIEIO()		__asm __volatile("eieio");
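
/*
 * These macros wrap the PowerPC TLB-management and storage-ordering
 * instructions used by the PTE manipulation routines below: tlbie
 * invalidates the TLB entry for one effective address, tlbsync waits for
 * broadcast invalidations to complete on other processors, sync is a
 * full memory barrier, and eieio orders the paired stores that update a
 * PTE's two words.
 */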
163 
164 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
165 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
166 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
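
/*
 * A VSID built by VSID_MAKE() carries the segment register number in its
 * low four bits and a 20-bit per-pmap hash in bits 4..23.  For example,
 * VSID_MAKE(3, 0x12345) yields 0x123453, from which VSID_TO_SR() recovers
 * 3 and VSID_TO_HASH() recovers 0x12345.
 */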
167 
168 #define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
169 #define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
170 #define	PVO_WIRED		0x010		/* PVO entry is wired */
171 #define	PVO_MANAGED		0x020		/* PVO entry is managed */
172 #define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
173 #define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
174 						   bootstrap */
175 #define PVO_FAKE		0x100		/* fictitious phys page */
176 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
177 #define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
178 #define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
179 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
180 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
181 #define	PVO_PTEGIDX_CLR(pvo)	\
182 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
183 #define	PVO_PTEGIDX_SET(pvo, i)	\
184 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
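
/*
 * The PVO_* flag bits above are stored in the low, page-offset bits of
 * pvo_vaddr, which carry no information for a page-aligned mapping
 * address; PVO_VADDR() masks them off to recover the virtual address.
 * Bits 0-2 cache which of the eight slots in the PTEG holds the entry,
 * letting moea_pvo_to_pte() locate it without scanning the group.
 */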
185 
186 #define	MOEA_PVO_CHECK(pvo)
187 
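/*
 * Each ofw_map entry mirrors one record of the Open Firmware MMU node's
 * "translations" property: a virtual range, its length, the physical
 * address it maps to, and its mapping mode.
 */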
188 struct ofw_map {
189 	vm_offset_t	om_va;
190 	vm_size_t	om_len;
191 	vm_offset_t	om_pa;
192 	u_int		om_mode;
193 };
194 
195 /*
196  * Map of physical memory regions.
197  */
198 static struct	mem_region *regions;
199 static struct	mem_region *pregions;
200 u_int           phys_avail_count;
201 int		regions_sz, pregions_sz;
202 static struct	ofw_map *translations;
203 
204 extern struct pmap ofw_pmap;
205 
206 
207 
208 /*
209  * Lock for the pteg and pvo tables.
210  */
211 struct mtx	moea_table_mutex;
212 
213 /*
214  * PTEG data.
215  */
216 static struct	pteg *moea_pteg_table;
217 u_int		moea_pteg_count;
218 u_int		moea_pteg_mask;
219 
220 /*
221  * PVO data.
222  */
223 struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
224 struct	pvo_head moea_pvo_kunmanaged =
225     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
226 struct	pvo_head moea_pvo_unmanaged =
227     LIST_HEAD_INITIALIZER(moea_pvo_unmanaged);	/* list of unmanaged pages */
228 
229 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
230 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
231 
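/*
 * Static pool of PVO entries used before the UMA zones exist:
 * moea_bootstrap carves it out of physical memory, and entries handed
 * out from it are marked PVO_BOOTSTRAP and are never freed back.
 */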
232 #define	BPVO_POOL_SIZE	32768
233 static struct	pvo_entry *moea_bpvo_pool;
234 static int	moea_bpvo_pool_index = 0;
235 
236 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
237 static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];
238 
239 static boolean_t moea_initialized = FALSE;
240 
241 /*
242  * Statistics.
243  */
244 u_int	moea_pte_valid = 0;
245 u_int	moea_pte_overflow = 0;
246 u_int	moea_pte_replacements = 0;
247 u_int	moea_pvo_entries = 0;
248 u_int	moea_pvo_enter_calls = 0;
249 u_int	moea_pvo_remove_calls = 0;
250 u_int	moea_pte_spills = 0;
251 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
252     0, "");
253 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
254     &moea_pte_overflow, 0, "");
255 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
256     &moea_pte_replacements, 0, "");
257 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
258     0, "");
259 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
260     &moea_pvo_enter_calls, 0, "");
261 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
262     &moea_pvo_remove_calls, 0, "");
263 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
264     &moea_pte_spills, 0, "");
265 
266 /*
267  * Allocate physical memory for use in moea_bootstrap.
268  */
269 static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);
270 
271 /*
272  * PTE calls.
273  */
274 static int		moea_pte_insert(u_int, struct pte *);
275 
276 /*
277  * PVO calls.
278  */
279 static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
280 		    vm_offset_t, vm_offset_t, u_int, int);
281 static void	moea_pvo_remove(struct pvo_entry *, int);
282 static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
283 static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);
284 
285 /*
286  * Utility routines.
287  */
288 static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
289 			    vm_prot_t, boolean_t);
290 static void		moea_syncicache(vm_offset_t, vm_size_t);
291 static boolean_t	moea_query_bit(vm_page_t, int);
292 static u_int		moea_clear_bit(vm_page_t, int, int *);
293 static void		moea_kremove(mmu_t, vm_offset_t);
294 static void		tlbia(void);
295 int		moea_pte_spill(vm_offset_t);
296 
297 /*
298  * Kernel MMU interface
299  */
300 void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
301 void moea_clear_modify(mmu_t, vm_page_t);
302 void moea_clear_reference(mmu_t, vm_page_t);
303 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
304 void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
305 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
306     vm_prot_t);
307 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
308 vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
309 vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
310 void moea_init(mmu_t);
311 boolean_t moea_is_modified(mmu_t, vm_page_t);
312 boolean_t moea_ts_referenced(mmu_t, vm_page_t);
313 vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
314 boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
315 int moea_page_wired_mappings(mmu_t, vm_page_t);
316 void moea_pinit(mmu_t, pmap_t);
317 void moea_pinit0(mmu_t, pmap_t);
318 void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
319 void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
320 void moea_qremove(mmu_t, vm_offset_t, int);
321 void moea_release(mmu_t, pmap_t);
322 void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
323 void moea_remove_all(mmu_t, vm_page_t);
324 void moea_remove_write(mmu_t, vm_page_t);
325 void moea_zero_page(mmu_t, vm_page_t);
326 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
327 void moea_zero_page_idle(mmu_t, vm_page_t);
328 void moea_activate(mmu_t, struct thread *);
329 void moea_deactivate(mmu_t, struct thread *);
330 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
331 void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
332 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
333 vm_offset_t moea_kextract(mmu_t, vm_offset_t);
334 void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
335 boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
336 boolean_t moea_page_executable(mmu_t, vm_page_t);
337 
338 static mmu_method_t moea_methods[] = {
339 	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
340 	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
341 	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
342 	MMUMETHOD(mmu_copy_page,	moea_copy_page),
343 	MMUMETHOD(mmu_enter,		moea_enter),
344 	MMUMETHOD(mmu_enter_object,	moea_enter_object),
345 	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
346 	MMUMETHOD(mmu_extract,		moea_extract),
347 	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
348 	MMUMETHOD(mmu_init,		moea_init),
349 	MMUMETHOD(mmu_is_modified,	moea_is_modified),
350 	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
351 	MMUMETHOD(mmu_map,     		moea_map),
352 	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
353 	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
354 	MMUMETHOD(mmu_pinit,		moea_pinit),
355 	MMUMETHOD(mmu_pinit0,		moea_pinit0),
356 	MMUMETHOD(mmu_protect,		moea_protect),
357 	MMUMETHOD(mmu_qenter,		moea_qenter),
358 	MMUMETHOD(mmu_qremove,		moea_qremove),
359 	MMUMETHOD(mmu_release,		moea_release),
360 	MMUMETHOD(mmu_remove,		moea_remove),
361 	MMUMETHOD(mmu_remove_all,      	moea_remove_all),
362 	MMUMETHOD(mmu_remove_write,	moea_remove_write),
363 	MMUMETHOD(mmu_zero_page,       	moea_zero_page),
364 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
365 	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
366 	MMUMETHOD(mmu_activate,		moea_activate),
367 	MMUMETHOD(mmu_deactivate,      	moea_deactivate),
368 
369 	/* Internal interfaces */
370 	MMUMETHOD(mmu_bootstrap,       	moea_bootstrap),
371 	MMUMETHOD(mmu_mapdev,		moea_mapdev),
372 	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
373 	MMUMETHOD(mmu_kextract,		moea_kextract),
374 	MMUMETHOD(mmu_kenter,		moea_kenter),
375 	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
376 	MMUMETHOD(mmu_page_executable,	moea_page_executable),
377 
378 	{ 0, 0 }
379 };
380 
381 static mmu_def_t oea_mmu = {
382 	MMU_TYPE_OEA,
383 	moea_methods,
384 	0
385 };
386 MMU_DEF(oea_mmu);
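
/*
 * Registering the method table under MMU_TYPE_OEA lets the platform
 * startup code select this pmap implementation by name; the kernel's
 * mmu_*() calls are then dispatched to the moea_*() routines above
 * through the kobj-based interface declared in mmu_if.h.
 */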
387 
388 
389 static __inline int
390 va_to_sr(u_int *sr, vm_offset_t va)
391 {
392 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
393 }
394 
395 static __inline u_int
396 va_to_pteg(u_int sr, vm_offset_t addr)
397 {
398 	u_int hash;
399 
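	/*
	 * Illustrative values: for a segment whose VSID is 0x345 and the
	 * virtual address 0x00123000, the 16-bit page index is 0x0123, so
	 * hash = 0x345 ^ 0x123 = 0x266.  With a 4096-PTEG table
	 * (moea_pteg_mask == 0xfff) the mapping lands in PTEG 0x266; the
	 * secondary PTEG tried on overflow is the complement of this value.
	 */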
400 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
401 	    ADDR_PIDX_SHFT);
402 	return (hash & moea_pteg_mask);
403 }
404 
405 static __inline struct pvo_head *
406 pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
407 {
408 	struct	vm_page *pg;
409 
410 	pg = PHYS_TO_VM_PAGE(pa);
411 
412 	if (pg_p != NULL)
413 		*pg_p = pg;
414 
415 	if (pg == NULL)
416 		return (&moea_pvo_unmanaged);
417 
418 	return (&pg->md.mdpg_pvoh);
419 }
420 
421 static __inline struct pvo_head *
422 vm_page_to_pvoh(vm_page_t m)
423 {
424 
425 	return (&m->md.mdpg_pvoh);
426 }
427 
428 static __inline void
429 moea_attr_clear(vm_page_t m, int ptebit)
430 {
431 
432 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
433 	m->md.mdpg_attrs &= ~ptebit;
434 }
435 
436 static __inline int
437 moea_attr_fetch(vm_page_t m)
438 {
439 
440 	return (m->md.mdpg_attrs);
441 }
442 
443 static __inline void
444 moea_attr_save(vm_page_t m, int ptebit)
445 {
446 
447 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
448 	m->md.mdpg_attrs |= ptebit;
449 }
450 
451 static __inline int
452 moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
453 {
454 	if (pt->pte_hi == pvo_pt->pte_hi)
455 		return (1);
456 
457 	return (0);
458 }
459 
460 static __inline int
461 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
462 {
463 	return (pt->pte_hi & ~PTE_VALID) ==
464 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
465 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
466 }
467 
468 static __inline void
469 moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
470 {
471 
472 	mtx_assert(&moea_table_mutex, MA_OWNED);
473 
474 	/*
475 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
476 	 * set when the real pte is set in memory.
477 	 *
478 	 * Note: Don't set the valid bit for correct operation of tlb update.
479 	 */
480 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
481 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
482 	pt->pte_lo = pte_lo;
483 }
484 
485 static __inline void
486 moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
487 {
488 
489 	mtx_assert(&moea_table_mutex, MA_OWNED);
490 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
491 }
492 
493 static __inline void
494 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
495 {
496 
497 	mtx_assert(&moea_table_mutex, MA_OWNED);
498 
499 	/*
500 	 * Clear the bit and flush the stale TLB entry (Section 7.6.3.2.3).
501 	 */
502 	pt->pte_lo &= ~ptebit;
503 	TLBIE(va);
504 	EIEIO();
505 	TLBSYNC();
506 	SYNC();
507 }
508 
509 static __inline void
510 moea_pte_set(struct pte *pt, struct pte *pvo_pt)
511 {
512 
513 	mtx_assert(&moea_table_mutex, MA_OWNED);
514 	pvo_pt->pte_hi |= PTE_VALID;
515 
516 	/*
517 	 * Update the PTE as defined in section 7.6.3.1.
518 	 * Note that the REF/CHG bits are from pvo_pt and thus should have
519 	 * been saved so this routine can restore them (if desired).
520 	 */
521 	pt->pte_lo = pvo_pt->pte_lo;
522 	EIEIO();
523 	pt->pte_hi = pvo_pt->pte_hi;
524 	SYNC();
525 	moea_pte_valid++;
526 }
527 
528 static __inline void
529 moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
530 {
531 
532 	mtx_assert(&moea_table_mutex, MA_OWNED);
533 	pvo_pt->pte_hi &= ~PTE_VALID;
534 
535 	/*
536 	 * Force the ref & chg bits back into the PTEs.
537 	 */
538 	SYNC();
539 
540 	/*
541 	 * Invalidate the pte.
542 	 */
543 	pt->pte_hi &= ~PTE_VALID;
544 
545 	SYNC();
546 	TLBIE(va);
547 	EIEIO();
548 	TLBSYNC();
549 	SYNC();
550 
551 	/*
552 	 * Save the ref & chg bits.
553 	 */
554 	moea_pte_synch(pt, pvo_pt);
555 	moea_pte_valid--;
556 }
557 
558 static __inline void
559 moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
560 {
561 
562 	/*
563 	 * Invalidate the PTE
564 	 */
565 	moea_pte_unset(pt, pvo_pt, va);
566 	moea_pte_set(pt, pvo_pt);
567 }
568 
569 /*
570  * Quick sort callout for comparing memory regions.
571  */
572 static int	mr_cmp(const void *a, const void *b);
573 static int	om_cmp(const void *a, const void *b);
574 
575 static int
576 mr_cmp(const void *a, const void *b)
577 {
578 	const struct	mem_region *regiona;
579 	const struct	mem_region *regionb;
580 
581 	regiona = a;
582 	regionb = b;
583 	if (regiona->mr_start < regionb->mr_start)
584 		return (-1);
585 	else if (regiona->mr_start > regionb->mr_start)
586 		return (1);
587 	else
588 		return (0);
589 }
590 
591 static int
592 om_cmp(const void *a, const void *b)
593 {
594 	const struct	ofw_map *mapa;
595 	const struct	ofw_map *mapb;
596 
597 	mapa = a;
598 	mapb = b;
599 	if (mapa->om_pa < mapb->om_pa)
600 		return (-1);
601 	else if (mapa->om_pa > mapb->om_pa)
602 		return (1);
603 	else
604 		return (0);
605 }
606 
607 void
608 moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
609 {
610 	ihandle_t	mmui;
611 	phandle_t	chosen, mmu;
612 	int		sz;
613 	int		i, j;
614 	int		ofw_mappings;
615 	vm_size_t	size, physsz, hwphyssz;
616 	vm_offset_t	pa, va, off;
617 	u_int		batl, batu;
618 
619         /*
620          * Set up BAT0 to map the lowest 256 MB area
621          */
622         battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
623         battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
624 
625         /*
626          * Map PCI memory space.
627          */
628         battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
629         battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
630 
631         battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
632         battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
633 
634         battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
635         battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
636 
637         battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
638         battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
639 
640         /*
641          * Map obio devices.
642          */
643         battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
644         battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
645 
646 	/*
647 	 * Use an IBAT and a DBAT to map the bottom segment of memory
648 	 * where we are.
649 	 */
650 	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
651 	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
652 	__asm (".balign 32; \n"
653 	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
654 	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
655 	    :: "r"(batu), "r"(batl));
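
	/*
	 * The values just loaded follow the BAT register layout: the upper
	 * word holds the effective block address plus a length mask and the
	 * supervisor-valid bit (a 256MB block here), while the lower word
	 * holds the matching physical block address plus the WIMG and
	 * protection bits (coherent and read/write for RAM; cache-inhibited
	 * and guarded for the device ranges above).
	 */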
656 
657 	/* map pci space */
658 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
659 	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
660 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
661 	    :: "r"(batu), "r"(batl));
662 
663 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
664 	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
665 
666 	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
667 	for (i = 0; i < pregions_sz; i++) {
668 		vm_offset_t pa;
669 		vm_offset_t end;
670 
671 		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
672 			pregions[i].mr_start,
673 			pregions[i].mr_start + pregions[i].mr_size,
674 			pregions[i].mr_size);
675 		/*
676 		 * Install entries into the BAT table to allow all
677 		 * of physmem to be covered by on-demand BAT entries.
678 		 * The loop will sometimes set the same battable element
679 		 * twice, but that's fine since they won't be used for
680 		 * a while yet.
681 		 */
682 		pa = pregions[i].mr_start & 0xf0000000;
683 		end = pregions[i].mr_start + pregions[i].mr_size;
684 		do {
685                         u_int n = pa >> ADDR_SR_SHFT;
686 
687 			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
688 			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
689 			pa += SEGMENT_LENGTH;
690 		} while (pa < end);
691 	}
692 
693 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
694 		panic("moea_bootstrap: phys_avail too small");
695 	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
696 	phys_avail_count = 0;
697 	physsz = 0;
698 	hwphyssz = 0;
699 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
700 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
701 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
702 		    regions[i].mr_start + regions[i].mr_size,
703 		    regions[i].mr_size);
704 		if (hwphyssz != 0 &&
705 		    (physsz + regions[i].mr_size) >= hwphyssz) {
706 			if (physsz < hwphyssz) {
707 				phys_avail[j] = regions[i].mr_start;
708 				phys_avail[j + 1] = regions[i].mr_start +
709 				    hwphyssz - physsz;
710 				physsz = hwphyssz;
711 				phys_avail_count++;
712 			}
713 			break;
714 		}
715 		phys_avail[j] = regions[i].mr_start;
716 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
717 		phys_avail_count++;
718 		physsz += regions[i].mr_size;
719 	}
720 	physmem = btoc(physsz);
721 
722 	/*
723 	 * Allocate PTEG table.
724 	 */
725 #ifdef PTEGCOUNT
726 	moea_pteg_count = PTEGCOUNT;
727 #else
728 	moea_pteg_count = 0x1000;
729 
730 	while (moea_pteg_count < physmem)
731 		moea_pteg_count <<= 1;
732 
733 	moea_pteg_count >>= 1;
734 #endif /* PTEGCOUNT */
735 
736 	size = moea_pteg_count * sizeof(struct pteg);
737 	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
738 	    size);
739 	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
740 	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
741 	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
742 	moea_pteg_mask = moea_pteg_count - 1;
743 
744 	/*
745 	 * Allocate pv/overflow lists.
746 	 */
747 	size = sizeof(struct pvo_head) * moea_pteg_count;
748 	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
749 	    PAGE_SIZE);
750 	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
751 	for (i = 0; i < moea_pteg_count; i++)
752 		LIST_INIT(&moea_pvo_table[i]);
753 
754 	/*
755 	 * Initialize the lock that synchronizes access to the pteg and pvo
756 	 * tables.
757 	 */
758 	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
759 	    MTX_RECURSE);
760 
761 	/*
762 	 * Initialise the unmanaged pvo pool.
763 	 */
764 	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
765 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
766 	moea_bpvo_pool_index = 0;
767 
768 	/*
769 	 * Make sure kernel vsid is allocated as well as VSID 0.
770 	 */
771 	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
772 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
773 	moea_vsid_bitmap[0] |= 1;
774 
775 	/*
776 	 * Set up the Open Firmware pmap and add its mappings.
777 	 */
778 	moea_pinit(mmup, &ofw_pmap);
779 	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
780 	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
781 	if ((chosen = OF_finddevice("/chosen")) == -1)
782 		panic("moea_bootstrap: can't find /chosen");
783 	OF_getprop(chosen, "mmu", &mmui, 4);
784 	if ((mmu = OF_instance_to_package(mmui)) == -1)
785 		panic("moea_bootstrap: can't get mmu package");
786 	if ((sz = OF_getproplen(mmu, "translations")) == -1)
787 		panic("moea_bootstrap: can't get ofw translation count");
788 	translations = NULL;
789 	for (i = 0; phys_avail[i] != 0; i += 2) {
790 		if (phys_avail[i + 1] >= sz) {
791 			translations = (struct ofw_map *)phys_avail[i];
792 			break;
793 		}
794 	}
795 	if (translations == NULL)
796 		panic("moea_bootstrap: no space to copy translations");
797 	bzero(translations, sz);
798 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
799 		panic("moea_bootstrap: can't get ofw translations");
800 	CTR0(KTR_PMAP, "moea_bootstrap: translations");
801 	sz /= sizeof(*translations);
802 	qsort(translations, sz, sizeof (*translations), om_cmp);
803 	for (i = 0, ofw_mappings = 0; i < sz; i++) {
804 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
805 		    translations[i].om_pa, translations[i].om_va,
806 		    translations[i].om_len);
807 
808 		/*
809 		 * If the mapping is 1:1, let the RAM and device on-demand
810 		 * BAT tables take care of the translation.
811 		 */
812 		if (translations[i].om_va == translations[i].om_pa)
813 			continue;
814 
815 		/* Enter the pages */
816 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
817 			struct	vm_page m;
818 
819 			m.phys_addr = translations[i].om_pa + off;
820 			PMAP_LOCK(&ofw_pmap);
821 			moea_enter_locked(&ofw_pmap,
822 				   translations[i].om_va + off, &m,
823 				   VM_PROT_ALL, 1);
824 			PMAP_UNLOCK(&ofw_pmap);
825 			ofw_mappings++;
826 		}
827 	}
828 
829 	/*
830 	 * Calculate the last available physical address.
831 	 */
832 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
833 		;
834 	Maxmem = powerpc_btop(phys_avail[i + 1]);
835 
836 	/*
837 	 * Initialize the kernel pmap (which is statically allocated).
838 	 */
839 	PMAP_LOCK_INIT(kernel_pmap);
840 	for (i = 0; i < 16; i++) {
841 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
842 	}
843 	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
844 	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
845 	kernel_pmap->pm_active = ~0;
846 
847 	/*
848 	 * Initialize hardware.
849 	 */
850 	for (i = 0; i < 16; i++) {
851 		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
852 	}
853 	__asm __volatile ("mtsr %0,%1"
854 	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
855 	__asm __volatile ("mtsr %0,%1"
856 	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
857 	__asm __volatile ("sync; mtsdr1 %0; isync"
858 	    :: "r"((u_int)moea_pteg_table | (moea_pteg_mask >> 10)));
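
	/*
	 * SDR1, programmed above, pairs the physical base of the PTEG table
	 * with a mask telling the MMU how many hash bits select a PTEG.  The
	 * minimum table is 1024 PTEGs (64KB), so the low 10 bits of
	 * moea_pteg_mask are implied and only the remaining bits are
	 * programmed: an 8192-PTEG table, for example, has a mask of 0x1fff
	 * and an SDR1 mask field of 0x7.
	 */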
859 	tlbia();
860 
861 	pmap_bootstrapped++;
862 
863 	/*
864 	 * Set the start and end of kva.
865 	 */
866 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
867 	virtual_end = VM_MAX_KERNEL_ADDRESS;
868 
869 	/*
870 	 * Allocate a kernel stack with a guard page for thread0 and map it
871 	 * into the kernel page map.
872 	 */
873 	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
874 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
875 	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
876 	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
877 	thread0.td_kstack = va;
878 	thread0.td_kstack_pages = KSTACK_PAGES;
879 	for (i = 0; i < KSTACK_PAGES; i++) {
880 		moea_kenter(mmup, va, pa);
881 		pa += PAGE_SIZE;
882 		va += PAGE_SIZE;
883 	}
884 
885 	/*
886 	 * Allocate virtual address space for the message buffer.
887 	 */
888 	pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
889 	msgbufp = (struct msgbuf *)virtual_avail;
890 	va = virtual_avail;
891 	virtual_avail += round_page(MSGBUF_SIZE);
892 	while (va < virtual_avail) {
893 		moea_kenter(mmup, va, pa);
894 		pa += PAGE_SIZE;
895 		va += PAGE_SIZE;
896 	}
897 }
898 
899 /*
900  * Activate a user pmap.  The pmap must be activated before its address
901  * space can be accessed in any way.
902  */
903 void
904 moea_activate(mmu_t mmu, struct thread *td)
905 {
906 	pmap_t	pm, pmr;
907 
908 	/*
909 	 * Load all the data we need up front to encourage the compiler to
910 	 * not issue any loads while we have interrupts disabled below.
911 	 */
912 	pm = &td->td_proc->p_vmspace->vm_pmap;
913 
914 	if ((pmr = (pmap_t)moea_kextract(mmu, (vm_offset_t)pm)) == NULL)
915 		pmr = pm;
916 
917 	pm->pm_active |= PCPU_GET(cpumask);
918 	PCPU_SET(curpmap, pmr);
919 }
920 
921 void
922 moea_deactivate(mmu_t mmu, struct thread *td)
923 {
924 	pmap_t	pm;
925 
926 	pm = &td->td_proc->p_vmspace->vm_pmap;
927 	pm->pm_active &= ~(PCPU_GET(cpumask));
928 	PCPU_SET(curpmap, NULL);
929 }
930 
931 void
932 moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
933 {
934 	struct	pvo_entry *pvo;
935 
936 	PMAP_LOCK(pm);
937 	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
938 
939 	if (pvo != NULL) {
940 		if (wired) {
941 			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
942 				pm->pm_stats.wired_count++;
943 			pvo->pvo_vaddr |= PVO_WIRED;
944 		} else {
945 			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
946 				pm->pm_stats.wired_count--;
947 			pvo->pvo_vaddr &= ~PVO_WIRED;
948 		}
949 	}
950 	PMAP_UNLOCK(pm);
951 }
952 
953 void
954 moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
955 {
956 	vm_offset_t	dst;
957 	vm_offset_t	src;
958 
959 	dst = VM_PAGE_TO_PHYS(mdst);
960 	src = VM_PAGE_TO_PHYS(msrc);
961 
962 	kcopy((void *)src, (void *)dst, PAGE_SIZE);
963 }
964 
965 /*
966  * Zero a page of physical memory through its direct (BAT) mapping.
967  */
968 void
969 moea_zero_page(mmu_t mmu, vm_page_t m)
970 {
971 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
972 	void *va = (void *)pa;
973 
974 	bzero(va, PAGE_SIZE);
975 }
976 
977 void
978 moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
979 {
980 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
981 	void *va = (void *)(pa + off);
982 
983 	bzero(va, size);
984 }
985 
986 void
987 moea_zero_page_idle(mmu_t mmu, vm_page_t m)
988 {
989 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
990 	void *va = (void *)pa;
991 
992 	bzero(va, PAGE_SIZE);
993 }
994 
995 /*
996  * Map the given physical page at the specified virtual address in the
997  * target pmap with the protection requested.  If specified the page
998  * will be wired down.
999  */
1000 void
1001 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1002 	   boolean_t wired)
1003 {
1004 
1005 	vm_page_lock_queues();
1006 	PMAP_LOCK(pmap);
1007 	moea_enter_locked(pmap, va, m, prot, wired);
1008 	vm_page_unlock_queues();
1009 	PMAP_UNLOCK(pmap);
1010 }
1011 
1012 /*
1013  * Map the given physical page at the specified virtual address in the
1014  * target pmap with the protection requested.  If specified the page
1015  * will be wired down.
1016  *
1017  * The page queues and pmap must be locked.
1018  */
1019 static void
1020 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1021     boolean_t wired)
1022 {
1023 	struct		pvo_head *pvo_head;
1024 	uma_zone_t	zone;
1025 	vm_page_t	pg;
1026 	u_int		pte_lo, pvo_flags, was_exec, i;
1027 	int		error;
1028 
1029 	if (!moea_initialized) {
1030 		pvo_head = &moea_pvo_kunmanaged;
1031 		zone = moea_upvo_zone;
1032 		pvo_flags = 0;
1033 		pg = NULL;
1034 		was_exec = PTE_EXEC;
1035 	} else {
1036 		pvo_head = vm_page_to_pvoh(m);
1037 		pg = m;
1038 		zone = moea_mpvo_zone;
1039 		pvo_flags = PVO_MANAGED;
1040 		was_exec = 0;
1041 	}
1042 	if (pmap_bootstrapped)
1043 		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1044 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1045 
1046 	/* XXX change the pvo head for fake pages */
1047 	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)
1048 		pvo_head = &moea_pvo_kunmanaged;
1049 
1050 	/*
1051 	 * If this is a managed page, and it's the first reference to the page,
1052 	 * clear the execness of the page.  Otherwise fetch the execness.
1053 	 */
1054 	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
1055 		if (LIST_EMPTY(pvo_head)) {
1056 			moea_attr_clear(pg, PTE_EXEC);
1057 		} else {
1058 			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
1059 		}
1060 	}
1061 
1062 	/*
1063 	 * Assume the page is cache inhibited and access is guarded unless
1064 	 * it's in our available memory array.
1065 	 */
1066 	pte_lo = PTE_I | PTE_G;
1067 	for (i = 0; i < pregions_sz; i++) {
1068 		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
1069 		    (VM_PAGE_TO_PHYS(m) <
1070 			(pregions[i].mr_start + pregions[i].mr_size))) {
1071 			pte_lo &= ~(PTE_I | PTE_G);
1072 			break;
1073 		}
1074 	}
1075 
1076 	if (prot & VM_PROT_WRITE) {
1077 		pte_lo |= PTE_BW;
1078 		if (pmap_bootstrapped)
1079 			vm_page_flag_set(m, PG_WRITEABLE);
1080 	} else
1081 		pte_lo |= PTE_BR;
1082 
1083 	if (prot & VM_PROT_EXECUTE)
1084 		pvo_flags |= PVO_EXECUTABLE;
1085 
1086 	if (wired)
1087 		pvo_flags |= PVO_WIRED;
1088 
1089 	if ((m->flags & PG_FICTITIOUS) != 0)
1090 		pvo_flags |= PVO_FAKE;
1091 
1092 	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1093 	    pte_lo, pvo_flags);
1094 
1095 	/*
1096 	 * Flush the real page from the instruction cache if this page is
1097 	 * mapped executable and cacheable and was not previously mapped (or
1098 	 * was not mapped executable).
1099 	 */
1100 	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
1101 	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
1102 		/*
1103 		 * Flush the real memory from the cache.
1104 		 */
1105 		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1106 		if (pg != NULL)
1107 			moea_attr_save(pg, PTE_EXEC);
1108 	}
1109 
1110 	/* XXX syncicache always until problems are sorted */
1111 	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1112 }
1113 
1114 /*
1115  * Maps a sequence of resident pages belonging to the same object.
1116  * The sequence begins with the given page m_start.  This page is
1117  * mapped at the given virtual address start.  Each subsequent page is
1118  * mapped at a virtual address that is offset from start by the same
1119  * amount as the page is offset from m_start within the object.  The
1120  * last page in the sequence is the page with the largest offset from
1121  * m_start that can be mapped at a virtual address less than the given
1122  * virtual address end.  Not every virtual page between start and end
1123  * is mapped; only those for which a resident page exists with the
1124  * corresponding offset from m_start are mapped.
1125  */
1126 void
1127 moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1128     vm_page_t m_start, vm_prot_t prot)
1129 {
1130 	vm_page_t m;
1131 	vm_pindex_t diff, psize;
1132 
1133 	psize = atop(end - start);
1134 	m = m_start;
1135 	PMAP_LOCK(pm);
1136 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1137 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
1138 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1139 		m = TAILQ_NEXT(m, listq);
1140 	}
1141 	PMAP_UNLOCK(pm);
1142 }
1143 
1144 void
1145 moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1146     vm_prot_t prot)
1147 {
1148 
1149 	PMAP_LOCK(pm);
1150 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1151 	    FALSE);
1152 	PMAP_UNLOCK(pm);
1153 
1154 }
1155 
1156 vm_paddr_t
1157 moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1158 {
1159 	struct	pvo_entry *pvo;
1160 	vm_paddr_t pa;
1161 
1162 	PMAP_LOCK(pm);
1163 	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1164 	if (pvo == NULL)
1165 		pa = 0;
1166 	else
1167 		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1168 	PMAP_UNLOCK(pm);
1169 	return (pa);
1170 }
1171 
1172 /*
1173  * Atomically extract and hold the physical page with the given
1174  * pmap and virtual address pair if that mapping permits the given
1175  * protection.
1176  */
1177 vm_page_t
1178 moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1179 {
1180 	struct	pvo_entry *pvo;
1181 	vm_page_t m;
1182 
1183 	m = NULL;
1184 	vm_page_lock_queues();
1185 	PMAP_LOCK(pmap);
1186 	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1187 	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
1188 	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
1189 	     (prot & VM_PROT_WRITE) == 0)) {
1190 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1191 		vm_page_hold(m);
1192 	}
1193 	vm_page_unlock_queues();
1194 	PMAP_UNLOCK(pmap);
1195 	return (m);
1196 }
1197 
1198 void
1199 moea_init(mmu_t mmu)
1200 {
1201 
1202 	CTR0(KTR_PMAP, "moea_init");
1203 
1204 	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1205 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1206 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1207 	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1208 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1209 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1210 	moea_initialized = TRUE;
1211 }
1212 
1213 boolean_t
1214 moea_is_modified(mmu_t mmu, vm_page_t m)
1215 {
1216 
1217 	if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
1218 		return (FALSE);
1219 
1220 	return (moea_query_bit(m, PTE_CHG));
1221 }
1222 
1223 void
1224 moea_clear_reference(mmu_t mmu, vm_page_t m)
1225 {
1226 
1227 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1228 		return;
1229 	moea_clear_bit(m, PTE_REF, NULL);
1230 }
1231 
1232 void
1233 moea_clear_modify(mmu_t mmu, vm_page_t m)
1234 {
1235 
1236 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1237 		return;
1238 	moea_clear_bit(m, PTE_CHG, NULL);
1239 }
1240 
1241 /*
1242  * Clear the write and modified bits in each of the given page's mappings.
1243  */
1244 void
1245 moea_remove_write(mmu_t mmu, vm_page_t m)
1246 {
1247 	struct	pvo_entry *pvo;
1248 	struct	pte *pt;
1249 	pmap_t	pmap;
1250 	u_int	lo;
1251 
1252 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1253 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1254 	    (m->flags & PG_WRITEABLE) == 0)
1255 		return;
1256 	lo = moea_attr_fetch(m);
1257 	SYNC();
1258 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1259 		pmap = pvo->pvo_pmap;
1260 		PMAP_LOCK(pmap);
1261 		if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
1262 			pt = moea_pvo_to_pte(pvo, -1);
1263 			pvo->pvo_pte.pte_lo &= ~PTE_PP;
1264 			pvo->pvo_pte.pte_lo |= PTE_BR;
1265 			if (pt != NULL) {
1266 				moea_pte_synch(pt, &pvo->pvo_pte);
1267 				lo |= pvo->pvo_pte.pte_lo;
1268 				pvo->pvo_pte.pte_lo &= ~PTE_CHG;
1269 				moea_pte_change(pt, &pvo->pvo_pte,
1270 				    pvo->pvo_vaddr);
1271 				mtx_unlock(&moea_table_mutex);
1272 			}
1273 		}
1274 		PMAP_UNLOCK(pmap);
1275 	}
1276 	if ((lo & PTE_CHG) != 0) {
1277 		moea_attr_clear(m, PTE_CHG);
1278 		vm_page_dirty(m);
1279 	}
1280 	vm_page_flag_clear(m, PG_WRITEABLE);
1281 }
1282 
1283 /*
1284  *	moea_ts_referenced:
1285  *
1286  *	Return a count of reference bits for a page, clearing those bits.
1287  *	It is not necessary for every reference bit to be cleared, but it
1288  *	is necessary that 0 only be returned when there are truly no
1289  *	reference bits set.
1290  *
1291  *	XXX: The exact number of bits to check and clear is a matter that
1292  *	should be tested and standardized at some point in the future for
1293  *	optimal aging of shared pages.
1294  */
1295 boolean_t
1296 moea_ts_referenced(mmu_t mmu, vm_page_t m)
1297 {
1298 	int count;
1299 
1300 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1301 		return (0);
1302 
1303 	count = moea_clear_bit(m, PTE_REF, NULL);
1304 
1305 	return (count);
1306 }
1307 
1308 /*
1309  * Map a wired page into kernel virtual address space.
1310  */
1311 void
1312 moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1313 {
1314 	u_int		pte_lo;
1315 	int		error;
1316 	int		i;
1317 
1318 #if 0
1319 	if (va < VM_MIN_KERNEL_ADDRESS)
1320 		panic("moea_kenter: attempt to enter non-kernel address %#x",
1321 		    va);
1322 #endif
1323 
1324 	pte_lo = PTE_I | PTE_G;
1325 	for (i = 0; i < pregions_sz; i++) {
1326 		if ((pa >= pregions[i].mr_start) &&
1327 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
1328 			pte_lo &= ~(PTE_I | PTE_G);
1329 			break;
1330 		}
1331 	}
1332 
1333 	PMAP_LOCK(kernel_pmap);
1334 	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
1335 	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1336 
1337 	if (error != 0 && error != ENOENT)
1338 		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
1339 		    pa, error);
1340 
1341 	/*
1342 	 * Flush the real memory from the instruction cache.
1343 	 */
1344 	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
1345 		moea_syncicache(pa, PAGE_SIZE);
1346 	}
1347 	PMAP_UNLOCK(kernel_pmap);
1348 }
1349 
1350 /*
1351  * Extract the physical page address associated with the given kernel virtual
1352  * address.
1353  */
1354 vm_offset_t
1355 moea_kextract(mmu_t mmu, vm_offset_t va)
1356 {
1357 	struct		pvo_entry *pvo;
1358 	vm_paddr_t pa;
1359 
1360 #ifdef UMA_MD_SMALL_ALLOC
1361 	/*
1362 	 * Allow direct mappings
1363 	 */
1364 	if (va < VM_MIN_KERNEL_ADDRESS) {
1365 		return (va);
1366 	}
1367 #endif
1368 
1369 	PMAP_LOCK(kernel_pmap);
1370 	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1371 	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
1372 	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1373 	PMAP_UNLOCK(kernel_pmap);
1374 	return (pa);
1375 }
1376 
1377 /*
1378  * Remove a wired page from kernel virtual address space.
1379  */
1380 void
1381 moea_kremove(mmu_t mmu, vm_offset_t va)
1382 {
1383 
1384 	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1385 }
1386 
1387 /*
1388  * Map a range of physical addresses into kernel virtual address space.
1389  *
1390  * The value passed in *virt is a suggested virtual address for the mapping.
1391  * Architectures which can support a direct-mapped physical to virtual region
1392  * can return the appropriate address within that region, leaving '*virt'
1393  * unchanged.  We cannot and therefore do not; *virt is updated with the
1394  * first usable address after the mapped region.
1395  */
1396 vm_offset_t
1397 moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1398     vm_offset_t pa_end, int prot)
1399 {
1400 	vm_offset_t	sva, va;
1401 
1402 	sva = *virt;
1403 	va = sva;
1404 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1405 		moea_kenter(mmu, va, pa_start);
1406 	*virt = va;
1407 	return (sva);
1408 }
1409 
1410 /*
1411  * Returns true if the pmap's pv is one of the first
1412  * 16 pvs linked to from this page.  This count may
1413  * be changed upwards or downwards in the future; it
1414  * is only necessary that true be returned for a small
1415  * subset of pmaps for proper page aging.
1416  */
1417 boolean_t
1418 moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1419 {
1420         int loops;
1421 	struct pvo_entry *pvo;
1422 
1423         if (!moea_initialized || (m->flags & PG_FICTITIOUS))
1424                 return FALSE;
1425 
1426 	loops = 0;
1427 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1428 		if (pvo->pvo_pmap == pmap)
1429 			return (TRUE);
1430 		if (++loops >= 16)
1431 			break;
1432 	}
1433 
1434 	return (FALSE);
1435 }
1436 
1437 /*
1438  * Return the number of managed mappings to the given physical page
1439  * that are wired.
1440  */
1441 int
1442 moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
1443 {
1444 	struct pvo_entry *pvo;
1445 	int count;
1446 
1447 	count = 0;
1448 	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
1449 		return (count);
1450 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1451 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1452 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1453 			count++;
1454 	return (count);
1455 }
1456 
1457 static u_int	moea_vsidcontext;
1458 
1459 void
1460 moea_pinit(mmu_t mmu, pmap_t pmap)
1461 {
1462 	int	i, mask;
1463 	u_int	entropy;
1464 
1465 	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
1466 	PMAP_LOCK_INIT(pmap);
1467 
1468 	entropy = 0;
1469 	__asm __volatile("mftb %0" : "=r"(entropy));
1470 
1471 	/*
1472 	 * Allocate some segment registers for this pmap.
1473 	 */
1474 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1475 		u_int	hash, n;
1476 
1477 		/*
1478 		 * Create a new value by multiplying by a prime and adding in
1479 		 * entropy from the timebase register.  This is to make the
1480 		 * VSID more random so that the PT hash function collides
1481 		 * less often.  (Note that the prime causes gcc to do shifts
1482 		 * instead of a multiply.)
1483 		 */
1484 		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
1485 		hash = moea_vsidcontext & (NPMAPS - 1);
1486 		if (hash == 0)		/* 0 is special, avoid it */
1487 			continue;
1488 		n = hash >> 5;
1489 		mask = 1 << (hash & (VSID_NBPW - 1));
1490 		hash = (moea_vsidcontext & 0xfffff);
1491 		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
1492 			/* anything free in this bucket? */
1493 			if (moea_vsid_bitmap[n] == 0xffffffff) {
1494 				entropy = (moea_vsidcontext >> 20);
1495 				continue;
1496 			}
1497 			i = ffs(~moea_vsid_bitmap[n]) - 1;
1498 			mask = 1 << i;
1499 			hash &= 0xfffff & ~(VSID_NBPW - 1);
1500 			hash |= i;
1501 		}
1502 		moea_vsid_bitmap[n] |= mask;
1503 		for (i = 0; i < 16; i++)
1504 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1505 		return;
1506 	}
1507 
1508 	panic("moea_pinit: out of segments");
1509 }
1510 
1511 /*
1512  * Initialize the pmap associated with process 0.
1513  */
1514 void
1515 moea_pinit0(mmu_t mmu, pmap_t pm)
1516 {
1517 
1518 	moea_pinit(mmu, pm);
1519 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1520 }
1521 
1522 /*
1523  * Set the physical protection on the specified range of this map as requested.
1524  */
1525 void
1526 moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1527     vm_prot_t prot)
1528 {
1529 	struct	pvo_entry *pvo;
1530 	struct	pte *pt;
1531 	int	pteidx;
1532 
1533 	CTR4(KTR_PMAP, "moea_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1534 	    eva, prot);
1535 
1536 
1537 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1538 	    ("moea_protect: non current pmap"));
1539 
1540 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1541 		moea_remove(mmu, pm, sva, eva);
1542 		return;
1543 	}
1544 
1545 	vm_page_lock_queues();
1546 	PMAP_LOCK(pm);
1547 	for (; sva < eva; sva += PAGE_SIZE) {
1548 		pvo = moea_pvo_find_va(pm, sva, &pteidx);
1549 		if (pvo == NULL)
1550 			continue;
1551 
1552 		if ((prot & VM_PROT_EXECUTE) == 0)
1553 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1554 
1555 		/*
1556 		 * Grab the PTE pointer before we diddle with the cached PTE
1557 		 * copy.
1558 		 */
1559 		pt = moea_pvo_to_pte(pvo, pteidx);
1560 		/*
1561 		 * Change the protection of the page.
1562 		 */
1563 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1564 		pvo->pvo_pte.pte_lo |= PTE_BR;
1565 
1566 		/*
1567 		 * If the PVO is in the page table, update that pte as well.
1568 		 */
1569 		if (pt != NULL) {
1570 			moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1571 			mtx_unlock(&moea_table_mutex);
1572 		}
1573 	}
1574 	vm_page_unlock_queues();
1575 	PMAP_UNLOCK(pm);
1576 }
1577 
1578 /*
1579  * Map a list of wired pages into kernel virtual address space.  This is
1580  * intended for temporary mappings which do not need page modification or
1581  * references recorded.  Existing mappings in the region are overwritten.
1582  */
1583 void
1584 moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1585 {
1586 	vm_offset_t va;
1587 
1588 	va = sva;
1589 	while (count-- > 0) {
1590 		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1591 		va += PAGE_SIZE;
1592 		m++;
1593 	}
1594 }
1595 
1596 /*
1597  * Remove page mappings from kernel virtual address space.  Intended for
1598  * temporary mappings entered by moea_qenter.
1599  */
1600 void
1601 moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
1602 {
1603 	vm_offset_t va;
1604 
1605 	va = sva;
1606 	while (count-- > 0) {
1607 		moea_kremove(mmu, va);
1608 		va += PAGE_SIZE;
1609 	}
1610 }
1611 
1612 void
1613 moea_release(mmu_t mmu, pmap_t pmap)
1614 {
1615         int idx, mask;
1616 
1617 	/*
1618 	 * Free segment register's VSID
1619 	 */
1620         if (pmap->pm_sr[0] == 0)
1621                 panic("moea_release");
1622 
1623         idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1624         mask = 1 << (idx % VSID_NBPW);
1625         idx /= VSID_NBPW;
1626         moea_vsid_bitmap[idx] &= ~mask;
1627 	PMAP_LOCK_DESTROY(pmap);
1628 }
1629 
1630 /*
1631  * Remove the given range of addresses from the specified map.
1632  */
1633 void
1634 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1635 {
1636 	struct	pvo_entry *pvo;
1637 	int	pteidx;
1638 
1639 	vm_page_lock_queues();
1640 	PMAP_LOCK(pm);
1641 	for (; sva < eva; sva += PAGE_SIZE) {
1642 		pvo = moea_pvo_find_va(pm, sva, &pteidx);
1643 		if (pvo != NULL) {
1644 			moea_pvo_remove(pvo, pteidx);
1645 		}
1646 	}
1647 	PMAP_UNLOCK(pm);
1648 	vm_page_unlock_queues();
1649 }
1650 
1651 /*
1652  * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
1653  * will reflect changes in pte's back to the vm_page.
1654  */
1655 void
1656 moea_remove_all(mmu_t mmu, vm_page_t m)
1657 {
1658 	struct  pvo_head *pvo_head;
1659 	struct	pvo_entry *pvo, *next_pvo;
1660 	pmap_t	pmap;
1661 
1662 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1663 
1664 	pvo_head = vm_page_to_pvoh(m);
1665 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1666 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1667 
1668 		MOEA_PVO_CHECK(pvo);	/* sanity check */
1669 		pmap = pvo->pvo_pmap;
1670 		PMAP_LOCK(pmap);
1671 		moea_pvo_remove(pvo, -1);
1672 		PMAP_UNLOCK(pmap);
1673 	}
1674 	vm_page_flag_clear(m, PG_WRITEABLE);
1675 }
1676 
1677 /*
1678  * Allocate physical memory directly from the phys_avail map.
1679  * Can only be called from moea_bootstrap before avail start and end are
1680  * calculated.
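 *
 * The request is carved from the first phys_avail range that fits: if it
 * starts or ends exactly on a range boundary the range is shrunk in
 * place, otherwise the range is split in two and phys_avail_count grows.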
1681  */
1682 static vm_offset_t
1683 moea_bootstrap_alloc(vm_size_t size, u_int align)
1684 {
1685 	vm_offset_t	s, e;
1686 	int		i, j;
1687 
1688 	size = round_page(size);
1689 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1690 		if (align != 0)
1691 			s = (phys_avail[i] + align - 1) & ~(align - 1);
1692 		else
1693 			s = phys_avail[i];
1694 		e = s + size;
1695 
1696 		if (s < phys_avail[i] || e > phys_avail[i + 1])
1697 			continue;
1698 
1699 		if (s == phys_avail[i]) {
1700 			phys_avail[i] += size;
1701 		} else if (e == phys_avail[i + 1]) {
1702 			phys_avail[i + 1] -= size;
1703 		} else {
1704 			for (j = phys_avail_count * 2; j > i; j -= 2) {
1705 				phys_avail[j] = phys_avail[j - 2];
1706 				phys_avail[j + 1] = phys_avail[j - 1];
1707 			}
1708 
1709 			phys_avail[i + 3] = phys_avail[i + 1];
1710 			phys_avail[i + 1] = s;
1711 			phys_avail[i + 2] = e;
1712 			phys_avail_count++;
1713 		}
1714 
1715 		return (s);
1716 	}
1717 	panic("moea_bootstrap_alloc: could not allocate memory");
1718 }
1719 
1720 static void
1721 moea_syncicache(vm_offset_t pa, vm_size_t len)
1722 {
1723 	__syncicache((void *)pa, len);
1724 }
1725 
1726 static void
1727 tlbia(void)
1728 {
1729 	caddr_t	i;
1730 
1731 	SYNC();
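	/*
	 * The TLB is indexed by low-order effective address bits, so issuing
	 * tlbie for every page in a 256KB window touches every congruence
	 * class and flushes the entire TLB, assuming the classic 64-set
	 * organization of the 32-bit CPUs this pmap targets.
	 */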
1732 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1733 		TLBIE(i);
1734 		EIEIO();
1735 	}
1736 	TLBSYNC();
1737 	SYNC();
1738 }
1739 
1740 static int
1741 moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1742     vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1743 {
1744 	struct	pvo_entry *pvo;
1745 	u_int	sr;
1746 	int	first;
1747 	u_int	ptegidx;
1748 	int	i;
1749 	int     bootstrap;
1750 
1751 	moea_pvo_enter_calls++;
1752 	first = 0;
1753 	bootstrap = 0;
1754 
1755 	/*
1756 	 * Compute the PTE Group index.
1757 	 */
1758 	va &= ~ADDR_POFF;
1759 	sr = va_to_sr(pm->pm_sr, va);
1760 	ptegidx = va_to_pteg(sr, va);
1761 
1762 	/*
1763 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
1764 	 * there is a mapping.
1765 	 */
1766 	mtx_lock(&moea_table_mutex);
1767 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
1768 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1769 			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
1770 			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
1771 			    (pte_lo & PTE_PP)) {
1772 				mtx_unlock(&moea_table_mutex);
1773 				return (0);
1774 			}
1775 			moea_pvo_remove(pvo, -1);
1776 			break;
1777 		}
1778 	}
1779 
1780 	/*
1781 	 * If we aren't overwriting a mapping, try to allocate.
1782 	 */
1783 	if (moea_initialized) {
1784 		pvo = uma_zalloc(zone, M_NOWAIT);
1785 	} else {
1786 		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
1787 			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
1788 			      moea_bpvo_pool_index, BPVO_POOL_SIZE,
1789 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
1790 		}
1791 		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
1792 		moea_bpvo_pool_index++;
1793 		bootstrap = 1;
1794 	}
1795 
1796 	if (pvo == NULL) {
1797 		mtx_unlock(&moea_table_mutex);
1798 		return (ENOMEM);
1799 	}
1800 
1801 	moea_pvo_entries++;
1802 	pvo->pvo_vaddr = va;
1803 	pvo->pvo_pmap = pm;
1804 	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
1805 	pvo->pvo_vaddr &= ~ADDR_POFF;
1806 	if (flags & PVO_EXECUTABLE)
1807 		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1808 	if (flags & PVO_WIRED)
1809 		pvo->pvo_vaddr |= PVO_WIRED;
1810 	if (pvo_head != &moea_pvo_kunmanaged)
1811 		pvo->pvo_vaddr |= PVO_MANAGED;
1812 	if (bootstrap)
1813 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1814 	if (flags & PVO_FAKE)
1815 		pvo->pvo_vaddr |= PVO_FAKE;
1816 
1817 	moea_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
1818 
1819 	/*
1820 	 * Remember if the list was empty; if so, this pvo will be the
1821 	 * first item on it.
1822 	 */
1823 	if (LIST_FIRST(pvo_head) == NULL)
1824 		first = 1;
1825 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1826 
1827 	if (pvo->pvo_vaddr & PVO_WIRED)
1828 		pm->pm_stats.wired_count++;
1829 	pm->pm_stats.resident_count++;
1830 
1831 	/*
1832 	 * We hope this succeeds but it isn't required.
1833 	 */
1834 	i = moea_pte_insert(ptegidx, &pvo->pvo_pte);
1835 	if (i >= 0) {
1836 		PVO_PTEGIDX_SET(pvo, i);
1837 	} else {
1839 		moea_pte_overflow++;
1840 	}
1841 	mtx_unlock(&moea_table_mutex);
1842 
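	/*
	 * ENOENT is not an error here; it simply tells the caller that
	 * this was the first mapping entered for the page.
	 */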
1843 	return (first ? ENOENT : 0);
1844 }
1845 
1846 static void
1847 moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
1848 {
1849 	struct	pte *pt;
1850 
1851 	/*
1852 	 * If there is an active pte entry, we need to deactivate it (and
1853 	 * save the ref & cfg bits).
1854 	 * save the ref & chg bits).
1855 	pt = moea_pvo_to_pte(pvo, pteidx);
1856 	if (pt != NULL) {
1857 		moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1858 		mtx_unlock(&moea_table_mutex);
1859 		PVO_PTEGIDX_CLR(pvo);
1860 	} else {
1861 		moea_pte_overflow--;
1862 	}
1863 
1864 	/*
1865 	 * Update our statistics.
1866 	 */
1867 	pvo->pvo_pmap->pm_stats.resident_count--;
1868 	if (pvo->pvo_vaddr & PVO_WIRED)
1869 		pvo->pvo_pmap->pm_stats.wired_count--;
1870 
1871 	/*
1872 	 * Save the REF/CHG bits into their cache if the page is managed.
1873 	 */
1874 	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
1875 		struct	vm_page *pg;
1876 
1877 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1878 		if (pg != NULL) {
1879 			moea_attr_save(pg, pvo->pvo_pte.pte_lo &
1880 			    (PTE_REF | PTE_CHG));
1881 		}
1882 	}
1883 
1884 	/*
1885 	 * Remove this PVO from the PV list.
1886 	 */
1887 	LIST_REMOVE(pvo, pvo_vlink);
1888 
1889 	/*
1890 	 * Remove this from the overflow list and return it to the pool
1891 	 * if we aren't going to reuse it.
1892 	 */
1893 	LIST_REMOVE(pvo, pvo_olink);
1894 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1895 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
1896 		    moea_upvo_zone, pvo);
1897 	moea_pvo_entries--;
1898 	moea_pvo_remove_calls++;
1899 }
1900 
1901 static __inline int
1902 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1903 {
1904 	int	pteidx;
1905 
1906 	/*
1907 	 * We can find the actual pte entry without searching by grabbing
1908 	 * the PTE slot number cached in the low bits of pvo_vaddr and by
1909 	 * noticing the HID bit.
1910 	 */
1911 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1912 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1913 		pteidx ^= moea_pteg_mask * 8;
1914 
1915 	return (pteidx);
1916 }
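
/*
 * For example, with moea_pteg_mask == 0x3ff (the smallest, 64 KB, page
 * table), a pvo cached in slot 2 of PTEG 5 gives pteidx = 5 * 8 + 2 = 42;
 * if the entry was installed via the secondary hash (PTE_HID set), XORing
 * with 0x3ff * 8 flips the PTEG bits while leaving the slot number intact.
 */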
1917 
1918 static struct pvo_entry *
1919 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1920 {
1921 	struct	pvo_entry *pvo;
1922 	int	ptegidx;
1923 	u_int	sr;
1924 
1925 	va &= ~ADDR_POFF;
1926 	sr = va_to_sr(pm->pm_sr, va);
1927 	ptegidx = va_to_pteg(sr, va);
1928 
1929 	mtx_lock(&moea_table_mutex);
1930 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
1931 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1932 			if (pteidx_p)
1933 				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
1934 			break;
1935 		}
1936 	}
1937 	mtx_unlock(&moea_table_mutex);
1938 
1939 	return (pvo);
1940 }
1941 
1942 static struct pte *
1943 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1944 {
1945 	struct	pte *pt;
1946 
1947 	/*
1948 	 * If we haven't been supplied the ptegidx, calculate it.
1949 	 */
1950 	if (pteidx == -1) {
1951 		int	ptegidx;
1952 		u_int	sr;
1953 
1954 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1955 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
1956 		pteidx = moea_pvo_pte_index(pvo, ptegidx);
1957 	}
1958 
1959 	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
1960 	mtx_lock(&moea_table_mutex);
1961 
1962 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1963 		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
1964 		    "valid pte index", pvo);
1965 	}
1966 
1967 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1968 		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
1969 		    "but no valid pte", pvo);
1970 	}
1971 
1972 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1973 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1974 			panic("moea_pvo_to_pte: pvo %p has valid pte in "
1975 			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
1976 		}
1977 
1978 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
1979 		    != 0) {
1980 			panic("moea_pvo_to_pte: pvo %p pte does not match "
1981 			    "pte %p in moea_pteg_table", pvo, pt);
1982 		}
1983 
1984 		mtx_assert(&moea_table_mutex, MA_OWNED);
1985 		return (pt);
1986 	}
1987 
1988 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1989 		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
1990 		    "moea_pteg_table but valid in pvo", pvo, pt);
1991 	}
1992 
1993 	mtx_unlock(&moea_table_mutex);
1994 	return (NULL);
1995 }
1996 
1997 /*
1998  * XXX: THIS STUFF SHOULD BE IN pte.c?
1999  */
2000 int
2001 moea_pte_spill(vm_offset_t addr)
2002 {
2003 	struct	pvo_entry *source_pvo, *victim_pvo;
2004 	struct	pvo_entry *pvo;
2005 	int	ptegidx, i, j;
2006 	u_int	sr;
2007 	struct	pteg *pteg;
2008 	struct	pte *pt;
2009 
2010 	moea_pte_spills++;
2011 
2012 	sr = mfsrin(addr);
2013 	ptegidx = va_to_pteg(sr, addr);
2014 
2015 	/*
2016 	 * Have to substitute some entry.  Use the primary hash for this.
2017 	 * Use low bits of timebase as random generator.
2018 	 */
2019 	pteg = &moea_pteg_table[ptegidx];
2020 	mtx_lock(&moea_table_mutex);
2021 	__asm __volatile("mftb %0" : "=r"(i));
2022 	i &= 7;
2023 	pt = &pteg->pt[i];
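	/*
	 * (mftb reads the lower word of the time base; keeping only its low
	 * three bits picks one of the eight slots in the group as the
	 * potential victim.)
	 */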
2024 
2025 	source_pvo = NULL;
2026 	victim_pvo = NULL;
2027 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
2028 		/*
2029 		 * We need to find a pvo entry for this address.
2030 		 */
2031 		MOEA_PVO_CHECK(pvo);
2032 		if (source_pvo == NULL &&
2033 		    moea_pte_match(&pvo->pvo_pte, sr, addr,
2034 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
2035 			/*
2036 			 * We have found an entry to be spilled into the PTEG.
2037 			 * The PTE is now valid, so we know it's active.
2038 			 */
2039 			j = moea_pte_insert(ptegidx, &pvo->pvo_pte);
2040 
2041 			if (j >= 0) {
2042 				PVO_PTEGIDX_SET(pvo, j);
2043 				moea_pte_overflow--;
2044 				MOEA_PVO_CHECK(pvo);
2045 				mtx_unlock(&moea_table_mutex);
2046 				return (1);
2047 			}
2048 
2049 			source_pvo = pvo;
2050 
2051 			if (victim_pvo != NULL)
2052 				break;
2053 		}
2054 
2055 		/*
2056 		 * We also need the pvo entry of the victim we are replacing,
2057 		 * so that we can save the R & C bits of its PTE.
2058 		 */
2059 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2060 		    moea_pte_compare(pt, &pvo->pvo_pte)) {
2061 			victim_pvo = pvo;
2062 			if (source_pvo != NULL)
2063 				break;
2064 		}
2065 	}
2066 
2067 	if (source_pvo == NULL) {
2068 		mtx_unlock(&moea_table_mutex);
2069 		return (0);
2070 	}
2071 
2072 	if (victim_pvo == NULL) {
2073 		if ((pt->pte_hi & PTE_HID) == 0)
2074 			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
2075 			    "entry", pt);
2076 
2077 		/*
2078 		 * If this is a secondary PTE, we need to search its primary
2079 		 * pvo bucket for the matching PVO.
2080 		 */
2081 		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
2082 		    pvo_olink) {
2083 			MOEA_PVO_CHECK(pvo);
2084 			/*
2085 			 * We also need the pvo entry of the victim we are
2086 			 * replacing, so that we can save the R & C bits of its PTE.
2087 			 */
2088 			if (moea_pte_compare(pt, &pvo->pvo_pte)) {
2089 				victim_pvo = pvo;
2090 				break;
2091 			}
2092 		}
2093 
2094 		if (victim_pvo == NULL)
2095 			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
2096 			    "entry", pt);
2097 	}
2098 
2099 	/*
2100 	 * We are invalidating the TLB entry for the EA we are replacing even
2101 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2102 	 * contained in the TLB entry.
2103 	 */
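	/*
	 * The slot being stolen belongs to the primary group for this EA,
	 * so the source mapping must not carry the secondary-hash (HID)
	 * bit when it is installed there.
	 */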
2104 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
2105 
2106 	moea_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
2107 	moea_pte_set(pt, &source_pvo->pvo_pte);
2108 
2109 	PVO_PTEGIDX_CLR(victim_pvo);
2110 	PVO_PTEGIDX_SET(source_pvo, i);
2111 	moea_pte_replacements++;
2112 
2113 	MOEA_PVO_CHECK(victim_pvo);
2114 	MOEA_PVO_CHECK(source_pvo);
2115 
2116 	mtx_unlock(&moea_table_mutex);
2117 	return (1);
2118 }
2119 
2120 static int
2121 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2122 {
2123 	struct	pte *pt;
2124 	int	i;
2125 
2126 	mtx_assert(&moea_table_mutex, MA_OWNED);
2127 
2128 	/*
2129 	 * First try primary hash.
2130 	 */
2131 	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2132 		if ((pt->pte_hi & PTE_VALID) == 0) {
2133 			pvo_pt->pte_hi &= ~PTE_HID;
2134 			moea_pte_set(pt, pvo_pt);
2135 			return (i);
2136 		}
2137 	}
2138 
2139 	/*
2140 	 * Now try secondary hash.
2141 	 */
2142 	ptegidx ^= moea_pteg_mask;
2143 
2144 	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2145 		if ((pt->pte_hi & PTE_VALID) == 0) {
2146 			pvo_pt->pte_hi |= PTE_HID;
2147 			moea_pte_set(pt, pvo_pt);
2148 			return (i);
2149 		}
2150 	}
2151 
2153 	return (-1);
2154 }
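
/*
 * For example, with moea_pteg_mask == 0x3ff a mapping whose primary group
 * is 0x123 has 0x123 ^ 0x3ff == 0x2dc as its secondary group; PTE_HID
 * records which hash function was used so the entry can be located again
 * later.
 */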
2155 
2156 static boolean_t
2157 moea_query_bit(vm_page_t m, int ptebit)
2158 {
2159 	struct	pvo_entry *pvo;
2160 	struct	pte *pt;
2161 
2162 #if 0
2163 	if (moea_attr_fetch(m) & ptebit)
2164 		return (TRUE);
2165 #endif
2166 
2167 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2168 		MOEA_PVO_CHECK(pvo);	/* sanity check */
2169 
2170 		/*
2171 		 * See if we saved the bit off.  If so, cache it and return
2172 		 * success.
2173 		 */
2174 		if (pvo->pvo_pte.pte_lo & ptebit) {
2175 			moea_attr_save(m, ptebit);
2176 			MOEA_PVO_CHECK(pvo);	/* sanity check */
2177 			return (TRUE);
2178 		}
2179 	}
2180 
2181 	/*
2182 	 * No luck, now go through the hard part of looking at the PTEs
2183 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2184 	 * the PTEs.
2185 	 */
2186 	SYNC();
2187 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2188 		MOEA_PVO_CHECK(pvo);	/* sanity check */
2189 
2190 		/*
2191 		 * See if this pvo has a valid PTE.  if so, fetch the
2192 		 * REF/CHG bits from the valid PTE.  If the appropriate
2193 		 * ptebit is set, cache it and return success.
2194 		 */
2195 		pt = moea_pvo_to_pte(pvo, -1);
2196 		if (pt != NULL) {
2197 			moea_pte_synch(pt, &pvo->pvo_pte);
2198 			mtx_unlock(&moea_table_mutex);
2199 			if (pvo->pvo_pte.pte_lo & ptebit) {
2200 				moea_attr_save(m, ptebit);
2201 				MOEA_PVO_CHECK(pvo);	/* sanity check */
2202 				return (TRUE);
2203 			}
2204 		}
2205 	}
2206 
2207 	return (FALSE);
2208 }
2209 
2210 static u_int
2211 moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
2212 {
2213 	u_int	count;
2214 	struct	pvo_entry *pvo;
2215 	struct	pte *pt;
2216 	int	rv;
2217 
2218 	/*
2219 	 * Clear the cached value.
2220 	 */
2221 	rv = moea_attr_fetch(m);
2222 	moea_attr_clear(m, ptebit);
2223 
2224 	/*
2225 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2226 	 * we can reset the right ones).  Note that since the pvo entries and
2227 	 * list heads are accessed via BAT0 and are never placed in the page
2228 	 * table, we don't have to worry about further accesses setting the
2229 	 * REF/CHG bits.
2230 	 */
2231 	SYNC();
2232 
2233 	/*
2234 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2235 	 * valid pte clear the ptebit from the valid pte.
2236 	 */
2237 	count = 0;
2238 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2239 		MOEA_PVO_CHECK(pvo);	/* sanity check */
2240 		pt = moea_pvo_to_pte(pvo, -1);
2241 		if (pt != NULL) {
2242 			moea_pte_synch(pt, &pvo->pvo_pte);
2243 			if (pvo->pvo_pte.pte_lo & ptebit) {
2244 				count++;
2245 				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2246 			}
2247 			mtx_unlock(&moea_table_mutex);
2248 		}
2249 		rv |= pvo->pvo_pte.pte_lo;
2250 		pvo->pvo_pte.pte_lo &= ~ptebit;
2251 		MOEA_PVO_CHECK(pvo);	/* sanity check */
2252 	}
2253 
2254 	if (origbit != NULL) {
2255 		*origbit = rv;
2256 	}
2257 
2258 	return (count);
2259 }
2260 
2261 /*
2262  * Return true if the physical range is encompassed by the battable[idx]
2263  */
2264 static int
2265 moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2266 {
2267 	u_int prot;
2268 	u_int32_t start;
2269 	u_int32_t end;
2270 	u_int32_t bat_ble;
2271 
2272 	/*
2273 	 * Return immediately if not a valid mapping
2274 	 */
2275 	if ((battable[idx].batu & BAT_Vs) == 0)
2276 		return (EINVAL);
2277 
2278 	/*
2279 	 * The BAT entry must be cache-inhibited, guarded, and r/w
2280 	 * so it can function as an i/o page
2281 	 */
2282 	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2283 	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2284 		return (EPERM);
2285 
2286 	/*
2287 	 * The address should be within the BAT range. Assume that the
2288 	 * start address in the BAT has the correct alignment (thus
2289 	 * not requiring masking)
2290 	 */
2291 	start = battable[idx].batl & BAT_PBS;
2292 	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2293 	end = start | (bat_ble << 15) | 0x7fff;
2294 
2295 	if ((pa < start) || ((pa + size) > end))
2296 		return (ERANGE);
2297 
2298 	return (0);
2299 }
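
/*
 * For instance, a valid 256 MB I/O BAT has BL == 0x7ff, so bat_ble above
 * works out to 0x1fff and (bat_ble << 15) | 0x7fff == 0x0fffffff, making
 * "end" the last byte of the 256 MB block.
 */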
2300 
2301 boolean_t
2302 moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2303 {
2304 	int i;
2305 
2306 	/*
2307 	 * This currently does not work for entries that
2308 	 * overlap 256M BAT segments.
2309 	 */
2310 
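	/*
	 * A return of 0 means a single BAT entry set up for I/O covers the
	 * entire range; EFAULT means that no single entry does.
	 */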
2311 	for (i = 0; i < 16; i++)
2312 		if (moea_bat_mapped(i, pa, size) == 0)
2313 			return (0);
2314 
2315 	return (EFAULT);
2316 }
2317 
2318 boolean_t
2319 moea_page_executable(mmu_t mmu, vm_page_t pg)
2320 {
2321 	return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
2322 }
2323 
2324 /*
2325  * Map a set of physical memory pages into the kernel virtual
2326  * address space. Return a pointer to where it is mapped. This
2327  * routine is intended to be used for mapping device memory,
2328  * NOT real memory.
2329  */
2330 void *
2331 moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2332 {
2333 	vm_offset_t va, tmpva, ppa, offset;
2334 	int i;
2335 
2336 	ppa = trunc_page(pa);
2337 	offset = pa & PAGE_MASK;
2338 	size = roundup(offset + size, PAGE_SIZE);
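	/*
	 * (e.g. a request for 0x100 bytes at pa 0x80000404 becomes ppa
	 * 0x80000000, offset 0x404 and one 4 KB page of mapping, assuming
	 * 4 KB pages; the caller gets back va + 0x404.)
	 */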
2339 
2340 	GIANT_REQUIRED;
2341 
2342 	/*
2343 	 * If the physical address lies within a valid BAT table entry,
2344 	 * return the 1:1 mapping. This currently doesn't work
2345 	 * for regions that overlap 256M BAT segments.
2346 	 */
2347 	for (i = 0; i < 16; i++) {
2348 		if (moea_bat_mapped(i, pa, size) == 0)
2349 			return ((void *) pa);
2350 	}
2351 
2352 	va = kmem_alloc_nofault(kernel_map, size);
2353 	if (!va)
2354 		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
2355 
2356 	for (tmpva = va; size > 0;) {
2357 		moea_kenter(mmu, tmpva, ppa);
2358 		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
2359 		size -= PAGE_SIZE;
2360 		tmpva += PAGE_SIZE;
2361 		ppa += PAGE_SIZE;
2362 	}
2363 
2364 	return ((void *)(va + offset));
2365 }
2366 
2367 void
2368 moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2369 {
2370 	vm_offset_t base, offset;
2371 
2372 	/*
2373 	 * If this is outside kernel virtual space, then it's a
2374 	 * battable entry and doesn't require unmapping
2375 	 */
2376 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2377 		base = trunc_page(va);
2378 		offset = va & PAGE_MASK;
2379 		size = roundup(offset + size, PAGE_SIZE);
2380 		kmem_free(kernel_map, base, size);
2381 	}
2382 }
2383