xref: /freebsd/sys/powerpc/aim/mmu_oea.c (revision 6b3455a7665208c366849f0b2b3bc916fb97516e)
1 /*
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 /*
97  * Manages physical address maps.
98  *
99  * In addition to hardware address maps, this module is called upon to
100  * provide software-use-only maps which may or may not be stored in the
101  * same form as hardware maps.  These pseudo-maps are used to store
102  * intermediate results from copy operations to and from address spaces.
103  *
104  * Since the information managed by this module is also stored by the
105  * logical address mapping module, this module may throw away valid virtual
106  * to physical mappings at almost any time.  However, invalidations of
107  * mappings must be done as requested.
108  *
109  * In order to cope with hardware architectures which make virtual to
110  * physical map invalidates expensive, this module may delay invalidate or
111  * reduced protection operations until such time as they are actually
112  * necessary.  This module is given full information as to which processors
113  * are currently using which maps, and to when physical maps must be made
114  * correct.
115  */
116 
117 #include "opt_kstack_pages.h"
118 
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
129 
130 #include <dev/ofw/openfirm.h>
131 
132 #include <vm/vm.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/uma.h>
142 
143 #include <machine/cpu.h>
144 #include <machine/powerpc.h>
145 #include <machine/bat.h>
146 #include <machine/frame.h>
147 #include <machine/md_var.h>
148 #include <machine/psl.h>
149 #include <machine/pte.h>
150 #include <machine/sr.h>
151 
152 #define	PMAP_DEBUG
153 
154 #define TODO	panic("%s: not implemented", __func__);
155 
156 #define	PMAP_LOCK(pm)
157 #define	PMAP_UNLOCK(pm)
158 
159 #define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
160 #define	TLBSYNC()	__asm __volatile("tlbsync");
161 #define	SYNC()		__asm __volatile("sync");
162 #define	EIEIO()		__asm __volatile("eieio");
163 
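/*
 * A VSID, as stored in pm_sr[], carries the segment register number in
 * its low four bits and the 20-bit VSID hash in the bits above it, so
 * the sixteen segment registers of a pmap share a single hash value.
 */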
164 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
165 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
166 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
167 
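/*
 * PVO flag and PTEG-slot-index bits are kept in the low, page-offset
 * bits of pvo_vaddr, which are otherwise unused because PVO virtual
 * addresses are page aligned; PVO_VADDR() masks them off to recover the
 * address itself.
 */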
168 #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
169 #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
170 #define	PVO_WIRED		0x0010		/* PVO entry is wired */
171 #define	PVO_MANAGED		0x0020		/* PVO entry is managed */
172 #define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
173 #define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
174 						   bootstrap */
175 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
176 #define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
177 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
178 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
179 #define	PVO_PTEGIDX_CLR(pvo)	\
180 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
181 #define	PVO_PTEGIDX_SET(pvo, i)	\
182 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
183 
184 #define	PMAP_PVO_CHECK(pvo)
185 
186 struct ofw_map {
187 	vm_offset_t	om_va;
188 	vm_size_t	om_len;
189 	vm_offset_t	om_pa;
190 	u_int		om_mode;
191 };
192 
193 int	pmap_bootstrapped = 0;
194 
195 /*
196  * Virtual and physical address of message buffer.
197  */
198 struct		msgbuf *msgbufp;
199 vm_offset_t	msgbuf_phys;
200 
201 int pmap_pagedaemon_waken;
202 
203 /*
204  * Map of physical memory regions.
205  */
206 vm_offset_t	phys_avail[128];
207 u_int		phys_avail_count;
208 static struct	mem_region *regions;
209 static struct	mem_region *pregions;
210 int		regions_sz, pregions_sz;
211 static struct	ofw_map *translations;
212 
213 /*
214  * First and last available kernel virtual addresses.
215  */
216 vm_offset_t virtual_avail;
217 vm_offset_t virtual_end;
218 vm_offset_t kernel_vm_end;
219 
220 /*
221  * Kernel pmap.
222  */
223 struct pmap kernel_pmap_store;
224 extern struct pmap ofw_pmap;
225 
226 /*
227  * PTEG data.
228  */
229 static struct	pteg *pmap_pteg_table;
230 u_int		pmap_pteg_count;
231 u_int		pmap_pteg_mask;
232 
233 /*
234  * PVO data.
235  */
236 struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
237 struct	pvo_head pmap_pvo_kunmanaged =
238     LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged kernel pages */
239 struct	pvo_head pmap_pvo_unmanaged =
240     LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
241 
242 uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
243 uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
244 
245 #define	BPVO_POOL_SIZE	32768
246 static struct	pvo_entry *pmap_bpvo_pool;
247 static int	pmap_bpvo_pool_index = 0;
248 
249 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
250 static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
251 
252 static boolean_t pmap_initialized = FALSE;
253 
254 /*
255  * Statistics.
256  */
257 u_int	pmap_pte_valid = 0;
258 u_int	pmap_pte_overflow = 0;
259 u_int	pmap_pte_replacements = 0;
260 u_int	pmap_pvo_entries = 0;
261 u_int	pmap_pvo_enter_calls = 0;
262 u_int	pmap_pvo_remove_calls = 0;
263 u_int	pmap_pte_spills = 0;
264 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
265     0, "");
266 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
267     &pmap_pte_overflow, 0, "");
268 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
269     &pmap_pte_replacements, 0, "");
270 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
271     0, "");
272 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
273     &pmap_pvo_enter_calls, 0, "");
274 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
275     &pmap_pvo_remove_calls, 0, "");
276 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
277     &pmap_pte_spills, 0, "");
278 
279 struct	pvo_entry *pmap_pvo_zeropage;
280 
281 vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
282 u_int		pmap_rkva_count = 4;
283 
284 /*
285  * Allocate physical memory for use in pmap_bootstrap.
286  */
287 static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
288 
289 /*
290  * PTE calls.
291  */
292 static int		pmap_pte_insert(u_int, struct pte *);
293 
294 /*
295  * PVO calls.
296  */
297 static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
298 		    vm_offset_t, vm_offset_t, u_int, int);
299 static void	pmap_pvo_remove(struct pvo_entry *, int);
300 static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
301 static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
302 
303 /*
304  * Utility routines.
305  */
306 static struct		pvo_entry *pmap_rkva_alloc(void);
307 static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
308 			    struct pte *, int *);
309 static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
310 static void		pmap_syncicache(vm_offset_t, vm_size_t);
311 static boolean_t	pmap_query_bit(vm_page_t, int);
312 static u_int		pmap_clear_bit(vm_page_t, int, int *);
313 static void		tlbia(void);
314 
315 static __inline int
316 va_to_sr(u_int *sr, vm_offset_t va)
317 {
318 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
319 }
320 
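/*
 * Compute the primary PTEG index for a (segment register, address) pair:
 * the VSID is XORed with the virtual page index and the result is
 * truncated to the size of the hash table.  The secondary PTEG index
 * (used with PTE_HID) is this value complemented within the table.
 */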
321 static __inline u_int
322 va_to_pteg(u_int sr, vm_offset_t addr)
323 {
324 	u_int hash;
325 
326 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
327 	    ADDR_PIDX_SHFT);
328 	return (hash & pmap_pteg_mask);
329 }
330 
331 static __inline struct pvo_head *
332 pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
333 {
334 	struct	vm_page *pg;
335 
336 	pg = PHYS_TO_VM_PAGE(pa);
337 
338 	if (pg_p != NULL)
339 		*pg_p = pg;
340 
341 	if (pg == NULL)
342 		return (&pmap_pvo_unmanaged);
343 
344 	return (&pg->md.mdpg_pvoh);
345 }
346 
347 static __inline struct pvo_head *
348 vm_page_to_pvoh(vm_page_t m)
349 {
350 
351 	return (&m->md.mdpg_pvoh);
352 }
353 
354 static __inline void
355 pmap_attr_clear(vm_page_t m, int ptebit)
356 {
357 
358 	m->md.mdpg_attrs &= ~ptebit;
359 }
360 
361 static __inline int
362 pmap_attr_fetch(vm_page_t m)
363 {
364 
365 	return (m->md.mdpg_attrs);
366 }
367 
368 static __inline void
369 pmap_attr_save(vm_page_t m, int ptebit)
370 {
371 
372 	m->md.mdpg_attrs |= ptebit;
373 }
374 
375 static __inline int
376 pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
377 {
378 	if (pt->pte_hi == pvo_pt->pte_hi)
379 		return (1);
380 
381 	return (0);
382 }
383 
384 static __inline int
385 pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
386 {
387 	return (pt->pte_hi & ~PTE_VALID) ==
388 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
389 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
390 }
391 
392 static __inline void
393 pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
394 {
395 	/*
396 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
397 	 * set when the real pte is set in memory.
398 	 *
399 	 * Note: Don't set the valid bit for correct operation of tlb update.
400 	 */
401 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
402 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
403 	pt->pte_lo = pte_lo;
404 }
405 
406 static __inline void
407 pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
408 {
409 
410 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
411 }
412 
413 static __inline void
414 pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
415 {
416 
417 	/*
418 	 * As shown in Section 7.6.3.2.3
419 	 */
420 	pt->pte_lo &= ~ptebit;
421 	TLBIE(va);
422 	EIEIO();
423 	TLBSYNC();
424 	SYNC();
425 }
426 
427 static __inline void
428 pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
429 {
430 
431 	pvo_pt->pte_hi |= PTE_VALID;
432 
433 	/*
434 	 * Update the PTE as defined in section 7.6.3.1.
435 	 * Note that the REF/CHG bits are from pvo_pt and thus should have
436 	 * been saved so this routine can restore them (if desired).
437 	 */
438 	pt->pte_lo = pvo_pt->pte_lo;
439 	EIEIO();
440 	pt->pte_hi = pvo_pt->pte_hi;
441 	SYNC();
442 	pmap_pte_valid++;
443 }
444 
445 static __inline void
446 pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
447 {
448 
449 	pvo_pt->pte_hi &= ~PTE_VALID;
450 
451 	/*
452 	 * Force the ref & chg bits back into the PTEs.
453 	 */
454 	SYNC();
455 
456 	/*
457 	 * Invalidate the pte.
458 	 */
459 	pt->pte_hi &= ~PTE_VALID;
460 
461 	SYNC();
462 	TLBIE(va);
463 	EIEIO();
464 	TLBSYNC();
465 	SYNC();
466 
467 	/*
468 	 * Save the reg & chg bits.
469 	 * Save the ref & chg bits.
470 	pmap_pte_synch(pt, pvo_pt);
471 	pmap_pte_valid--;
472 }
473 
474 static __inline void
475 pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
476 {
477 
478 	/*
479 	 * Invalidate the PTE
480 	 */
481 	pmap_pte_unset(pt, pvo_pt, va);
482 	pmap_pte_set(pt, pvo_pt);
483 }
484 
485 /*
486  * Quick sort callout for comparing memory regions.
487  */
488 static int	mr_cmp(const void *a, const void *b);
489 static int	om_cmp(const void *a, const void *b);
490 
491 static int
492 mr_cmp(const void *a, const void *b)
493 {
494 	const struct	mem_region *regiona;
495 	const struct	mem_region *regionb;
496 
497 	regiona = a;
498 	regionb = b;
499 	if (regiona->mr_start < regionb->mr_start)
500 		return (-1);
501 	else if (regiona->mr_start > regionb->mr_start)
502 		return (1);
503 	else
504 		return (0);
505 }
506 
507 static int
508 om_cmp(const void *a, const void *b)
509 {
510 	const struct	ofw_map *mapa;
511 	const struct	ofw_map *mapb;
512 
513 	mapa = a;
514 	mapb = b;
515 	if (mapa->om_pa < mapb->om_pa)
516 		return (-1);
517 	else if (mapa->om_pa > mapb->om_pa)
518 		return (1);
519 	else
520 		return (0);
521 }
522 
523 void
524 pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
525 {
526 	ihandle_t	mmui;
527 	phandle_t	chosen, mmu;
528 	int		sz;
529 	int		i, j;
530 	int		ofw_mappings;
531 	vm_size_t	size, physsz;
532 	vm_offset_t	pa, va, off;
533 	u_int		batl, batu;
534 
535         /*
536          * Set up BAT0 to map the lowest 256 MB area
537          */
538         battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
539         battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
540 
541         /*
542          * Map PCI memory space.
543          */
544         battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
545         battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
546 
547         battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
548         battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
549 
550         battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
551         battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
552 
553         battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
554         battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
555 
556         /*
557          * Map obio devices.
558          */
559         battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
560         battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
561 
562 	/*
563 	 * Use an IBAT and a DBAT to map the bottom segment of memory
564 	 * where we are.
565 	 */
566 	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
567 	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
568 	__asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n"
569 	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
570 	    :: "r"(batu), "r"(batl));
571 
572 #if 0
573 	/* map frame buffer */
574 	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
575 	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
576 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
577 	    :: "r"(batu), "r"(batl));
578 #endif
579 
580 #if 1
581 	/* map pci space */
582 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
583 	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
584 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
585 	    :: "r"(batu), "r"(batl));
586 #endif
587 
588 	/*
589 	 * Set the start and end of kva.
590 	 */
591 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
592 	virtual_end = VM_MAX_KERNEL_ADDRESS;
593 
594 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
595 	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
596 
597 	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
598 	for (i = 0; i < pregions_sz; i++) {
599 		vm_offset_t pa;
600 		vm_offset_t end;
601 
602 		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
603 			pregions[i].mr_start,
604 			pregions[i].mr_start + pregions[i].mr_size,
605 			pregions[i].mr_size);
606 		/*
607 		 * Install entries into the BAT table to allow all
608 		 * of physmem to be covered by on-demand BAT entries.
609 		 * The loop will sometimes set the same battable element
610 		 * twice, but that's fine since they won't be used for
611 		 * a while yet.
612 		 */
613 		pa = pregions[i].mr_start & 0xf0000000;
614 		end = pregions[i].mr_start + pregions[i].mr_size;
615 		do {
616 			u_int	n = pa >> ADDR_SR_SHFT;
617 
618 			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
619 			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
620 			pa += SEGMENT_LENGTH;
621 		} while (pa < end);
622 	}
623 
624 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
625 		panic("pmap_bootstrap: phys_avail too small");
626 	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
627 	phys_avail_count = 0;
628 	physsz = 0;
629 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
630 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
631 		    regions[i].mr_start + regions[i].mr_size,
632 		    regions[i].mr_size);
633 		phys_avail[j] = regions[i].mr_start;
634 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
635 		phys_avail_count++;
636 		physsz += regions[i].mr_size;
637 	}
638 	physmem = btoc(physsz);
639 
640 	/*
641 	 * Allocate PTEG table.
642 	 */
643 #ifdef PTEGCOUNT
644 	pmap_pteg_count = PTEGCOUNT;
645 #else
646 	pmap_pteg_count = 0x1000;
647 
648 	while (pmap_pteg_count < physmem)
649 		pmap_pteg_count <<= 1;
650 
651 	pmap_pteg_count >>= 1;
652 #endif /* PTEGCOUNT */
653 
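	/*
	 * The PTEG table must contain a power-of-two number of 64-byte
	 * PTEGs and must be aligned to its own size, which is why the
	 * bootstrap allocation below uses the table size as its alignment.
	 */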
654 	size = pmap_pteg_count * sizeof(struct pteg);
655 	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
656 	    size);
657 	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
658 	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
659 	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
660 	pmap_pteg_mask = pmap_pteg_count - 1;
661 
662 	/*
663 	 * Allocate pv/overflow lists.
664 	 */
665 	size = sizeof(struct pvo_head) * pmap_pteg_count;
666 	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
667 	    PAGE_SIZE);
668 	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
669 	for (i = 0; i < pmap_pteg_count; i++)
670 		LIST_INIT(&pmap_pvo_table[i]);
671 
672 	/*
673 	 * Allocate the message buffer.
674 	 */
675 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
676 
677 	/*
678 	 * Initialise the unmanaged pvo pool.
679 	 */
680 	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
681 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
682 	pmap_bpvo_pool_index = 0;
683 
684 	/*
685 	 * Make sure kernel vsid is allocated as well as VSID 0.
686 	 */
687 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
688 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
689 	pmap_vsid_bitmap[0] |= 1;
690 
691 	/*
692 	 * Set up the OpenFirmware pmap and add its mappings.
693 	 */
694 	pmap_pinit(&ofw_pmap);
695 	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
696 	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
697 	if ((chosen = OF_finddevice("/chosen")) == -1)
698 		panic("pmap_bootstrap: can't find /chosen");
699 	OF_getprop(chosen, "mmu", &mmui, 4);
700 	if ((mmu = OF_instance_to_package(mmui)) == -1)
701 		panic("pmap_bootstrap: can't get mmu package");
702 	if ((sz = OF_getproplen(mmu, "translations")) == -1)
703 		panic("pmap_bootstrap: can't get ofw translation count");
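	/*
	 * Borrow space at the start of a free physical region to hold the
	 * OpenFirmware "translations" property while its entries are
	 * entered into the OFW pmap below.
	 */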
704 	translations = NULL;
705 	for (i = 0; phys_avail[i] != 0; i += 2) {
706 		if (phys_avail[i + 1] >= sz) {
707 			translations = (struct ofw_map *)phys_avail[i];
708 			break;
709 		}
710 	}
711 	if (translations == NULL)
712 		panic("pmap_bootstrap: no space to copy translations");
713 	bzero(translations, sz);
714 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
715 		panic("pmap_bootstrap: can't get ofw translations");
716 	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
717 	sz /= sizeof(*translations);
718 	qsort(translations, sz, sizeof (*translations), om_cmp);
719 	for (i = 0, ofw_mappings = 0; i < sz; i++) {
720 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
721 		    translations[i].om_pa, translations[i].om_va,
722 		    translations[i].om_len);
723 
724 		/*
725 		 * If the mapping is 1:1, let the RAM and device on-demand
726 		 * BAT tables take care of the translation.
727 		 */
728 		if (translations[i].om_va == translations[i].om_pa)
729 			continue;
730 
731 		/* Enter the pages */
732 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
733 			struct	vm_page m;
734 
735 			m.phys_addr = translations[i].om_pa + off;
736 			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
737 				   VM_PROT_ALL, 1);
738 			ofw_mappings++;
739 		}
740 	}
741 #ifdef SMP
742 	TLBSYNC();
743 #endif
744 
745 	/*
746 	 * Initialize the kernel pmap (which is statically allocated).
747 	 */
748 	for (i = 0; i < 16; i++) {
749 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
750 	}
751 	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
752 	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL_SEGMENT;
753 	kernel_pmap->pm_active = ~0;
754 
755 	/*
756 	 * Allocate a kernel stack with a guard page for thread0 and map it
757 	 * into the kernel page map.
758 	 */
759 	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
760 	kstack0_phys = pa;
761 	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
762 	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
763 	    kstack0);
764 	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
765 	for (i = 0; i < KSTACK_PAGES; i++) {
766 		pa = kstack0_phys + i * PAGE_SIZE;
767 		va = kstack0 + i * PAGE_SIZE;
768 		pmap_kenter(va, pa);
769 		TLBIE(va);
770 	}
771 
772 	/*
773 	 * Calculate the last available physical address.
774 	 */
775 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
776 		;
777 	Maxmem = powerpc_btop(phys_avail[i + 1]);
778 
779 	/*
780 	 * Allocate virtual address space for the message buffer.
781 	 */
782 	msgbufp = (struct msgbuf *)virtual_avail;
783 	virtual_avail += round_page(MSGBUF_SIZE);
784 
785 	/*
786 	 * Initialize hardware.
787 	 */
788 	for (i = 0; i < 16; i++) {
789 		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
790 	}
791 	__asm __volatile ("mtsr %0,%1"
792 	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
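	/*
	 * Point the MMU at the PTEG table: SDR1 holds the table's base
	 * address together with HTABMASK, which encodes how much larger
	 * than the minimum 64KB (1024 PTEGs, hence the shift by 10) the
	 * table is.
	 */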
793 	__asm __volatile ("sync; mtsdr1 %0; isync"
794 	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
795 	tlbia();
796 
797 	pmap_bootstrapped++;
798 }
799 
800 /*
801  * Activate a user pmap.  The pmap must be activated before its address
802  * space can be accessed in any way.
803  */
804 void
805 pmap_activate(struct thread *td)
806 {
807 	pmap_t	pm, pmr;
808 
809 	/*
810 	 * Load all the data we need up front to encourage the compiler to
811 	 * not issue any loads while we have interrupts disabled below.
812 	 */
813 	pm = &td->td_proc->p_vmspace->vm_pmap;
814 
815 	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
816 		pmr = pm;
817 
818 	pm->pm_active |= PCPU_GET(cpumask);
819 	PCPU_SET(curpmap, pmr);
820 }
821 
822 void
823 pmap_deactivate(struct thread *td)
824 {
825 	pmap_t	pm;
826 
827 	pm = &td->td_proc->p_vmspace->vm_pmap;
828 	pm->pm_active &= ~(PCPU_GET(cpumask));
829 	PCPU_SET(curpmap, NULL);
830 }
831 
832 vm_offset_t
833 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
834 {
835 
836 	return (va);
837 }
838 
839 void
840 pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
841 {
842 	struct	pvo_entry *pvo;
843 
844 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
845 
846 	if (pvo != NULL) {
847 		if (wired) {
848 			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
849 				pm->pm_stats.wired_count++;
850 			pvo->pvo_vaddr |= PVO_WIRED;
851 		} else {
852 			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
853 				pm->pm_stats.wired_count--;
854 			pvo->pvo_vaddr &= ~PVO_WIRED;
855 		}
856 	}
857 }
858 
859 void
860 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
861 	  vm_size_t len, vm_offset_t src_addr)
862 {
863 
864 	/*
865 	 * This is not required for correctness; it is only an optimisation
866 	 * and may be implemented later.
867 	 */
868 }
869 
870 void
871 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
872 {
873 	vm_offset_t	dst;
874 	vm_offset_t	src;
875 
876 	dst = VM_PAGE_TO_PHYS(mdst);
877 	src = VM_PAGE_TO_PHYS(msrc);
878 
879 	kcopy((void *)src, (void *)dst, PAGE_SIZE);
880 }
881 
882 /*
883  * Zero a page of physical memory by temporarily mapping it into the tlb.
884  */
885 void
886 pmap_zero_page(vm_page_t m)
887 {
888 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
889 	caddr_t va;
890 
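	/*
	 * Pages below 256MB can be touched directly through the 1:1 BAT
	 * mapping set up in pmap_bootstrap(); anything higher is mapped
	 * temporarily at the reserved zero-page KVA.
	 */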
891 	if (pa < SEGMENT_LENGTH) {
892 		va = (caddr_t) pa;
893 	} else if (pmap_initialized) {
894 		if (pmap_pvo_zeropage == NULL)
895 			pmap_pvo_zeropage = pmap_rkva_alloc();
896 		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
897 		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
898 	} else {
899 		panic("pmap_zero_page: can't zero pa %#x", pa);
900 	}
901 
902 	bzero(va, PAGE_SIZE);
903 
904 	if (pa >= SEGMENT_LENGTH)
905 		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
906 }
907 
908 void
909 pmap_zero_page_area(vm_page_t m, int off, int size)
910 {
911 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
912 	caddr_t va;
913 
914 	if (pa < SEGMENT_LENGTH) {
915 		va = (caddr_t) pa;
916 	} else if (pmap_initialized) {
917 		if (pmap_pvo_zeropage == NULL)
918 			pmap_pvo_zeropage = pmap_rkva_alloc();
919 		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
920 		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
921 	} else {
922 		panic("pmap_zero_page_area: can't zero pa %#x", pa);
923 	}
924 
925 	bzero(va + off, size);
926 
927 	if (pa >= SEGMENT_LENGTH)
928 		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
929 }
930 
931 void
932 pmap_zero_page_idle(vm_page_t m)
933 {
934 
935 	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
936 	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
937 	mtx_lock(&Giant);
938 	pmap_zero_page(m);
939 	mtx_unlock(&Giant);
940 }
941 
942 /*
943  * Map the given physical page at the specified virtual address in the
944  * target pmap with the protection requested.  If specified the page
945  * will be wired down.
946  */
947 void
948 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
949 	   boolean_t wired)
950 {
951 	struct		pvo_head *pvo_head;
952 	uma_zone_t	zone;
953 	vm_page_t	pg;
954 	u_int		pte_lo, pvo_flags, was_exec, i;
955 	int		error;
956 
957 	if (!pmap_initialized) {
958 		pvo_head = &pmap_pvo_kunmanaged;
959 		zone = pmap_upvo_zone;
960 		pvo_flags = 0;
961 		pg = NULL;
962 		was_exec = PTE_EXEC;
963 	} else {
964 		pvo_head = vm_page_to_pvoh(m);
965 		pg = m;
966 		zone = pmap_mpvo_zone;
967 		pvo_flags = PVO_MANAGED;
968 		was_exec = 0;
969 	}
970 
971 	/*
972 	 * If this is a managed page, and it's the first reference to the page,
973 	 * clear its executable attribute.  Otherwise fetch the attribute.
974 	 */
975 	if (pg != NULL) {
976 		if (LIST_EMPTY(pvo_head)) {
977 			pmap_attr_clear(pg, PTE_EXEC);
978 		} else {
979 			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
980 		}
981 	}
982 
983 
984 	/*
985 	 * Assume the page is cache inhibited and access is guarded unless
986 	 * it's in our available memory array.
987 	 */
988 	pte_lo = PTE_I | PTE_G;
989 	for (i = 0; i < pregions_sz; i++) {
990 		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
991 		    (VM_PAGE_TO_PHYS(m) <
992 			(pregions[i].mr_start + pregions[i].mr_size))) {
993 			pte_lo &= ~(PTE_I | PTE_G);
994 			break;
995 		}
996 	}
997 
998 	if (prot & VM_PROT_WRITE)
999 		pte_lo |= PTE_BW;
1000 	else
1001 		pte_lo |= PTE_BR;
1002 
1003 	pvo_flags |= (prot & VM_PROT_EXECUTE);
1004 
1005 	if (wired)
1006 		pvo_flags |= PVO_WIRED;
1007 
1008 	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1009 	    pte_lo, pvo_flags);
1010 
1011 	/*
1012 	 * Flush the real page from the instruction cache if this page is
1013 	 * mapped executable and cacheable and was not previously mapped (or
1014 	 * was not mapped executable).
1015 	 */
1016 	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
1017 	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
1018 		/*
1019 		 * Flush the real memory from the cache.
1020 		 */
1021 		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1022 		if (pg != NULL)
1023 			pmap_attr_save(pg, PTE_EXEC);
1024 	}
1025 
1026 	/* XXX syncicache always until problems are sorted */
1027 	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1028 }
1029 
1030 vm_page_t
1031 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
1032 {
1033 
1034 	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
1035 	return (NULL);
1036 }
1037 
1038 vm_paddr_t
1039 pmap_extract(pmap_t pm, vm_offset_t va)
1040 {
1041 	struct	pvo_entry *pvo;
1042 
1043 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1044 
1045 	if (pvo != NULL) {
1046 		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1047 	}
1048 
1049 	return (0);
1050 }
1051 
1052 /*
1053  * Atomically extract and hold the physical page with the given
1054  * pmap and virtual address pair if that mapping permits the given
1055  * protection.
1056  */
1057 vm_page_t
1058 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1059 {
1060 	struct	pvo_entry *pvo;
1061 	vm_page_t m;
1062 
1063 	m = NULL;
1064 	mtx_lock(&Giant);
1065 	pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1066 	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
1067 	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
1068 	     (prot & VM_PROT_WRITE) == 0)) {
1069 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1070 		vm_page_lock_queues();
1071 		vm_page_hold(m);
1072 		vm_page_unlock_queues();
1073 	}
1074 	mtx_unlock(&Giant);
1075 	return (m);
1076 }
1077 
1078 /*
1079  * Grow the number of kernel page table entries.  Unneeded.
1080  */
1081 void
1082 pmap_growkernel(vm_offset_t addr)
1083 {
1084 }
1085 
1086 void
1087 pmap_init(void)
1088 {
1089 
1090 	CTR0(KTR_PMAP, "pmap_init");
1091 
1092 	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1093 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1094 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1095 	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1096 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1097 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1098 	pmap_initialized = TRUE;
1099 }
1100 
1101 void
1102 pmap_init2(void)
1103 {
1104 
1105 	CTR0(KTR_PMAP, "pmap_init2");
1106 }
1107 
1108 boolean_t
1109 pmap_is_modified(vm_page_t m)
1110 {
1111 
1112 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1113 		return (FALSE);
1114 
1115 	return (pmap_query_bit(m, PTE_CHG));
1116 }
1117 
1118 /*
1119  *	pmap_is_prefaultable:
1120  *
1121  *	Return whether or not the specified virtual address is eligible
1122  *	for prefault.
1123  */
1124 boolean_t
1125 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
1126 {
1127 
1128 	return (FALSE);
1129 }
1130 
1131 void
1132 pmap_clear_reference(vm_page_t m)
1133 {
1134 
1135 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1136 		return;
1137 	pmap_clear_bit(m, PTE_REF, NULL);
1138 }
1139 
1140 void
1141 pmap_clear_modify(vm_page_t m)
1142 {
1143 
1144 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1145 		return;
1146 	pmap_clear_bit(m, PTE_CHG, NULL);
1147 }
1148 
1149 /*
1150  *	pmap_ts_referenced:
1151  *
1152  *	Return a count of reference bits for a page, clearing those bits.
1153  *	It is not necessary for every reference bit to be cleared, but it
1154  *	is necessary that 0 only be returned when there are truly no
1155  *	reference bits set.
1156  *
1157  *	XXX: The exact number of bits to check and clear is a matter that
1158  *	should be tested and standardized at some point in the future for
1159  *	optimal aging of shared pages.
1160  */
1161 int
1162 pmap_ts_referenced(vm_page_t m)
1163 {
1164 	int count;
1165 
1166 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1167 		return (0);
1168 
1169 	count = pmap_clear_bit(m, PTE_REF, NULL);
1170 
1171 	return (count);
1172 }
1173 
1174 /*
1175  * Map a wired page into kernel virtual address space.
1176  */
1177 void
1178 pmap_kenter(vm_offset_t va, vm_offset_t pa)
1179 {
1180 	u_int		pte_lo;
1181 	int		error;
1182 	int		i;
1183 
1184 #if 0
1185 	if (va < VM_MIN_KERNEL_ADDRESS)
1186 		panic("pmap_kenter: attempt to enter non-kernel address %#x",
1187 		    va);
1188 #endif
1189 
1190 	pte_lo = PTE_I | PTE_G;
1191 	for (i = 0; i < pregions_sz; i++) {
1192 		if ((pa >= pregions[i].mr_start) &&
1193 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
1194 			pte_lo &= ~(PTE_I | PTE_G);
1195 			break;
1196 		}
1197 	}
1198 
1199 	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
1200 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1201 
1202 	if (error != 0 && error != ENOENT)
1203 		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
1204 		    pa, error);
1205 
1206 	/*
1207 	 * Flush the real memory from the instruction cache.
1208 	 */
1209 	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
1210 		pmap_syncicache(pa, PAGE_SIZE);
1211 	}
1212 }
1213 
1214 /*
1215  * Extract the physical page address associated with the given kernel virtual
1216  * address.
1217  */
1218 vm_offset_t
1219 pmap_kextract(vm_offset_t va)
1220 {
1221 	struct		pvo_entry *pvo;
1222 
1223 #ifdef UMA_MD_SMALL_ALLOC
1224 	/*
1225 	 * Allow direct mappings
1226 	 */
1227 	if (va < VM_MIN_KERNEL_ADDRESS) {
1228 		return (va);
1229 	}
1230 #endif
1231 
1232 	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1233 	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
1234 	if (pvo == NULL) {
1235 		return (0);
1236 	}
1237 
1238 	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1239 }
1240 
1241 /*
1242  * Remove a wired page from kernel virtual address space.
1243  */
1244 void
1245 pmap_kremove(vm_offset_t va)
1246 {
1247 
1248 	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
1249 }
1250 
1251 /*
1252  * Map a range of physical addresses into kernel virtual address space.
1253  *
1254  * The value passed in *virt is a suggested virtual address for the mapping.
1255  * Architectures which can support a direct-mapped physical to virtual region
1256  * can return the appropriate address within that region, leaving '*virt'
1257  * unchanged.  We cannot and therefore do not; *virt is updated with the
1258  * first usable address after the mapped region.
1259  */
1260 vm_offset_t
1261 pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
1262 {
1263 	vm_offset_t	sva, va;
1264 
1265 	sva = *virt;
1266 	va = sva;
1267 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1268 		pmap_kenter(va, pa_start);
1269 	*virt = va;
1270 	return (sva);
1271 }
1272 
1273 int
1274 pmap_mincore(pmap_t pmap, vm_offset_t addr)
1275 {
1276 	TODO;
1277 	return (0);
1278 }
1279 
1280 void
1281 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1282 		    vm_pindex_t pindex, vm_size_t size)
1283 {
1284 
1285 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1286 	KASSERT(object->type == OBJT_DEVICE,
1287 	    ("pmap_object_init_pt: non-device object"));
1288 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1289 	    ("pmap_object_init_pt: non current pmap"));
1290 }
1291 
1292 /*
1293  * Lower the permission for all mappings to a given page.
1294  */
1295 void
1296 pmap_page_protect(vm_page_t m, vm_prot_t prot)
1297 {
1298 	struct	pvo_head *pvo_head;
1299 	struct	pvo_entry *pvo, *next_pvo;
1300 	struct	pte *pt;
1301 
1302 	/*
1303 	 * Since the routine only downgrades protection, if the
1304 	 * maximal protection is desired, there isn't any change
1305 	 * to be made.
1306 	 */
1307 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
1308 	    (VM_PROT_READ|VM_PROT_WRITE))
1309 		return;
1310 
1311 	pvo_head = vm_page_to_pvoh(m);
1312 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1313 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1314 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1315 
1316 		/*
1317 		 * Downgrading to no mapping at all, we just remove the entry.
1318 		 */
1319 		if ((prot & VM_PROT_READ) == 0) {
1320 			pmap_pvo_remove(pvo, -1);
1321 			continue;
1322 		}
1323 
1324 		/*
1325 		 * If EXEC permission is being revoked, just clear the flag
1326 		 * in the PVO.
1327 		 */
1328 		if ((prot & VM_PROT_EXECUTE) == 0)
1329 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1330 
1331 		/*
1332 		 * If this entry is already RO, don't diddle with the page
1333 		 * table.
1334 		 */
1335 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
1336 			PMAP_PVO_CHECK(pvo);
1337 			continue;
1338 		}
1339 
1340 		/*
1341 		 * Grab the PTE before we diddle the bits so pvo_to_pte can
1342 		 * verify the pte contents are as expected.
1343 		 */
1344 		pt = pmap_pvo_to_pte(pvo, -1);
1345 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1346 		pvo->pvo_pte.pte_lo |= PTE_BR;
1347 		if (pt != NULL)
1348 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1349 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1350 	}
1351 }
1352 
1353 /*
1354  * Returns true if the pmap's pv is one of the first
1355  * 16 pvs linked to from this page.  This count may
1356  * be changed upwards or downwards in the future; it
1357  * is only necessary that true be returned for a small
1358  * subset of pmaps for proper page aging.
1359  */
1360 boolean_t
1361 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1362 {
1363 	int	loops;
1364 	struct	pvo_entry *pvo;
1365 
1366 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1367 		return (FALSE);
1368 
1369 	loops = 0;
1370 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1371 		if (pvo->pvo_pmap == pmap)
1372 			return (TRUE);
1373 		if (++loops >= 16)
1374 			break;
1375 	}
1376 
1377 	return (FALSE);
1378 }
1379 
1380 static u_int	pmap_vsidcontext;
1381 
1382 void
1383 pmap_pinit(pmap_t pmap)
1384 {
1385 	int	i, mask;
1386 	u_int	entropy;
1387 
1388 	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
1389 
1390 	entropy = 0;
1391 	__asm __volatile("mftb %0" : "=r"(entropy));
1392 
1393 	/*
1394 	 * Allocate some segment registers for this pmap.
1395 	 */
1396 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1397 		u_int	hash, n;
1398 
1399 		/*
1400 		 * Create a new value by multiplying by a prime and adding in
1401 		 * entropy from the timebase register.  This is to make the
1402 		 * VSID more random so that the PT hash function collides
1403 		 * less often.  (Note that the prime causes gcc to do shifts
1404 		 * instead of a multiply.)
1405 		 */
1406 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1407 		hash = pmap_vsidcontext & (NPMAPS - 1);
1408 		if (hash == 0)		/* 0 is special, avoid it */
1409 			continue;
1410 		n = hash >> 5;
1411 		mask = 1 << (hash & (VSID_NBPW - 1));
1412 		hash = (pmap_vsidcontext & 0xfffff);
1413 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1414 			/* anything free in this bucket? */
1415 			if (pmap_vsid_bitmap[n] == 0xffffffff) {
1416 				entropy = (pmap_vsidcontext >> 20);
1417 				continue;
1418 			}
1419 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
1420 			mask = 1 << i;
1421 			hash &= 0xfffff & ~(VSID_NBPW - 1);
1422 			hash |= i;
1423 		}
1424 		pmap_vsid_bitmap[n] |= mask;
1425 		for (i = 0; i < 16; i++)
1426 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1427 		return;
1428 	}
1429 
1430 	panic("pmap_pinit: out of segments");
1431 }
1432 
1433 /*
1434  * Initialize the pmap associated with process 0.
1435  */
1436 void
1437 pmap_pinit0(pmap_t pm)
1438 {
1439 
1440 	pmap_pinit(pm);
1441 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1442 }
1443 
1444 /*
1445  * Set the physical protection on the specified range of this map as requested.
1446  */
1447 void
1448 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1449 {
1450 	struct	pvo_entry *pvo;
1451 	struct	pte *pt;
1452 	int	pteidx;
1453 
1454 	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1455 	    eva, prot);
1456 
1457 
1458 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1459 	    ("pmap_protect: non current pmap"));
1460 
1461 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1462 		mtx_lock(&Giant);
1463 		pmap_remove(pm, sva, eva);
1464 		mtx_unlock(&Giant);
1465 		return;
1466 	}
1467 
1468 	mtx_lock(&Giant);
1469 	vm_page_lock_queues();
1470 	for (; sva < eva; sva += PAGE_SIZE) {
1471 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1472 		if (pvo == NULL)
1473 			continue;
1474 
1475 		if ((prot & VM_PROT_EXECUTE) == 0)
1476 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1477 
1478 		/*
1479 		 * Grab the PTE pointer before we diddle with the cached PTE
1480 		 * copy.
1481 		 */
1482 		pt = pmap_pvo_to_pte(pvo, pteidx);
1483 		/*
1484 		 * Change the protection of the page.
1485 		 */
1486 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1487 		pvo->pvo_pte.pte_lo |= PTE_BR;
1488 
1489 		/*
1490 		 * If the PVO is in the page table, update that pte as well.
1491 		 */
1492 		if (pt != NULL)
1493 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1494 	}
1495 	vm_page_unlock_queues();
1496 	mtx_unlock(&Giant);
1497 }
1498 
1499 /*
1500  * Map a list of wired pages into kernel virtual address space.  This is
1501  * intended for temporary mappings which do not need page modification or
1502  * references recorded.  Existing mappings in the region are overwritten.
1503  */
1504 void
1505 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1506 {
1507 	vm_offset_t va;
1508 
1509 	va = sva;
1510 	while (count-- > 0) {
1511 		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
1512 		va += PAGE_SIZE;
1513 		m++;
1514 	}
1515 }
1516 
1517 /*
1518  * Remove page mappings from kernel virtual address space.  Intended for
1519  * temporary mappings entered by pmap_qenter.
1520  */
1521 void
1522 pmap_qremove(vm_offset_t sva, int count)
1523 {
1524 	vm_offset_t va;
1525 
1526 	va = sva;
1527 	while (count-- > 0) {
1528 		pmap_kremove(va);
1529 		va += PAGE_SIZE;
1530 	}
1531 }
1532 
1533 void
1534 pmap_release(pmap_t pmap)
1535 {
1536 	int	idx, mask;
1537 
1538 	/*
1539 	 * Free the VSID allocated to this pmap's segment registers.
1540 	 */
1541 	if (pmap->pm_sr[0] == 0)
1542 		panic("pmap_release");
1543 
1544 	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1545 	mask = 1 << (idx % VSID_NBPW);
1546 	idx /= VSID_NBPW;
1547 	pmap_vsid_bitmap[idx] &= ~mask;
1548 }
1549 
1550 /*
1551  * Remove the given range of addresses from the specified map.
1552  */
1553 void
1554 pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1555 {
1556 	struct	pvo_entry *pvo;
1557 	int	pteidx;
1558 
1559 	vm_page_lock_queues();
1560 	for (; sva < eva; sva += PAGE_SIZE) {
1561 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1562 		if (pvo != NULL) {
1563 			pmap_pvo_remove(pvo, pteidx);
1564 		}
1565 	}
1566 	vm_page_unlock_queues();
1567 }
1568 
1569 /*
1570  * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
1571  * will reflect changes in pte's back to the vm_page.
1572  */
1573 void
1574 pmap_remove_all(vm_page_t m)
1575 {
1576 	struct  pvo_head *pvo_head;
1577 	struct	pvo_entry *pvo, *next_pvo;
1578 
1579 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1580 
1581 	pvo_head = vm_page_to_pvoh(m);
1582 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1583 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1584 
1585 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1586 		pmap_pvo_remove(pvo, -1);
1587 	}
1588 	vm_page_flag_clear(m, PG_WRITEABLE);
1589 }
1590 
1591 /*
1592  * Remove all pages from the specified address space; this aids process exit
1593  * speed.  This is much faster than pmap_remove in the case of running down
1594  * an entire address space.  Only works for the current pmap.
1595  */
1596 void
1597 pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1598 {
1599 }
1600 
1601 /*
1602  * Allocate a physical page of memory directly from the phys_avail map.
1603  * Can only be called from pmap_bootstrap before avail start and end are
1604  * calculated.
1605  */
1606 static vm_offset_t
1607 pmap_bootstrap_alloc(vm_size_t size, u_int align)
1608 {
1609 	vm_offset_t	s, e;
1610 	int		i, j;
1611 
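	/*
	 * Scan phys_avail for a region that can hold the page-rounded,
	 * optionally aligned request.  The chosen region is shrunk in
	 * place if the allocation comes from either end, or split in two
	 * if it comes from the middle.
	 */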
1612 	size = round_page(size);
1613 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1614 		if (align != 0)
1615 			s = (phys_avail[i] + align - 1) & ~(align - 1);
1616 		else
1617 			s = phys_avail[i];
1618 		e = s + size;
1619 
1620 		if (s < phys_avail[i] || e > phys_avail[i + 1])
1621 			continue;
1622 
1623 		if (s == phys_avail[i]) {
1624 			phys_avail[i] += size;
1625 		} else if (e == phys_avail[i + 1]) {
1626 			phys_avail[i + 1] -= size;
1627 		} else {
1628 			for (j = phys_avail_count * 2; j > i; j -= 2) {
1629 				phys_avail[j] = phys_avail[j - 2];
1630 				phys_avail[j + 1] = phys_avail[j - 1];
1631 			}
1632 
1633 			phys_avail[i + 3] = phys_avail[i + 1];
1634 			phys_avail[i + 1] = s;
1635 			phys_avail[i + 2] = e;
1636 			phys_avail_count++;
1637 		}
1638 
1639 		return (s);
1640 	}
1641 	panic("pmap_bootstrap_alloc: could not allocate memory");
1642 }
1643 
1644 /*
1645  * Return an unmapped pvo for a kernel virtual address.
1646  * Used by pmap functions that operate on physical pages.
1647  */
1648 static struct pvo_entry *
1649 pmap_rkva_alloc(void)
1650 {
1651 	struct		pvo_entry *pvo;
1652 	struct		pte *pt;
1653 	vm_offset_t	kva;
1654 	int		pteidx;
1655 
1656 	if (pmap_rkva_count == 0)
1657 		panic("pmap_rkva_alloc: no more reserved KVAs");
1658 
1659 	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
1660 	pmap_kenter(kva, 0);
1661 
1662 	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
1663 
1664 	if (pvo == NULL)
1665 		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
1666 
1667 	pt = pmap_pvo_to_pte(pvo, pteidx);
1668 
1669 	if (pt == NULL)
1670 		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
1671 
1672 	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1673 	PVO_PTEGIDX_CLR(pvo);
1674 
1675 	pmap_pte_overflow++;
1676 
1677 	return (pvo);
1678 }
1679 
1680 static void
1681 pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
1682     int *depth_p)
1683 {
1684 	struct	pte *pt;
1685 
1686 	/*
1687 	 * If this pvo already has a valid pte, we need to save it so it can
1688 	 * be restored later.  We then just reload the new PTE over the old
1689 	 * slot.
1690 	 */
1691 	if (saved_pt != NULL) {
1692 		pt = pmap_pvo_to_pte(pvo, -1);
1693 
1694 		if (pt != NULL) {
1695 			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1696 			PVO_PTEGIDX_CLR(pvo);
1697 			pmap_pte_overflow++;
1698 		}
1699 
1700 		*saved_pt = pvo->pvo_pte;
1701 
1702 		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1703 	}
1704 
1705 	pvo->pvo_pte.pte_lo |= pa;
1706 
1707 	if (!pmap_pte_spill(pvo->pvo_vaddr))
1708 		panic("pmap_pa_map: could not spill pvo %p", pvo);
1709 
1710 	if (depth_p != NULL)
1711 		(*depth_p)++;
1712 }
1713 
1714 static void
1715 pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
1716 {
1717 	struct	pte *pt;
1718 
1719 	pt = pmap_pvo_to_pte(pvo, -1);
1720 
1721 	if (pt != NULL) {
1722 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1723 		PVO_PTEGIDX_CLR(pvo);
1724 		pmap_pte_overflow++;
1725 	}
1726 
1727 	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1728 
1729 	/*
1730 	 * If there is a saved PTE and it's valid, restore it and return.
1731 	 */
1732 	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
1733 		if (depth_p != NULL && --(*depth_p) == 0)
1734 			panic("pmap_pa_unmap: restoring but depth == 0");
1735 
1736 		pvo->pvo_pte = *saved_pt;
1737 
1738 		if (!pmap_pte_spill(pvo->pvo_vaddr))
1739 			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
1740 	}
1741 }
1742 
1743 static void
1744 pmap_syncicache(vm_offset_t pa, vm_size_t len)
1745 {
1746 	__syncicache((void *)pa, len);
1747 }
1748 
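/*
 * Invalidate the entire TLB by issuing a tlbie for one page in each TLB
 * congruence class; stepping a page at a time through the low 256KB of
 * the address space touches every class.
 */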
1749 static void
1750 tlbia(void)
1751 {
1752 	caddr_t	i;
1753 
1754 	SYNC();
1755 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1756 		TLBIE(i);
1757 		EIEIO();
1758 	}
1759 	TLBSYNC();
1760 	SYNC();
1761 }
1762 
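/*
 * Allocate a pvo entry describing the mapping of va to pa in pmap pm,
 * link it onto the per-PTEG overflow list and the page's pvo list, and
 * try to insert the corresponding PTE into the hash table.  Returns
 * ENOENT when this is the first mapping entered for the page (callers
 * such as pmap_kenter() treat that value as informational rather than
 * as an error), ENOMEM if no pvo entry could be allocated, and 0
 * otherwise.
 */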
1763 static int
1764 pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1765     vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1766 {
1767 	struct	pvo_entry *pvo;
1768 	u_int	sr;
1769 	int	first;
1770 	u_int	ptegidx;
1771 	int	i;
1772 	int     bootstrap;
1773 
1774 	pmap_pvo_enter_calls++;
1775 	first = 0;
1776 
1777 	bootstrap = 0;
1778 
1779 	/*
1780 	 * Compute the PTE Group index.
1781 	 */
1782 	va &= ~ADDR_POFF;
1783 	sr = va_to_sr(pm->pm_sr, va);
1784 	ptegidx = va_to_pteg(sr, va);
1785 
1786 	/*
1787 	 * If there is already a mapping for this page, keep it if it is
1788 	 * identical; otherwise remove it before entering the new one.
1789 	 */
1790 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1791 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1792 			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
1793 			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
1794 			    (pte_lo & PTE_PP)) {
1795 				return (0);
1796 			}
1797 			pmap_pvo_remove(pvo, -1);
1798 			break;
1799 		}
1800 	}
1801 
1802 	/*
1803 	 * If we aren't overwriting a mapping, try to allocate.
1804 	 */
1805 	if (pmap_initialized) {
1806 		pvo = uma_zalloc(zone, M_NOWAIT);
1807 	} else {
1808 		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
1809 			panic("pmap_pvo_enter: bpvo pool exhausted, %d, %d, %d",
1810 			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
1811 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
1812 		}
1813 		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
1814 		pmap_bpvo_pool_index++;
1815 		bootstrap = 1;
1816 	}
1817 
1818 	if (pvo == NULL) {
1819 		return (ENOMEM);
1820 	}
1821 
1822 	pmap_pvo_entries++;
1823 	pvo->pvo_vaddr = va;
1824 	pvo->pvo_pmap = pm;
1825 	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1826 	pvo->pvo_vaddr &= ~ADDR_POFF;
1827 	if (flags & VM_PROT_EXECUTE)
1828 		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1829 	if (flags & PVO_WIRED)
1830 		pvo->pvo_vaddr |= PVO_WIRED;
1831 	if (pvo_head != &pmap_pvo_kunmanaged)
1832 		pvo->pvo_vaddr |= PVO_MANAGED;
1833 	if (bootstrap)
1834 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1835 	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
1836 
1837 	/*
1838 	 * Remember if the list was empty and therefore will be the first
1839 	 * item.
1840 	 */
1841 	if (LIST_FIRST(pvo_head) == NULL)
1842 		first = 1;
1843 
1844 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1845 	if (pvo->pvo_vaddr & PVO_WIRED)
1846 		pvo->pvo_pmap->pm_stats.wired_count++;
1847 	pvo->pvo_pmap->pm_stats.resident_count++;
1848 
1849 	/*
1850 	 * We hope this succeeds but it isn't required.
1851 	 */
1852 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1853 	if (i >= 0) {
1854 		PVO_PTEGIDX_SET(pvo, i);
1855 	} else {
1856 		panic("pmap_pvo_enter: overflow");
1857 		pmap_pte_overflow++;
1858 	}
1859 
1860 	return (first ? ENOENT : 0);
1861 }
1862 
1863 static void
1864 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1865 {
1866 	struct	pte *pt;
1867 
1868 	/*
1869 	 * If there is an active pte entry, we need to deactivate it (and
1870 	 * save the ref & chg bits).
1871 	 */
1872 	pt = pmap_pvo_to_pte(pvo, pteidx);
1873 	if (pt != NULL) {
1874 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1875 		PVO_PTEGIDX_CLR(pvo);
1876 	} else {
1877 		pmap_pte_overflow--;
1878 	}
1879 
1880 	/*
1881 	 * Update our statistics.
1882 	 */
1883 	pvo->pvo_pmap->pm_stats.resident_count--;
1884 	if (pvo->pvo_vaddr & PVO_WIRED)
1885 		pvo->pvo_pmap->pm_stats.wired_count--;
1886 
1887 	/*
1888 	 * Save the REF/CHG bits into their cache if the page is managed.
1889 	 */
1890 	if (pvo->pvo_vaddr & PVO_MANAGED) {
1891 		struct	vm_page *pg;
1892 
1893 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1894 		if (pg != NULL) {
1895 			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
1896 			    (PTE_REF | PTE_CHG));
1897 		}
1898 	}
1899 
1900 	/*
1901 	 * Remove this PVO from the PV list.
1902 	 */
1903 	LIST_REMOVE(pvo, pvo_vlink);
1904 
1905 	/*
1906 	 * Remove this from the overflow list and return it to the pool
1907 	 * if we aren't going to reuse it.
1908 	 */
1909 	LIST_REMOVE(pvo, pvo_olink);
1910 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1911 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
1912 		    pmap_upvo_zone, pvo);
1913 	pmap_pvo_entries--;
1914 	pmap_pvo_remove_calls++;
1915 }
1916 
1917 static __inline int
1918 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1919 {
1920 	int	pteidx;
1921 
1922 	/*
1923 	 * We can find the actual pte entry without searching by grabbing
1924 	 * the PTEG slot index stored in the low bits of pvo_vaddr and by
1925 	 * noticing the HID bit.
1926 	 */
1927 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1928 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1929 		pteidx ^= pmap_pteg_mask * 8;
1930 
1931 	return (pteidx);
1932 }
1933 
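/*
 * Find the pvo entry for a virtual address in the given pmap, optionally
 * returning the index of its PTE slot via pteidx_p.
 */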
1934 static struct pvo_entry *
1935 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1936 {
1937 	struct	pvo_entry *pvo;
1938 	int	ptegidx;
1939 	u_int	sr;
1940 
1941 	va &= ~ADDR_POFF;
1942 	sr = va_to_sr(pm->pm_sr, va);
1943 	ptegidx = va_to_pteg(sr, va);
1944 
1945 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1946 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1947 			if (pteidx_p)
1948 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1949 			return (pvo);
1950 		}
1951 	}
1952 
1953 	return (NULL);
1954 }
1955 
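/*
 * Return a pointer to the hardware PTE backing a pvo entry, or NULL if the
 * entry has no valid PTE.  Panics if the pvo and the PTEG table disagree.
 */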
1956 static struct pte *
1957 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1958 {
1959 	struct	pte *pt;
1960 
1961 	/*
1962 	 * If we haven't been supplied the ptegidx, calculate it.
1963 	 */
1964 	if (pteidx == -1) {
1965 		int	ptegidx;
1966 		u_int	sr;
1967 
1968 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1969 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
1970 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1971 	}
1972 
1973 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1974 
1975 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1976 		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
1977 		    "valid pte index", pvo);
1978 	}
1979 
1980 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1981 		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
1982 		    "but no valid pte", pvo);
1983 	}
1984 
1985 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1986 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1987 			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
1988 			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
1989 		}
1990 
1991 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
1992 		    != 0) {
1993 			panic("pmap_pvo_to_pte: pvo %p pte does not match "
1994 			    "pte %p in pmap_pteg_table", pvo, pt);
1995 		}
1996 
1997 		return (pt);
1998 	}
1999 
2000 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
2001 		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
2002 		    "pmap_pteg_table but valid in pvo", pvo, pt);
2003 	}
2004 
2005 	return (NULL);
2006 }
2007 
2008 /*
2009  * XXX: THIS STUFF SHOULD BE IN pte.c?
2010  */
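/*
 * Handle a PTEG miss for addr by spilling one of our pvo entries into the
 * PTEG: a pseudo-randomly chosen victim slot is evicted (its REF/CHG bits
 * are preserved in the victim's pvo) and replaced with the PTE of the
 * faulting mapping.  Returns 1 if a mapping was installed, 0 if no pvo
 * entry covers addr.
 */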
2011 int
2012 pmap_pte_spill(vm_offset_t addr)
2013 {
2014 	struct	pvo_entry *source_pvo, *victim_pvo;
2015 	struct	pvo_entry *pvo;
2016 	int	ptegidx, i, j;
2017 	u_int	sr;
2018 	struct	pteg *pteg;
2019 	struct	pte *pt;
2020 
2021 	pmap_pte_spills++;
2022 
2023 	sr = mfsrin(addr);
2024 	ptegidx = va_to_pteg(sr, addr);
2025 
2026 	/*
2027 	 * Have to substitute some entry.  Use the primary hash for this.
2028 	 * Use low bits of timebase as random generator.
2029 	 */
2030 	pteg = &pmap_pteg_table[ptegidx];
2031 	__asm __volatile("mftb %0" : "=r"(i));
2032 	i &= 7;
2033 	pt = &pteg->pt[i];
2034 
2035 	source_pvo = NULL;
2036 	victim_pvo = NULL;
2037 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2038 		/*
2039 		 * We need to find a pvo entry for this address.
2040 		 */
2041 		PMAP_PVO_CHECK(pvo);
2042 		if (source_pvo == NULL &&
2043 		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
2044 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
2045 			/*
2046 			 * Now found an entry to be spilled into the pteg.
2047 			 * The PTE is now valid, so we know it's active.
2048 			 */
2049 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
2050 
2051 			if (j >= 0) {
2052 				PVO_PTEGIDX_SET(pvo, j);
2053 				pmap_pte_overflow--;
2054 				PMAP_PVO_CHECK(pvo);
2055 				return (1);
2056 			}
2057 
2058 			source_pvo = pvo;
2059 
2060 			if (victim_pvo != NULL)
2061 				break;
2062 		}
2063 
2064 		/*
2065 		 * We also need the pvo entry of the victim we are replacing
2066 		 * so save the R & C bits of the PTE.
2067 		 */
2068 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2069 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
2070 			victim_pvo = pvo;
2071 			if (source_pvo != NULL)
2072 				break;
2073 		}
2074 	}
2075 
2076 	if (source_pvo == NULL)
2077 		return (0);
2078 
2079 	if (victim_pvo == NULL) {
2080 		if ((pt->pte_hi & PTE_HID) == 0)
2081 			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
2082 			    " entry", pt);
2083 
2084 		/*
2085 		 * If this is a secondary PTE, we need to search its primary
2086 		 * pvo bucket for the matching PVO.
2087 		 */
2088 		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
2089 		    pvo_olink) {
2090 			PMAP_PVO_CHECK(pvo);
2091 			/*
2092 			 * We also need the pvo entry of the victim we are
2093 			 * replacing so save the R & C bits of the PTE.
2094 			 */
2095 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
2096 				victim_pvo = pvo;
2097 				break;
2098 			}
2099 		}
2100 
2101 		if (victim_pvo == NULL)
2102 			panic("pmap_pte_spill: victim s-pte (%p) has no pvo"
2103 			    " entry", pt);
2104 	}
2105 
2106 	/*
2107 	 * We are invalidating the TLB entry for the EA we are replacing even
2108 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2109 	 * contained in the TLB entry.
2110 	 */
2111 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
2112 
2113 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
2114 	pmap_pte_set(pt, &source_pvo->pvo_pte);
2115 
2116 	PVO_PTEGIDX_CLR(victim_pvo);
2117 	PVO_PTEGIDX_SET(source_pvo, i);
2118 	pmap_pte_replacements++;
2119 
2120 	PMAP_PVO_CHECK(victim_pvo);
2121 	PMAP_PVO_CHECK(source_pvo);
2122 
2123 	return (1);
2124 }
2125 
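/*
 * Insert a PTE into the first empty slot of its primary PTEG, falling back
 * to the secondary PTEG.  Returns the slot index used; panics if both
 * PTEGs are full.
 */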
2126 static int
2127 pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2128 {
2129 	struct	pte *pt;
2130 	int	i;
2131 
2132 	/*
2133 	 * First try primary hash.
2134 	 */
2135 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2136 		if ((pt->pte_hi & PTE_VALID) == 0) {
2137 			pvo_pt->pte_hi &= ~PTE_HID;
2138 			pmap_pte_set(pt, pvo_pt);
2139 			return (i);
2140 		}
2141 	}
2142 
2143 	/*
2144 	 * Now try secondary hash.
2145 	 */
2146 	ptegidx ^= pmap_pteg_mask;
2148 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2149 		if ((pt->pte_hi & PTE_VALID) == 0) {
2150 			pvo_pt->pte_hi |= PTE_HID;
2151 			pmap_pte_set(pt, pvo_pt);
2152 			return (i);
2153 		}
2154 	}
2155 
2156 	panic("pmap_pte_insert: overflow");
2157 	return (-1);
2158 }
2159 
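/*
 * Return TRUE if any mapping of the page has the given REF/CHG bit set,
 * checking the software copies in the pvo entries first and then the
 * hardware PTEs.  The bit is cached in the page attributes when found.
 */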
2160 static boolean_t
2161 pmap_query_bit(vm_page_t m, int ptebit)
2162 {
2163 	struct	pvo_entry *pvo;
2164 	struct	pte *pt;
2165 
2166 #if 0
2167 	if (pmap_attr_fetch(m) & ptebit)
2168 		return (TRUE);
2169 #endif
2170 
2171 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2172 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2173 
2174 		/*
2175 		 * See if we saved the bit off.  If so, cache it and return
2176 		 * success.
2177 		 */
2178 		if (pvo->pvo_pte.pte_lo & ptebit) {
2179 			pmap_attr_save(m, ptebit);
2180 			PMAP_PVO_CHECK(pvo);	/* sanity check */
2181 			return (TRUE);
2182 		}
2183 	}
2184 
2185 	/*
2186 	 * No luck, now go through the hard part of looking at the PTEs
2187 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2188 	 * the PTEs.
2189 	 */
2190 	SYNC();
2191 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2192 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2193 
2194 		/*
2195 		 * See if this pvo has a valid PTE.  If so, fetch the
2196 		 * REF/CHG bits from the valid PTE.  If the appropriate
2197 		 * ptebit is set, cache it and return success.
2198 		 */
2199 		pt = pmap_pvo_to_pte(pvo, -1);
2200 		if (pt != NULL) {
2201 			pmap_pte_synch(pt, &pvo->pvo_pte);
2202 			if (pvo->pvo_pte.pte_lo & ptebit) {
2203 				pmap_attr_save(m, ptebit);
2204 				PMAP_PVO_CHECK(pvo);	/* sanity check */
2205 				return (TRUE);
2206 			}
2207 		}
2208 	}
2209 
2210 	return (FALSE);
2211 }
2212 
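/*
 * Clear the given REF/CHG bit in every mapping of the page and in the
 * cached page attributes.  Returns the number of valid PTEs in which the
 * bit was set; the original attribute bits are returned via *origbit when
 * it is not NULL.
 */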
2213 static u_int
2214 pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
2215 {
2216 	u_int	count;
2217 	struct	pvo_entry *pvo;
2218 	struct	pte *pt;
2219 	int	rv;
2220 
2221 	/*
2222 	 * Clear the cached value.
2223 	 */
2224 	rv = pmap_attr_fetch(m);
2225 	pmap_attr_clear(m, ptebit);
2226 
2227 	/*
2228 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2229 	 * we can reset the right ones).  note that since the pvo entries and
2230 	 * list heads are accessed via BAT0 and are never placed in the page
2231 	 * table, we don't have to worry about further accesses setting the
2232 	 * REF/CHG bits.
2233 	 */
2234 	SYNC();
2235 
2236 	/*
2237 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2238 	 * valid pte clear the ptebit from the valid pte.
2239 	 */
2240 	count = 0;
2241 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2242 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2243 		pt = pmap_pvo_to_pte(pvo, -1);
2244 		if (pt != NULL) {
2245 			pmap_pte_synch(pt, &pvo->pvo_pte);
2246 			if (pvo->pvo_pte.pte_lo & ptebit) {
2247 				count++;
2248 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2249 			}
2250 		}
2251 		rv |= pvo->pvo_pte.pte_lo;
2252 		pvo->pvo_pte.pte_lo &= ~ptebit;
2253 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2254 	}
2255 
2256 	if (origbit != NULL) {
2257 		*origbit = rv;
2258 	}
2259 
2260 	return (count);
2261 }
2262 
2263 /*
2264  * Return 0 if the physical range is encompassed by battable[idx]
2265  */
2266 static int
2267 pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2268 {
2269 	u_int prot;
2270 	u_int32_t start;
2271 	u_int32_t end;
2272 	u_int32_t bat_ble;
2273 
2274 	/*
2275 	 * Return immediately if not a valid mapping
2276 	 */
2277 	if (!(battable[idx].batu & BAT_Vs))
2278 		return (EINVAL);
2279 
2280 	/*
2281 	 * The BAT entry must be cache-inhibited, guarded, and r/w
2282 	 * so it can function as an i/o page
2283 	 */
2284 	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2285 	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2286 		return (EPERM);
2287 
2288 	/*
2289 	 * The address should be within the BAT range. Assume that the
2290 	 * start address in the BAT has the correct alignment (thus
2291 	 * not requiring masking)
2292 	 */
2293 	start = battable[idx].batl & BAT_PBS;
2294 	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2295 	end = start | (bat_ble << 15) | 0x7fff;
2296 
2297 	if ((pa < start) || ((pa + size) > end))
2298 		return (ERANGE);
2299 
2300 	return (0);
2301 }
2302 
2303 
2304 /*
2305  * Map a set of physical memory pages into the kernel virtual
2306  * address space. Return a pointer to where it is mapped. This
2307  * routine is intended to be used for mapping device memory,
2308  * NOT real memory.
2309  */
2310 void *
2311 pmap_mapdev(vm_offset_t pa, vm_size_t size)
2312 {
2313 	vm_offset_t va, tmpva, ppa, offset;
2314 	int i;
2315 
2316 	ppa = trunc_page(pa);
2317 	offset = pa & PAGE_MASK;
2318 	size = roundup(offset + size, PAGE_SIZE);
2319 
2320 	GIANT_REQUIRED;
2321 
2322 	/*
2323 	 * If the physical address lies within a valid BAT table entry,
2324 	 * return the 1:1 mapping. This currently doesn't work
2325 	 * for regions that overlap 256M BAT segments.
2326 	 */
2327 	for (i = 0; i < 16; i++) {
2328 		if (pmap_bat_mapped(i, pa, size) == 0)
2329 			return ((void *) pa);
2330 	}
2331 
2332 	va = kmem_alloc_nofault(kernel_map, size);
2333 	if (!va)
2334 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2335 
2336 	for (tmpva = va; size > 0;) {
2337 		pmap_kenter(tmpva, ppa);
2338 		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
2339 		size -= PAGE_SIZE;
2340 		tmpva += PAGE_SIZE;
2341 		ppa += PAGE_SIZE;
2342 	}
2343 
2344 	return ((void *)(va + offset));
2345 }
2346 
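/*
 * Release a mapping created by pmap_mapdev().  BAT-covered (1:1) regions
 * lie outside the kernel virtual range and need no work.
 */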
2347 void
2348 pmap_unmapdev(vm_offset_t va, vm_size_t size)
2349 {
2350 	vm_offset_t base, offset;
2351 
2352 	/*
2353 	 * If this is outside kernel virtual space, then it's a
2354 	 * battable entry and doesn't require unmapping
2355 	 */
2356 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2357 		base = trunc_page(va);
2358 		offset = va & PAGE_MASK;
2359 		size = roundup(offset + size, PAGE_SIZE);
2360 		kmem_free(kernel_map, base, size);
2361 	}
2362 }
2363