xref: /freebsd/sys/powerpc/aim/mmu_oea.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*-
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 /*
97  * Manages physical address maps.
98  *
99  * In addition to hardware address maps, this module is called upon to
100  * provide software-use-only maps which may or may not be stored in the
101  * same form as hardware maps.  These pseudo-maps are used to store
102  * intermediate results from copy operations to and from address spaces.
103  *
104  * Since the information managed by this module is also stored by the
105  * logical address mapping module, this module may throw away valid virtual
106  * to physical mappings at almost any time.  However, invalidations of
107  * mappings must be done as requested.
108  *
109  * In order to cope with hardware architectures which make virtual to
110  * physical map invalidates expensive, this module may delay invalidate
111  * reduced protection operations until such time as they are actually
112  * necessary.  This module is given full information as to which processors
113  * are currently using which maps, and to when physical maps must be made
114  * correct.
115  */
116 
117 #include "opt_kstack_pages.h"
118 
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
129 
130 #include <dev/ofw/openfirm.h>
131 
132 #include <vm/vm.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/uma.h>
142 
143 #include <machine/cpu.h>
144 #include <machine/powerpc.h>
145 #include <machine/bat.h>
146 #include <machine/frame.h>
147 #include <machine/md_var.h>
148 #include <machine/psl.h>
149 #include <machine/pte.h>
150 #include <machine/sr.h>
151 
152 #define	PMAP_DEBUG
153 
154 #define TODO	panic("%s: not implemented", __func__);
155 
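/*
 * Wrappers for the PowerPC TLB and storage-ordering primitives: tlbie
 * invalidates any TLB entry translating the given effective address,
 * tlbsync waits for a broadcast invalidation to complete on the other
 * processors, sync orders all prior storage accesses, and eieio orders
 * accesses to cache-inhibited (I/O) storage.  The PTE routines below
 * combine them into the architected sequences, e.g. store to the PTE,
 * then TLBIE/EIEIO/TLBSYNC/SYNC.
 */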
#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

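/*
 * A VSID carries the owning segment register number in its low 4 bits
 * and a 20-bit hash in bits [23:4].  For example, VSID_MAKE(3, 0x12345)
 * yields 0x123453; VSID_TO_SR() recovers 3 and VSID_TO_HASH() recovers
 * 0x12345.
 */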
#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

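/*
 * PVO entries' virtual addresses are page aligned, so the flags below
 * are kept in the low-order bits of pvo_vaddr and masked back off by
 * PVO_VADDR().  The three PVO_PTEGIDX bits record which of the eight
 * slots within a PTEG the entry occupies, letting pmap_pvo_to_pte()
 * locate the hardware PTE without searching the group.
 */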
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	PMAP_PVO_CHECK(pvo)

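/*
 * Each ofw_map mirrors one entry of the Open Firmware "translations"
 * property: a virtual range, its length, the physical address it maps
 * to, and the mapping mode.  pmap_bootstrap() copies and sorts these
 * so the firmware's mappings can be re-entered into ofw_pmap.
 */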
struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

int pmap_pagedaemon_waken;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	pmap_table_mutex;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged kernel pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

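/*
 * One bit per VSID hash group: pmap_pinit() sets a bit when it assigns
 * the corresponding 20-bit hash to a new pmap and pmap_release() clears
 * it again, bounding the system at NPMAPS live pmaps.
 */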
#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static u_int		pmap_clear_bit(vm_page_t, int, int *);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

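/*
 * Compute the primary PTEG index for an effective address, as defined
 * by the PowerPC segmented MMU: the low bits of the VSID XORed with the
 * page index of the address, truncated to the table size.  The
 * secondary hash is the ones' complement of this value; PTEs placed via
 * the secondary hash are tagged with PTE_HID (see pmap_pvo_pte_index()).
 */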
static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

435 	/*
436 	 * Update the PTE as defined in section 7.6.3.1.
437 	 * Note that the REF/CHG bits are from pvo_pt and thus should havce
438 	 * been saved so this routine can restore them (if desired).
439 	 */
440 	pt->pte_lo = pvo_pt->pte_lo;
441 	EIEIO();
442 	pt->pte_hi = pvo_pt->pte_hi;
443 	SYNC();
444 	pmap_pte_valid++;
445 }
446 
static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the ref & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
608 		/*
609 		 * Install entries into the BAT table to allow all
610 		 * of physmem to be convered by on-demand BAT entries.
611 		 * The loop will sometimes set the same battable element
612 		 * twice, but that's fine since they won't be used for
613 		 * a while yet.
614 		 */
615 		pa = pregions[i].mr_start & 0xf0000000;
616 		end = pregions[i].mr_start + pregions[i].mr_size;
617 		do {
618                         u_int n = pa >> ADDR_SR_SHFT;
619 
620 			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
621 			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
622 			pa += SEGMENT_LENGTH;
623 		} while (pa < end);
624 	}
625 
	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

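	/*
	 * A PTEG is eight 8-byte PTEs (64 bytes).  The architecture
	 * requires the table to be a power of two in size, aligned to
	 * that size, and at least 64KB; the heuristic above yields
	 * roughly one PTEG per one to two pages of physical memory.
	 */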
	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&pmap_table_mutex, "pmap table", NULL, MTX_DEF);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

699 	/*
700 	 * Set up the Open Firmware pmap and add it's mappings.
701 	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("pmap_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof(*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
804 	__asm __volatile ("sync; mtsdr1 %0; isync"
805 	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
806 	tlbia();
807 
808 	pmap_bootstrapped++;
809 }
810 
811 /*
812  * Activate a user pmap.  The pmap must be activated before it's address
813  * space can be accessed in any way.
814  */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{

877 	/*
878 	 * This is not needed as it's mainly an optimisation.
879 	 * It may want to be implemented later though.
880 	 */
881 }
882 
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		vm_page_lock_queues();
	PMAP_LOCK(pmap);

988 	 * If this is a managed page, and it's the first reference to the page,
989 	 * clear the execness of the page.  Otherwise fetch the execness.
990 	 */
991 	if (pg != NULL) {
992 		if (LIST_EMPTY(pvo_head)) {
993 			pmap_attr_clear(pg, PTE_EXEC);
994 		} else {
995 			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
996 		}
997 	}
1000 	/*
1001 	 * Assume the page is cache inhibited and access is guarded unless
1002 	 * it's in our available memory array.
1003 	 */
1004 	pte_lo = PTE_I | PTE_G;
1005 	for (i = 0; i < pregions_sz; i++) {
1006 		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
1007 		    (VM_PAGE_TO_PHYS(m) <
1008 			(pregions[i].mr_start + pregions[i].mr_size))) {
1009 			pte_lo &= ~(PTE_I | PTE_G);
1010 			break;
1011 		}
1012 	}
1013 
1014 	if (prot & VM_PROT_WRITE)
1015 		pte_lo |= PTE_BW;
1016 	else
1017 		pte_lo |= PTE_BR;
1018 
1019 	pvo_flags |= (prot & VM_PROT_EXECUTE);
1020 
1021 	if (wired)
1022 		pvo_flags |= PVO_WIRED;
1023 
1024 	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1025 	    pte_lo, pvo_flags);
1026 
1027 	/*
1028 	 * Flush the real page from the instruction cache if this page is
1029 	 * mapped executable and cacheable and was not previously mapped (or
1030 	 * was not mapped executable).
1031 	 */
1032 	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
1033 	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
1034 		/*
1035 		 * Flush the real memory from the cache.
1036 		 */
1037 		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1038 		if (pg != NULL)
1039 			pmap_attr_save(pg, PTE_EXEC);
1040 	}
1041 	if (pmap_bootstrapped)
1042 		vm_page_unlock_queues();
1043 
1044 	/* XXX syncicache always until problems are sorted */
1045 	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1046 	PMAP_UNLOCK(pmap);
1047 }
1048 
1049 vm_page_t
1050 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
1051 {
1052 
1053 	vm_page_busy(m);
1054 	vm_page_unlock_queues();
1055 	VM_OBJECT_UNLOCK(m->object);
1056 	mtx_lock(&Giant);
1057 	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
1058 	mtx_unlock(&Giant);
1059 	VM_OBJECT_LOCK(m->object);
1060 	vm_page_lock_queues();
1061 	vm_page_wakeup(m);
1062 	return (NULL);
1063 }
1064 
1065 vm_paddr_t
1066 pmap_extract(pmap_t pm, vm_offset_t va)
1067 {
1068 	struct	pvo_entry *pvo;
1069 	vm_paddr_t pa;
1070 
1071 	PMAP_LOCK(pm);
1072 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1073 	if (pvo == NULL)
1074 		pa = 0;
1075 	else
1076 		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1077 	PMAP_UNLOCK(pm);
1078 	return (pa);
1079 }
1080 
1081 /*
1082  * Atomically extract and hold the physical page with the given
1083  * pmap and virtual address pair if that mapping permits the given
1084  * protection.
1085  */
1086 vm_page_t
1087 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1088 {
1089 	struct	pvo_entry *pvo;
1090 	vm_page_t m;
1091 
1092 	m = NULL;
1093 	mtx_lock(&Giant);
1094 	vm_page_lock_queues();
1095 	PMAP_LOCK(pmap);
1096 	pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1097 	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
1098 	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
1099 	     (prot & VM_PROT_WRITE) == 0)) {
1100 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1101 		vm_page_hold(m);
1102 	}
1103 	vm_page_unlock_queues();
1104 	PMAP_UNLOCK(pmap);
1105 	mtx_unlock(&Giant);
1106 	return (m);
1107 }
1108 
1109 /*
1110  * Grow the number of kernel page table entries.  Unneeded.
1111  */
1112 void
1113 pmap_growkernel(vm_offset_t addr)
1114 {
1115 }
1116 
1117 void
1118 pmap_init(void)
1119 {
1120 
1121 	CTR0(KTR_PMAP, "pmap_init");
1122 
1123 	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1124 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1125 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1126 	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1127 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1128 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1129 	pmap_initialized = TRUE;
1130 }
1131 
1132 void
1133 pmap_init2(void)
1134 {
1135 
1136 	CTR0(KTR_PMAP, "pmap_init2");
1137 }
1138 
1139 boolean_t
1140 pmap_is_modified(vm_page_t m)
1141 {
1142 
1143 	if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
1144 		return (FALSE);
1145 
1146 	return (pmap_query_bit(m, PTE_CHG));
1147 }
1148 
1149 /*
1150  *	pmap_is_prefaultable:
1151  *
1152  *	Return whether or not the specified virtual address is elgible
1153  *	for prefault.
1154  */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

void
pmap_clear_reference(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_REF, NULL);
}

void
pmap_clear_modify(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_CHG, NULL);
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = pmap_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	PMAP_LOCK(kernel_pmap);
	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct		pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_object_init_pt: non current pmap"));
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;
	pmap_t	pmap;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			PMAP_UNLOCK(pmap);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_UNLOCK(pmap);
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_UNLOCK(pmap);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	/*
	 * Downgrading from writeable: clear the VM page flag
	 */
	if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
		vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (FALSE);

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

1445 		/*
1446 		 * Create a new value by mutiplying by a prime and adding in
1447 		 * entropy from the timebase register.  This is to make the
1448 		 * VSID more random so that the PT hash function collides
1449 		 * less often.  (Note that the prime casues gcc to do shifts
1450 		 * instead of a multiply.)
1451 		 */
1452 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1453 		hash = pmap_vsidcontext & (NPMAPS - 1);
1454 		if (hash == 0)		/* 0 is special, avoid it */
1455 			continue;
1456 		n = hash >> 5;
1457 		mask = 1 << (hash & (VSID_NBPW - 1));
1458 		hash = (pmap_vsidcontext & 0xfffff);
1459 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1460 			/* anything free in this bucket? */
1461 			if (pmap_vsid_bitmap[n] == 0xffffffff) {
1462 				entropy = (pmap_vsidcontext >> 20);
1463 				continue;
1464 			}
1465 			i = ffs(~pmap_vsid_bitmap[i]) - 1;
1466 			mask = 1 << i;
1467 			hash &= 0xfffff & ~(VSID_NBPW - 1);
1468 			hash |= i;
1469 		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mtx_lock(&Giant);
		pmap_remove(pm, sva, eva);
		mtx_unlock(&Giant);
		return;
	}

	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
	mtx_unlock(&Giant);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}

void
pmap_release(pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free the VSID allocated to this pmap's segment registers.
	 */
	if (pmap->pm_sr[0] == 0)
		panic("pmap_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

1599 /*
1600  * Remove the given range of addresses from the specified map.
1601  */
1602 void
1603 pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1604 {
1605 	struct	pvo_entry *pvo;
1606 	int	pteidx;
1607 
1608 	vm_page_lock_queues();
1609 	PMAP_LOCK(pm);
1610 	for (; sva < eva; sva += PAGE_SIZE) {
1611 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1612 		if (pvo != NULL) {
1613 			pmap_pvo_remove(pvo, pteidx);
1614 		}
1615 	}
1616 	PMAP_UNLOCK(pm);
1617 	vm_page_unlock_queues();
1618 }
1619 
1620 /*
1621  * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
1622  * will reflect changes in pte's back to the vm_page.
1623  */
1624 void
1625 pmap_remove_all(vm_page_t m)
1626 {
1627 	struct  pvo_head *pvo_head;
1628 	struct	pvo_entry *pvo, *next_pvo;
1629 	pmap_t	pmap;
1630 
1631 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1632 
1633 	pvo_head = vm_page_to_pvoh(m);
1634 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1635 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1636 
1637 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1638 		pmap = pvo->pvo_pmap;
1639 		PMAP_LOCK(pmap);
1640 		pmap_pvo_remove(pvo, -1);
1641 		PMAP_UNLOCK(pmap);
1642 	}
1643 	vm_page_flag_clear(m, PG_WRITEABLE);
1644 }
1645 
1646 /*
1647  * Remove all pages from specified address space, this aids process exit
1648  * speeds.  This is much faster than pmap_remove in the case of running down
1649  * an entire address space.  Only works for the current pmap.
1650  */
1651 void
1652 pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1653 {
1654 }
1655 
1656 /*
1657  * Allocate a physical page of memory directly from the phys_avail map.
1658  * Can only be called from pmap_bootstrap before avail start and end are
1659  * calculated.
1660  */
1661 static vm_offset_t
1662 pmap_bootstrap_alloc(vm_size_t size, u_int align)
1663 {
1664 	vm_offset_t	s, e;
1665 	int		i, j;
1666 
1667 	size = round_page(size);
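	/*
	 * Take the allocation from the first region that can hold it.
	 * Carving from the front or the back of a region just shrinks
	 * that entry; carving from the middle splits it in two, so the
	 * later entries are shifted up to make room for the new pair.
	 */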
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("pmap_bootstrap_alloc: could not allocate memory");
}

1699 /*
1700  * Return an unmapped pvo for a kernel virtual address.
1701  * Used by pmap functions that operate on physical pages.
1702  */
1703 static struct pvo_entry *
1704 pmap_rkva_alloc(void)
1705 {
1706 	struct		pvo_entry *pvo;
1707 	struct		pte *pt;
1708 	vm_offset_t	kva;
1709 	int		pteidx;
1710 
1711 	if (pmap_rkva_count == 0)
1712 		panic("pmap_rkva_alloc: no more reserved KVAs");
1713 
1714 	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
1715 	pmap_kenter(kva, 0);
1716 
1717 	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
1718 
1719 	if (pvo == NULL)
1720 		panic("pmap_kva_alloc: pmap_pvo_find_va failed");
1721 
1722 	pt = pmap_pvo_to_pte(pvo, pteidx);
1723 
1724 	if (pt == NULL)
1725 		panic("pmap_kva_alloc: pmap_pvo_to_pte failed");
1726 
1727 	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1728 	PVO_PTEGIDX_CLR(pvo);
1729 
1730 	pmap_pte_overflow++;
1731 
1732 	return (pvo);
1733 }
1734 
1735 static void
1736 pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
1737     int *depth_p)
1738 {
1739 	struct	pte *pt;
1740 
1741 	/*
1742 	 * If this pvo already has a valid pte, we need to save it so it can
1743 	 * be restored later.  We then just reload the new PTE over the old
1744 	 * slot.
1745 	 */
1746 	if (saved_pt != NULL) {
1747 		pt = pmap_pvo_to_pte(pvo, -1);
1748 
1749 		if (pt != NULL) {
1750 			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1751 			PVO_PTEGIDX_CLR(pvo);
1752 			pmap_pte_overflow++;
1753 		}
1754 
1755 		*saved_pt = pvo->pvo_pte;
1756 
1757 		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1758 	}
1759 
1760 	pvo->pvo_pte.pte_lo |= pa;
1761 
1762 	if (!pmap_pte_spill(pvo->pvo_vaddr))
1763 		panic("pmap_pa_map: could not spill pvo %p", pvo);
1764 
1765 	if (depth_p != NULL)
1766 		(*depth_p)++;
1767 }
1768 
1769 static void
1770 pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
1771 {
1772 	struct	pte *pt;
1773 
1774 	pt = pmap_pvo_to_pte(pvo, -1);
1775 
1776 	if (pt != NULL) {
1777 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1778 		PVO_PTEGIDX_CLR(pvo);
1779 		pmap_pte_overflow++;
1780 	}
1781 
1782 	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1783 
1784 	/*
1785 	 * If there is a saved PTE and it's valid, restore it and return.
1786 	 */
1787 	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
1788 		if (depth_p != NULL && --(*depth_p) == 0)
1789 			panic("pmap_pa_unmap: restoring but depth == 0");
1790 
1791 		pvo->pvo_pte = *saved_pt;
1792 
1793 		if (!pmap_pte_spill(pvo->pvo_vaddr))
1794 			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
1795 	}
1796 }
1797 
1798 static void
1799 pmap_syncicache(vm_offset_t pa, vm_size_t len)
1800 {
1801 	__syncicache((void *)pa, len);
1802 }
1803 
1804 static void
1805 tlbia(void)
1806 {
1807 	caddr_t	i;
1808 
1809 	SYNC();
1810 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1811 		TLBIE(i);
1812 		EIEIO();
1813 	}
1814 	TLBSYNC();
1815 	SYNC();
1816 }
1817 
static int
pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	pmap_pvo_enter_calls++;
	first = 0;

	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&pmap_table_mutex);
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&pmap_table_mutex);
				return (0);
			}
			pmap_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (pmap_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
		pmap_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&pmap_table_mutex);
		return (ENOMEM);
	}

	pmap_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("pmap_pvo_enter: overflow");
		pmap_pte_overflow++;
	}

	mtx_unlock(&pmap_table_mutex);
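	/*
	 * ENOENT signals that this was the first mapping entered for the
	 * page rather than a failure; callers that only care about hard
	 * errors (e.g. pmap_kenter()) ignore it.
	 */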
	return (first ? ENOENT : 0);
}

1922 static void
1923 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1924 {
1925 	struct	pte *pt;
1926 
1927 	/*
1928 	 * If there is an active pte entry, we need to deactivate it (and
1929 	 * save the ref & chg bits).
1930 	 */
1931 	pt = pmap_pvo_to_pte(pvo, pteidx);
1932 	if (pt != NULL) {
1933 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1934 		PVO_PTEGIDX_CLR(pvo);
1935 	} else {
1936 		pmap_pte_overflow--;
1937 	}
1938 
1939 	/*
1940 	 * Update our statistics.
1941 	 */
1942 	pvo->pvo_pmap->pm_stats.resident_count--;
1943 	if (pvo->pvo_vaddr & PVO_WIRED)
1944 		pvo->pvo_pmap->pm_stats.wired_count--;
1945 
1946 	/*
1947 	 * Save the REF/CHG bits into their cache if the page is managed.
1948 	 */
1949 	if (pvo->pvo_vaddr & PVO_MANAGED) {
1950 		struct	vm_page *pg;
1951 
1952 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1953 		if (pg != NULL) {
1954 			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
1955 			    (PTE_REF | PTE_CHG));
1956 		}
1957 	}
1958 
1959 	/*
1960 	 * Remove this PVO from the PV list.
1961 	 */
1962 	LIST_REMOVE(pvo, pvo_vlink);
1963 
1964 	/*
1965 	 * Remove this from the overflow list and return it to the pool
1966 	 * if we aren't going to reuse it.
1967 	 */
1968 	LIST_REMOVE(pvo, pvo_olink);
1969 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1970 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
1971 		    pmap_upvo_zone, pvo);
1972 	pmap_pvo_entries--;
1973 	pmap_pvo_remove_calls++;
1974 }
1975 
1976 static __inline int
1977 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1978 {
1979 	int	pteidx;
1980 
1981 	/*
1982 	 * We can find the actual pte entry without searching by grabbing
1983 	 * the PTEG index from 3 unused bits in pvo_vaddr and by
1984 	 * noticing the HID bit.
1985 	 */
1986 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1987 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1988 		pteidx ^= pmap_pteg_mask * 8;
1989 
1990 	return (pteidx);
1991 }
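
/*
 * Editor's note: a worked instance of the math above, assuming a
 * 1024-PTEG table (pmap_pteg_mask == 1023).  A pvo hashed to primary
 * bucket 5 with saved slot 3 gives pteidx = 5 * 8 + 3 = 43; if PTE_HID
 * is set the entry really sits in the alternate bucket 5 ^ 1023 = 1018,
 * and the xor above rewrites 43 to 1018 * 8 + 3 = 8147.
 */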
1992 
1993 static struct pvo_entry *
1994 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1995 {
1996 	struct	pvo_entry *pvo;
1997 	int	ptegidx;
1998 	u_int	sr;
1999 
2000 	va &= ~ADDR_POFF;
2001 	sr = va_to_sr(pm->pm_sr, va);
2002 	ptegidx = va_to_pteg(sr, va);
2003 
2004 	mtx_lock(&pmap_table_mutex);
2005 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2006 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2007 			if (pteidx_p)
2008 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
2009 			break;
2010 		}
2011 	}
2012 	mtx_unlock(&pmap_table_mutex);
2013 
2014 	return (pvo);
2015 }
2016 
2017 static struct pte *
2018 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
2019 {
2020 	struct	pte *pt;
2021 
2022 	/*
2023 	 * If we haven't been supplied the ptegidx, calculate it.
2024 	 */
2025 	if (pteidx == -1) {
2026 		int	ptegidx;
2027 		u_int	sr;
2028 
2029 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
2030 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
2031 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
2032 	}
2033 
2034 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
2035 
2036 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
2037 		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
2038 		    "valid pte index", pvo);
2039 	}
2040 
2041 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
2042 		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
2043 		    "but no valid pte", pvo);
2044 	}
2045 
2046 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
2047 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
2048 			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
2049 			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
2050 		}
2051 
2052 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
2053 		    != 0) {
2054 			panic("pmap_pvo_to_pte: pvo %p pte does not match "
2055 			    "pte %p in pmap_pteg_table", pvo, pt);
2056 		}
2057 
2058 		return (pt);
2059 	}
2060 
2061 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
2062 		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
2063 		    "pmap_pteg_table but valid in pvo", pvo, pt);
2064 	}
2065 
2066 	return (NULL);
2067 }
2068 
2069 /*
2070  * XXX: THIS STUFF SHOULD BE IN pte.c?
2071  */
2072 int
2073 pmap_pte_spill(vm_offset_t addr)
2074 {
2075 	struct	pvo_entry *source_pvo, *victim_pvo;
2076 	struct	pvo_entry *pvo;
2077 	int	ptegidx, i, j;
2078 	u_int	sr;
2079 	struct	pteg *pteg;
2080 	struct	pte *pt;
2081 
2082 	pmap_pte_spills++;
2083 
2084 	sr = mfsrin(addr);
2085 	ptegidx = va_to_pteg(sr, addr);
2086 
2087 	/*
2088 	 * Have to substitute some entry.  Use the primary hash for this.
2089 	 * Use low bits of timebase as random generator.
2090 	 */
2091 	pteg = &pmap_pteg_table[ptegidx];
2092 	mtx_lock(&pmap_table_mutex);
2093 	__asm __volatile("mftb %0" : "=r"(i));
2094 	i &= 7;
2095 	pt = &pteg->pt[i];
2096 
2097 	source_pvo = NULL;
2098 	victim_pvo = NULL;
2099 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2100 		/*
2101 		 * We need to find a pvo entry for this address.
2102 		 */
2103 		PMAP_PVO_CHECK(pvo);
2104 		if (source_pvo == NULL &&
2105 		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
2106 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
2107 			/*
2108 			 * Found the entry to be spilled into the pteg.
2109 			 * Attempt to insert it into the page table.
2110 			 */
2111 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
2112 
2113 			if (j >= 0) {
2114 				PVO_PTEGIDX_SET(pvo, j);
2115 				pmap_pte_overflow--;
2116 				PMAP_PVO_CHECK(pvo);
2117 				mtx_unlock(&pmap_table_mutex);
2118 				return (1);
2119 			}
2120 
2121 			source_pvo = pvo;
2122 
2123 			if (victim_pvo != NULL)
2124 				break;
2125 		}
2126 
2127 		/*
2128 		 * We also need the pvo entry of the victim we are replacing
2129 		 * so we can save the R & C bits of the PTE.
2130 		 */
2131 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2132 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
2133 			victim_pvo = pvo;
2134 			if (source_pvo != NULL)
2135 				break;
2136 		}
2137 	}
2138 
2139 	if (source_pvo == NULL) {
2140 		mtx_unlock(&pmap_table_mutex);
2141 		return (0);
2142 	}
2143 
2144 	if (victim_pvo == NULL) {
2145 		if ((pt->pte_hi & PTE_HID) == 0)
2146 			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
2147 			    "entry", pt);
2148 
2149 		/*
2150 		 * If this is a secondary PTE, we need to search its primary
2151 		 * pvo bucket for the matching PVO.
2152 		 */
2153 		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
2154 		    pvo_olink) {
2155 			PMAP_PVO_CHECK(pvo);
2156 			/*
2157 			 * We also need the pvo entry of the victim we are
2158 			 * replacing so we can save the R & C bits of the PTE.
2159 			 */
2160 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
2161 				victim_pvo = pvo;
2162 				break;
2163 			}
2164 		}
2165 
2166 		if (victim_pvo == NULL)
2167 			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
2168 			    "entry", pt);
2169 	}
2170 
2171 	/*
2172 	 * We are invalidating the TLB entry for the EA we are replacing even
2173 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2174 	 * contained in the TLB entry.
2175 	 */
2176 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
2177 
2178 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
2179 	pmap_pte_set(pt, &source_pvo->pvo_pte);
2180 
2181 	PVO_PTEGIDX_CLR(victim_pvo);
2182 	PVO_PTEGIDX_SET(source_pvo, i);
2183 	pmap_pte_replacements++;
2184 
2185 	PMAP_PVO_CHECK(victim_pvo);
2186 	PMAP_PVO_CHECK(source_pvo);
2187 
2188 	mtx_unlock(&pmap_table_mutex);
2189 	return (1);
2190 }
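
/*
 * Editor's note: in outline, pmap_pte_spill() above (1) picks a
 * pseudo-random slot (from the timebase) in the faulting address's
 * primary PTEG, (2) re-inserts the overflowed "source" pvo directly if
 * either hash bucket still has a free slot, and otherwise (3) evicts
 * the "victim" pvo that owns the chosen slot (pmap_pte_unset() saves
 * its REF/CHG state) and installs the source entry there.  A return of
 * 0 tells the fault handler that no overflowed mapping exists for the
 * address.
 */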
2191 
2192 static int
2193 pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2194 {
2195 	struct	pte *pt;
2196 	int	i;
2197 
2198 	/*
2199 	 * First try primary hash.
2200 	 */
2201 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2202 		if ((pt->pte_hi & PTE_VALID) == 0) {
2203 			pvo_pt->pte_hi &= ~PTE_HID;
2204 			pmap_pte_set(pt, pvo_pt);
2205 			return (i);
2206 		}
2207 	}
2208 
2209 	/*
2210 	 * Now try secondary hash.
2211 	 */
2212 	ptegidx ^= pmap_pteg_mask;
2214 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2215 		if ((pt->pte_hi & PTE_VALID) == 0) {
2216 			pvo_pt->pte_hi |= PTE_HID;
2217 			pmap_pte_set(pt, pvo_pt);
2218 			return (i);
2219 		}
2220 	}
2221 
2223 	return (-1);
2224 }
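
/*
 * Editor's note: a tiny illustrative helper (hypothetical, never built)
 * for the invariant the fallback above relies on: the secondary PTEG
 * index is the primary index xor'ed with pmap_pteg_mask, which is also
 * why pmap_pte_spill() probes bucket (ptegidx ^ pmap_pteg_mask) when
 * chasing a PTE_HID entry.  On failure the caller counts the overflow
 * and leaves the mapping to pmap_pte_spill().
 */
#if defined(PMAP_OEA_EXAMPLES)	/* hypothetical guard: never built */
static u_int
secondary_pteg_example(u_int primary)
{
	/* With pmap_pteg_mask == 1023: 5 <-> 1018, an involution. */
	return (primary ^ pmap_pteg_mask);
}
#endif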
2225 
2226 static boolean_t
2227 pmap_query_bit(vm_page_t m, int ptebit)
2228 {
2229 	struct	pvo_entry *pvo;
2230 	struct	pte *pt;
2231 
2232 #if 0
2233 	if (pmap_attr_fetch(m) & ptebit)
2234 		return (TRUE);
2235 #endif
2236 
2237 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2238 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2239 
2240 		/*
2241 		 * See if we saved the bit off.  If so, cache it and return
2242 		 * success.
2243 		 */
2244 		if (pvo->pvo_pte.pte_lo & ptebit) {
2245 			pmap_attr_save(m, ptebit);
2246 			PMAP_PVO_CHECK(pvo);	/* sanity check */
2247 			return (TRUE);
2248 		}
2249 	}
2250 
2251 	/*
2252 	 * No luck, now go through the hard part of looking at the PTEs
2253 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2254 	 * the PTEs.
2255 	 */
2256 	SYNC();
2257 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2258 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2259 
2260 		/*
2261 		 * See if this pvo has a valid PTE.  if so, fetch the
2262 		 * REF/CHG bits from the valid PTE.  If the appropriate
2263 		 * ptebit is set, cache it and return success.
2264 		 */
2265 		pt = pmap_pvo_to_pte(pvo, -1);
2266 		if (pt != NULL) {
2267 			pmap_pte_synch(pt, &pvo->pvo_pte);
2268 			if (pvo->pvo_pte.pte_lo & ptebit) {
2269 				pmap_attr_save(m, ptebit);
2270 				PMAP_PVO_CHECK(pvo);	/* sanity check */
2271 				return (TRUE);
2272 			}
2273 		}
2274 	}
2275 
2276 	return (FALSE);
2277 }
2278 
2279 static u_int
2280 pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
2281 {
2282 	u_int	count;
2283 	struct	pvo_entry *pvo;
2284 	struct	pte *pt;
2285 	int	rv;
2286 
2287 	/*
2288 	 * Clear the cached value.
2289 	 */
2290 	rv = pmap_attr_fetch(m);
2291 	pmap_attr_clear(m, ptebit);
2292 
2293 	/*
2294 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2295 	 * we can reset the right ones).  Note that since the pvo entries and
2296 	 * list heads are accessed via BAT0 and are never placed in the page
2297 	 * table, we don't have to worry about further accesses setting the
2298 	 * REF/CHG bits.
2299 	 */
2300 	SYNC();
2301 
2302 	/*
2303 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2304 	 * valid pte clear the ptebit from the valid pte.
2305 	 */
2306 	count = 0;
2307 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2308 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2309 		pt = pmap_pvo_to_pte(pvo, -1);
2310 		if (pt != NULL) {
2311 			pmap_pte_synch(pt, &pvo->pvo_pte);
2312 			if (pvo->pvo_pte.pte_lo & ptebit) {
2313 				count++;
2314 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2315 			}
2316 		}
2317 		rv |= pvo->pvo_pte.pte_lo;
2318 		pvo->pvo_pte.pte_lo &= ~ptebit;
2319 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2320 	}
2321 
2322 	if (origbit != NULL) {
2323 		*origbit = rv;
2324 	}
2325 
2326 	return (count);
2327 }
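
/*
 * Editor's note: a hedged sketch of how the two helpers above back the
 * machine-independent page-attribute queries; the real wrappers live
 * elsewhere in this file and may differ in detail.
 */
#if defined(PMAP_OEA_EXAMPLES)	/* hypothetical guard: never built */
static boolean_t
page_is_modified_example(vm_page_t m)
{
	/* PTE_CHG is the hardware "changed" (dirty) bit. */
	return (pmap_query_bit(m, PTE_CHG));
}

static void
page_clear_reference_example(vm_page_t m)
{
	/* Drop the hardware "referenced" bit from every mapping. */
	(void)pmap_clear_bit(m, PTE_REF, NULL);
}
#endif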
2328 
2329 /*
2330  * Return true if the physical range is encompassed by the battable[idx]
2331  */
2332 static int
2333 pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2334 {
2335 	u_int prot;
2336 	u_int32_t start;
2337 	u_int32_t end;
2338 	u_int32_t bat_ble;
2339 
2340 	/*
2341 	 * Return immediately if not a valid mapping
2342 	 */
2343 	if (!(battable[idx].batu & BAT_Vs))
2344 		return (EINVAL);
2345 
2346 	/*
2347 	 * The BAT entry must be cache-inhibited, guarded, and r/w
2348 	 * so it can function as an i/o page
2349 	 */
2350 	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2351 	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2352 		return (EPERM);
2353 
2354 	/*
2355 	 * The address should be within the BAT range. Assume that the
2356 	 * start address in the BAT has the correct alignment (thus
2357 	 * not requiring masking)
2358 	 */
2359 	start = battable[idx].batl & BAT_PBS;
2360 	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2361 	end = start | (bat_ble << 15) | 0x7fff;
2362 
2363 	if ((pa < start) || ((pa + size) > end))
2364 		return (ERANGE);
2365 
2366 	return (0);
2367 }
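
/*
 * Editor's note: a worked example of the range math above, assuming
 * the usual OEA batu layout with BL in bits 2..12.  For a 256MB I/O
 * BAT at 0x80000000 the BL field is 0x7ff, so
 * bat_ble == (0x7ff << 2) | 0x03 == 0x1fff, and
 * end == 0x80000000 | (0x1fff << 15) | 0x7fff == 0x8fffffff,
 * i.e. start + 256MB - 1.
 */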
2368 
2369 int
2370 pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
2371 {
2372 	int i;
2373 
2374 	/*
2375 	 * This currently does not work for entries that
2376 	 * overlap 256M BAT segments.
2377 	 */
2378 
2379 	for (i = 0; i < 16; i++)
2380 		if (pmap_bat_mapped(i, pa, size) == 0)
2381 			return (0);
2382 
2383 	return (EFAULT);
2384 }
2385 
2386 /*
2387  * Map a set of physical memory pages into the kernel virtual
2388  * address space. Return a pointer to where it is mapped. This
2389  * routine is intended to be used for mapping device memory,
2390  * NOT real memory.
2391  */
2392 void *
2393 pmap_mapdev(vm_offset_t pa, vm_size_t size)
2394 {
2395 	vm_offset_t va, tmpva, ppa, offset;
2396 	int i;
2397 
2398 	ppa = trunc_page(pa);
2399 	offset = pa & PAGE_MASK;
2400 	size = roundup(offset + size, PAGE_SIZE);
2401 
2402 	GIANT_REQUIRED;
2403 
2404 	/*
2405 	 * If the physical address lies within a valid BAT table entry,
2406 	 * return the 1:1 mapping. This currently doesn't work
2407 	 * for regions that overlap 256M BAT segments.
2408 	 */
2409 	for (i = 0; i < 16; i++) {
2410 		if (pmap_bat_mapped(i, pa, size) == 0)
2411 			return ((void *) pa);
2412 	}
2413 
2414 	va = kmem_alloc_nofault(kernel_map, size);
2415 	if (!va)
2416 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2417 
2418 	for (tmpva = va; size > 0;) {
2419 		pmap_kenter(tmpva, ppa);
2420 		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
2421 		size -= PAGE_SIZE;
2422 		tmpva += PAGE_SIZE;
2423 		ppa += PAGE_SIZE;
2424 	}
2425 
2426 	return ((void *)(va + offset));
2427 }
2428 
2429 void
2430 pmap_unmapdev(vm_offset_t va, vm_size_t size)
2431 {
2432 	vm_offset_t base, offset;
2433 
2434 	/*
2435 	 * If this is outside kernel virtual space, then it's a
2436 	 * battable entry and doesn't require unmapping
2437 	 */
2438 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2439 		base = trunc_page(va);
2440 		offset = va & PAGE_MASK;
2441 		size = roundup(offset + size, PAGE_SIZE);
2442 		kmem_free(kernel_map, base, size);
2443 	}
2444 }
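
/*
 * Editor's note: a minimal usage sketch for the two routines above; the
 * guard, the physical address, and the size are made up for
 * illustration.  pmap_mapdev() may return a 1:1 BAT address, and
 * pmap_unmapdev() deliberately ignores addresses outside the kernel
 * virtual range, so the pairing below is safe in both cases.
 */
#if defined(PMAP_OEA_EXAMPLES)	/* hypothetical guard: never built */
static void
mapdev_example(void)
{
	void	*regs;

	/* Map one page of device registers. */
	regs = pmap_mapdev(0xf0001000, PAGE_SIZE);

	/* ... access *(volatile u_int32_t *)regs ... */

	/* A no-op if regs came from a BAT entry. */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
#endif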
2445