xref: /freebsd/sys/powerpc/aim/mmu_oea.c (revision eacee0ff7ec955b32e09515246bd97b6edcd2b0f)
1 /*
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #ifndef lint
94 static const char rcsid[] =
95   "$FreeBSD$";
96 #endif /* not lint */
97 
98 /*
99  * Manages physical address maps.
100  *
101  * In addition to hardware address maps, this module is called upon to
102  * provide software-use-only maps which may or may not be stored in the
103  * same form as hardware maps.  These pseudo-maps are used to store
104  * intermediate results from copy operations to and from address spaces.
105  *
106  * Since the information managed by this module is also stored by the
107  * logical address mapping module, this module may throw away valid virtual
108  * to physical mappings at almost any time.  However, invalidations of
109  * mappings must be done as requested.
110  *
111  * In order to cope with hardware architectures which make virtual to
112  * physical map invalidates expensive, this module may delay invalidate
113  * or reduced protection operations until such time as they are actually
114  * necessary.  This module is given full information as to which processors
115  * are currently using which maps, and to when physical maps must be made
116  * correct.
117  */
118 
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
129 
130 #include <dev/ofw/openfirm.h>
131 
132 #include <vm/vm.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/vm_zone.h>
142 
143 #include <machine/bat.h>
144 #include <machine/frame.h>
145 #include <machine/md_var.h>
146 #include <machine/psl.h>
147 #include <machine/pte.h>
148 #include <machine/sr.h>
149 
150 #define	PMAP_DEBUG
151 
152 #define TODO	panic("%s: not implemented", __func__);
153 
154 #define	PMAP_LOCK(pm)
155 #define	PMAP_UNLOCK(pm)
156 
157 #define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
158 #define	TLBSYNC()	__asm __volatile("tlbsync")
159 #define	SYNC()		__asm __volatile("sync")
160 #define	EIEIO()		__asm __volatile("eieio")
161 
162 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
163 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
164 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
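
/*
 * For example, VSID_MAKE(3, 0x12345) packs the segment register number
 * into the low 4 bits and the 20-bit hash into bits [23:4], giving
 * 0x123453; VSID_TO_SR() then recovers 3 and VSID_TO_HASH() 0x12345.
 */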
165 
166 #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
167 #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
168 #define	PVO_WIRED		0x0010		/* PVO entry is wired */
169 #define	PVO_MANAGED		0x0020		/* PVO entry is managed */
170 #define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
171 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
172 #define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
173 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
174 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
175 #define	PVO_PTEGIDX_CLR(pvo)	\
176 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
177 #define	PVO_PTEGIDX_SET(pvo, i)	\
178 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
179 
180 #define	PMAP_PVO_CHECK(pvo)
181 
182 struct mem_region {
183 	vm_offset_t	mr_start;
184 	vm_offset_t	mr_size;
185 };
186 
187 struct ofw_map {
188 	vm_offset_t	om_va;
189 	vm_size_t	om_len;
190 	vm_offset_t	om_pa;
191 	u_int		om_mode;
192 };
193 
194 int	pmap_bootstrapped = 0;
195 
196 /*
197  * Virtual and physical address of message buffer.
198  */
199 struct		msgbuf *msgbufp;
200 vm_offset_t	msgbuf_phys;
201 
202 /*
203  * Physical addresses of first and last available physical page.
204  */
205 vm_offset_t avail_start;
206 vm_offset_t avail_end;
207 
208 /*
209  * Map of physical memory regions.
210  */
211 vm_offset_t	phys_avail[128];
212 u_int		phys_avail_count;
213 static struct	mem_region regions[128];
214 static struct	ofw_map translations[128];
215 static int	translations_size;
216 
217 /*
218  * First and last available kernel virtual addresses.
219  */
220 vm_offset_t virtual_avail;
221 vm_offset_t virtual_end;
222 vm_offset_t kernel_vm_end;
223 
224 /*
225  * Kernel pmap.
226  */
227 struct pmap kernel_pmap_store;
228 extern struct pmap ofw_pmap;
229 
230 /*
231  * PTEG data.
232  */
233 static struct	pteg *pmap_pteg_table;
234 u_int		pmap_pteg_count;
235 u_int		pmap_pteg_mask;
236 
237 /*
238  * PVO data.
239  */
240 struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
241 struct	pvo_head pmap_pvo_kunmanaged =
242     LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
243 struct	pvo_head pmap_pvo_unmanaged =
244     LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
245 
246 vm_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
247 vm_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
248 struct		vm_zone pmap_upvo_zone_store;
249 struct		vm_zone pmap_mpvo_zone_store;
250 struct		vm_object pmap_upvo_zone_obj;
251 struct		vm_object pmap_mpvo_zone_obj;
252 
253 #define	PMAP_PVO_SIZE	1024
254 struct		pvo_entry pmap_upvo_pool[PMAP_PVO_SIZE];
255 
256 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
257 static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
258 
259 static boolean_t pmap_initialized = FALSE;
260 
261 /*
262  * Statistics.
263  */
264 u_int	pmap_pte_valid = 0;
265 u_int	pmap_pte_overflow = 0;
266 u_int	pmap_pte_replacements = 0;
267 u_int	pmap_pvo_entries = 0;
268 u_int	pmap_pvo_enter_calls = 0;
269 u_int	pmap_pvo_remove_calls = 0;
270 u_int	pmap_pte_spills = 0;
271 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
272     0, "");
273 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
274     &pmap_pte_overflow, 0, "");
275 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
276     &pmap_pte_replacements, 0, "");
277 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
278     0, "");
279 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
280     &pmap_pvo_enter_calls, 0, "");
281 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
282     &pmap_pvo_remove_calls, 0, "");
283 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
284     &pmap_pte_spills, 0, "");
285 
286 struct	pvo_entry *pmap_pvo_zeropage;
287 
288 vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
289 u_int		pmap_rkva_count = 4;
290 
291 /*
292  * Allocate physical memory for use in pmap_bootstrap.
293  */
294 static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
295 
296 /*
297  * PTE calls.
298  */
299 static int		pmap_pte_insert(u_int, struct pte *);
300 
301 /*
302  * PVO calls.
303  */
304 static int	pmap_pvo_enter(pmap_t, vm_zone_t, struct pvo_head *,
305 		    vm_offset_t, vm_offset_t, u_int, int);
306 static void	pmap_pvo_remove(struct pvo_entry *, int);
307 static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
308 static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
309 
310 /*
311  * Utility routines.
312  */
313 static struct		pvo_entry *pmap_rkva_alloc(void);
314 static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
315 			    struct pte *, int *);
316 static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
317 static void		pmap_syncicache(vm_offset_t, vm_size_t);
318 static boolean_t	pmap_query_bit(vm_page_t, int);
319 static boolean_t	pmap_clear_bit(vm_page_t, int);
320 static void		tlbia(void);
321 
322 static __inline int
323 va_to_sr(u_int *sr, vm_offset_t va)
324 {
325 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
326 }
327 
328 static __inline u_int
329 va_to_pteg(u_int sr, vm_offset_t addr)
330 {
331 	u_int hash;
332 
333 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
334 	    ADDR_PIDX_SHFT);
335 	return (hash & pmap_pteg_mask);
336 }
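
/*
 * A worked example of the primary hash above, with illustrative values:
 * for a segment whose VSID is 0xabcde and an address whose 16-bit page
 * index (the ADDR_PIDX field) is 0x1234, hash = 0xabcde ^ 0x1234 =
 * 0xaaeea; with a 16384-group table (pmap_pteg_mask == 0x3fff), the
 * mapping hashes to PTEG 0xaaeea & 0x3fff = 0x2eea.
 */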
337 
338 static __inline struct pvo_head *
339 pa_to_pvoh(vm_offset_t pa)
340 {
341 	struct	vm_page *pg;
342 
343 	pg = PHYS_TO_VM_PAGE(pa);
344 
345 	if (pg == NULL)
346 		return (&pmap_pvo_unmanaged);
347 
348 	return (&pg->md.mdpg_pvoh);
349 }
350 
351 static __inline struct pvo_head *
352 vm_page_to_pvoh(vm_page_t m)
353 {
354 
355 	return (&m->md.mdpg_pvoh);
356 }
357 
358 static __inline void
359 pmap_attr_clear(vm_page_t m, int ptebit)
360 {
361 
362 	m->md.mdpg_attrs &= ~ptebit;
363 }
364 
365 static __inline int
366 pmap_attr_fetch(vm_page_t m)
367 {
368 
369 	return (m->md.mdpg_attrs);
370 }
371 
372 static __inline void
373 pmap_attr_save(vm_page_t m, int ptebit)
374 {
375 
376 	m->md.mdpg_attrs |= ptebit;
377 }
378 
379 static __inline int
380 pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
381 {
382 	if (pt->pte_hi == pvo_pt->pte_hi)
383 		return (1);
384 
385 	return (0);
386 }
387 
388 static __inline int
389 pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
390 {
391 	return (pt->pte_hi & ~PTE_VALID) ==
392 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
393 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
394 }
395 
396 static __inline void
397 pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
398 {
399 	/*
400 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
401 	 * set when the real pte is set in memory.
402 	 *
403 	 * Note: Don't set the valid bit for correct operation of tlb update.
404 	 */
405 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
406 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
407 	pt->pte_lo = pte_lo;
408 }
409 
410 static __inline void
411 pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
412 {
413 
414 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
415 }
416 
417 static __inline void
418 pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
419 {
420 
421 	/*
422 	 * As shown in Section 7.6.3.2.3 of the PowerPC architecture manual.
423 	 */
424 	pt->pte_lo &= ~ptebit;
425 	TLBIE(va);
426 	EIEIO();
427 	TLBSYNC();
428 	SYNC();
429 }
430 
431 static __inline void
432 pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
433 {
434 
435 	pvo_pt->pte_hi |= PTE_VALID;
436 
437 	/*
438 	 * Update the PTE as defined in section 7.6.3.1.
439 	 * Note that the REF/CHG bits are from pvo_pt and thus should have
440 	 * been saved so this routine can restore them (if desired).
441 	 */
442 	pt->pte_lo = pvo_pt->pte_lo;
443 	EIEIO();
444 	pt->pte_hi = pvo_pt->pte_hi;
445 	SYNC();
446 	pmap_pte_valid++;
447 }
448 
449 static __inline void
450 pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
451 {
452 
453 	pvo_pt->pte_hi &= ~PTE_VALID;
454 
455 	/*
456 	 * Force the ref & chg bits back into the PTEs.
457 	 */
458 	SYNC();
459 
460 	/*
461 	 * Invalidate the pte.
462 	 */
463 	pt->pte_hi &= ~PTE_VALID;
464 
465 	SYNC();
466 	TLBIE(va);
467 	EIEIO();
468 	TLBSYNC();
469 	SYNC();
470 
471 	/*
472 	 * Save the ref & chg bits.
473 	 */
474 	pmap_pte_synch(pt, pvo_pt);
475 	pmap_pte_valid--;
476 }
477 
478 static __inline void
479 pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
480 {
481 
482 	/*
483 	 * Invalidate the PTE
484 	 */
485 	pmap_pte_unset(pt, pvo_pt, va);
486 	pmap_pte_set(pt, pvo_pt);
487 }
488 
489 /*
490  * Quick sort callout for comparing memory regions.
491  */
492 static int	mr_cmp(const void *a, const void *b);
493 static int	om_cmp(const void *a, const void *b);
494 
495 static int
496 mr_cmp(const void *a, const void *b)
497 {
498 	const struct	mem_region *regiona;
499 	const struct	mem_region *regionb;
500 
501 	regiona = a;
502 	regionb = b;
503 	if (regiona->mr_start < regionb->mr_start)
504 		return (-1);
505 	else if (regiona->mr_start > regionb->mr_start)
506 		return (1);
507 	else
508 		return (0);
509 }
510 
511 static int
512 om_cmp(const void *a, const void *b)
513 {
514 	const struct	ofw_map *mapa;
515 	const struct	ofw_map *mapb;
516 
517 	mapa = a;
518 	mapb = b;
519 	if (mapa->om_pa < mapb->om_pa)
520 		return (-1);
521 	else if (mapa->om_pa > mapb->om_pa)
522 		return (1);
523 	else
524 		return (0);
525 }
526 
527 void
528 pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
529 {
530 	ihandle_t	pmem, mmui;
531 	phandle_t	chosen, mmu;
532 	int		sz;
533 	int		i, j;
534 	vm_size_t	size;
535 	vm_offset_t	pa, va, off;
536 	u_int		batl, batu;
537 
538 	/*
539 	 * Use an IBAT and a DBAT to map the bottom segment of memory
540 	 * where we are.
541 	 */
542 	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
543 	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
544 	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
545 	    :: "r"(batu), "r"(batl));
546 #if 0
547 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
548 	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
549 	__asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1"
550 	    :: "r"(batu), "r"(batl));
551 #endif
552 
553 	/*
554 	 * Set the start and end of kva.
555 	 */
556 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
557 	virtual_end = VM_MAX_KERNEL_ADDRESS;
558 
559 	if ((pmem = OF_finddevice("/memory")) == -1)
560 		panic("pmap_bootstrap: can't locate memory device");
561 	if ((sz = OF_getproplen(pmem, "available")) == -1)
562 		panic("pmap_bootstrap: can't get length of available memory");
563 	if (sizeof(phys_avail) < sz)
564 		panic("pmap_bootstrap: phys_avail too small");
565 	if (sizeof(regions) < sz)
566 		panic("pmap_bootstrap: regions too small");
567 	bzero(regions, sz);
568 	if (OF_getprop(pmem, "available", regions, sz) == -1)
569 		panic("pmap_bootstrap: can't get available memory");
570 	sz /= sizeof(*regions);
571 	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
572 	qsort(regions, sz, sizeof(*regions), mr_cmp);
573 	phys_avail_count = 0;
574 	for (i = 0, j = 0; i < sz; i++, j += 2) {
575 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
576 		    regions[i].mr_start + regions[i].mr_size,
577 		    regions[i].mr_size);
578 		phys_avail[j] = regions[i].mr_start;
579 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
580 		phys_avail_count++;
581 	}
582 
583 	/*
584 	 * Allocate PTEG table.
585 	 */
586 #ifdef PTEGCOUNT
587 	pmap_pteg_count = PTEGCOUNT;
588 #else
589 	pmap_pteg_count = 0x1000;
590 
591 	while (pmap_pteg_count < physmem)
592 		pmap_pteg_count <<= 1;
593 
594 	pmap_pteg_count >>= 1;
595 #endif /* PTEGCOUNT */
596 
597 	size = pmap_pteg_count * sizeof(struct pteg);
598 	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
599 	    size);
600 	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
601 	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
602 	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
603 	pmap_pteg_mask = pmap_pteg_count - 1;
604 
605 	/*
606 	 * Allocate PTE overflow lists.
607 	 */
608 	size = sizeof(struct pvo_head) * pmap_pteg_count;
609 	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
610 	    PAGE_SIZE);
611 	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
612 	for (i = 0; i < pmap_pteg_count; i++)
613 		LIST_INIT(&pmap_pvo_table[i]);
614 
615 	/*
616 	 * Allocate the message buffer.
617 	 */
618 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
619 
620 	/*
621 	 * Initialise the unmanaged pvo pool.
622 	 */
623 	pmap_upvo_zone = &pmap_upvo_zone_store;
624 	zbootinit(pmap_upvo_zone, "unmanaged pvo", sizeof (struct pvo_entry),
625 	    pmap_upvo_pool, PMAP_PVO_SIZE);
626 
627 	/*
628 	 * Make sure kernel vsid is allocated as well as VSID 0.
629 	 */
630 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
631 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
632 	pmap_vsid_bitmap[0] |= 1;
633 
634 	/*
635 	 * Set up the OpenFirmware pmap and add its mappings.
636 	 */
637 	pmap_pinit(&ofw_pmap);
638 	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
639 	if ((chosen = OF_finddevice("/chosen")) == -1)
640 		panic("pmap_bootstrap: can't find /chosen");
641 	OF_getprop(chosen, "mmu", &mmui, 4);
642 	if ((mmu = OF_instance_to_package(mmui)) == -1)
643 		panic("pmap_bootstrap: can't get mmu package");
644 	if ((sz = OF_getproplen(mmu, "translations")) == -1)
645 		panic("pmap_bootstrap: can't get ofw translation count");
646 	if (sizeof(translations) < sz)
647 		panic("pmap_bootstrap: translations too small");
648 	bzero(translations, sz);
649 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
650 		panic("pmap_bootstrap: can't get ofw translations");
651 	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
652 	qsort(translations, sz, sizeof (*translations), om_cmp);
653 	for (i = 0; i < sz; i++) {
654 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
655 		    translations[i].om_pa, translations[i].om_va,
656 		    translations[i].om_len);
657 
658 		/* Drop stuff below something? */
659 
660 		/* Enter the pages? */
661 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
662 			struct	vm_page m;
663 
664 			m.phys_addr = translations[i].om_pa + off;
665 			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
666 			    VM_PROT_ALL, 1);
667 		}
668 	}
669 #ifdef SMP
670 	TLBSYNC();
671 #endif
672 
673 	/*
674 	 * Initialize the kernel pmap (which is statically allocated).
675 	 */
676 	for (i = 0; i < 16; i++) {
677 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
678 	}
679 	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
680 	kernel_pmap->pm_active = ~0;
681 	kernel_pmap->pm_count = 1;
682 
683 	/*
684 	 * Allocate a kernel stack with a guard page for thread0 and map it
685 	 * into the kernel page map.
686 	 */
687 	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
688 	kstack0_phys = pa;
689 	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
690 	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
691 	    kstack0);
692 	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
693 	for (i = 0; i < KSTACK_PAGES; i++) {
694 		pa = kstack0_phys + i * PAGE_SIZE;
695 		va = kstack0 + i * PAGE_SIZE;
696 		pmap_kenter(va, pa);
697 		TLBIE(va);
698 	}
699 
700 	/*
701 	 * Calculate the first and last available physical addresses.
702 	 */
703 	avail_start = phys_avail[0];
704 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
705 		;
706 	avail_end = phys_avail[i + 1];
707 	Maxmem = powerpc_btop(avail_end);
708 
709 	/*
710 	 * Allocate virtual address space for the message buffer.
711 	 */
712 	msgbufp = (struct msgbuf *)virtual_avail;
713 	virtual_avail += round_page(MSGBUF_SIZE);
714 
715 	/*
716 	 * Initialize hardware.
717 	 */
718 	for (i = 0; i < 16; i++) {
719 		__asm __volatile("mtsrin %0,%1"
720 		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
721 	}
722 	__asm __volatile ("mtsr %0,%1"
723 	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
724 	__asm __volatile ("sync; mtsdr1 %0; isync"
725 	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
726 	tlbia();
727 
728 	pmap_bootstrapped++;
729 }
730 
731 /*
732  * Activate a user pmap.  The pmap must be activated before its address
733  * space can be accessed in any way.
734  */
735 void
736 pmap_activate(struct thread *td)
737 {
738 	pmap_t	pm;
739 	int	i;
740 
741 	/*
742 	 * Load all the data we need up front to encourage the compiler to
743 	 * not issue any loads while we have interrupts disabled below.
744 	 */
745 	pm = &td->td_proc->p_vmspace->vm_pmap;
746 
747 	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
748 
749 	pm->pm_active |= PCPU_GET(cpumask);
750 
751 	/*
752 	 * XXX: Address this again later?
753 	 */
754 	critical_enter();
755 
756 	for (i = 0; i < 16; i++) {
757 		__asm __volatile("mtsr %0,%1" :: "r"(i), "r"(pm->pm_sr[i]));
758 	}
759 	__asm __volatile("sync; isync");
760 
761 	critical_exit();
762 }
763 
764 vm_offset_t
765 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
766 {
767 	TODO;
768 	return (0);
769 }
770 
771 void
772 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
773 {
774 	TODO;
775 }
776 
777 void
778 pmap_clear_modify(vm_page_t m)
779 {
780 
781 	if (m->flags & PG_FICTITIOUS)
782 		return;
783 	pmap_clear_bit(m, PTE_CHG);
784 }
785 
786 void
787 pmap_collect(void)
788 {
789 	TODO;
790 }
791 
792 void
793 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
794 	  vm_size_t len, vm_offset_t src_addr)
795 {
796 	TODO;
797 }
798 
799 void
800 pmap_copy_page(vm_offset_t src, vm_offset_t dst)
801 {
802 	TODO;
803 }
804 
805 /*
806  * Zero a page of physical memory by temporarily mapping it into KVA.
807  */
808 void
809 pmap_zero_page(vm_offset_t pa)
810 {
811 	caddr_t	va;
812 	int	i;
813 
814 	if (pa < SEGMENT_LENGTH) {
815 		va = (caddr_t) pa;
816 	} else if (pmap_initialized) {
817 		if (pmap_pvo_zeropage == NULL)
818 			pmap_pvo_zeropage = pmap_rkva_alloc();
819 		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
820 		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
821 	} else {
822 		panic("pmap_zero_page: can't zero pa %#x", pa);
823 	}
824 
825 	bzero(va, PAGE_SIZE);
826 
827 	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
828 		__asm __volatile("dcbz 0,%0" :: "r"(va));
829 		va += CACHELINESIZE;
830 	}
831 
832 	if (pa >= SEGMENT_LENGTH)
833 		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
834 }
835 
836 void
837 pmap_zero_page_area(vm_offset_t pa, int off, int size)
838 {
839 	TODO;
840 }
841 
842 /*
843  * Map the given physical page at the specified virtual address in the
844  * target pmap with the protection requested.  If specified the page
845  * will be wired down.
846  */
847 void
848 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
849 	   boolean_t wired)
850 {
851 	struct		pvo_head *pvo_head;
852 	vm_zone_t	zone;
853 	u_int		pte_lo, pvo_flags;
854 	int		error;
855 
856 	if (!pmap_initialized) {
857 		pvo_head = &pmap_pvo_kunmanaged;
858 		zone = pmap_upvo_zone;
859 		pvo_flags = 0;
860 	} else {
861 		pvo_head = pa_to_pvoh(m->phys_addr);
862 		zone = pmap_mpvo_zone;
863 		pvo_flags = PVO_MANAGED;
864 	}
865 
866 	pte_lo = PTE_I | PTE_G;
867 
868 	if (prot & VM_PROT_WRITE)
869 		pte_lo |= PTE_BW;
870 	else
871 		pte_lo |= PTE_BR;
872 
873 	if (prot & VM_PROT_EXECUTE)
874 		pvo_flags |= PVO_EXECUTABLE;
875 
876 	if (wired)
877 		pvo_flags |= PVO_WIRED;
878 
879 	critical_enter();
880 
881 	error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo,
882 	    pvo_flags);
883 
884 	critical_exit();
885 
886 	if (error == ENOENT) {
887 		/*
888 		 * Flush the real memory from the cache.
889 		 */
890 		if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) {
891 			pmap_syncicache(m->phys_addr, PAGE_SIZE);
892 		}
893 	}
894 }
895 
896 vm_offset_t
897 pmap_extract(pmap_t pmap, vm_offset_t va)
898 {
899 	TODO;
900 	return (0);
901 }
902 
903 /*
904  * Grow the number of kernel page table entries.  Unneeded.
905  */
906 void
907 pmap_growkernel(vm_offset_t addr)
908 {
909 }
910 
911 void
912 pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
913 {
914 
915 	CTR0(KTR_PMAP, "pmap_init");
916 }
917 
918 void
919 pmap_init2(void)
920 {
921 
922 	CTR0(KTR_PMAP, "pmap_init2");
923 	zinitna(pmap_upvo_zone, &pmap_upvo_zone_obj, NULL, 0, PMAP_PVO_SIZE,
924 	    ZONE_INTERRUPT, 1);
925 	pmap_mpvo_zone = zinit("managed pvo", sizeof(struct pvo_entry),
926 	    PMAP_PVO_SIZE, ZONE_INTERRUPT, 1);
927 	pmap_initialized = TRUE;
928 }
929 
930 boolean_t
931 pmap_is_modified(vm_page_t m)
932 {
933 	TODO;
934 	return (0);
935 }
936 
937 void
938 pmap_clear_reference(vm_page_t m)
939 {
940 	TODO;
941 }
942 
943 int
944 pmap_ts_referenced(vm_page_t m)
945 {
946 	TODO;
947 	return (0);
948 }
949 
950 /*
951  * Map a wired page into kernel virtual address space.
952  */
953 void
954 pmap_kenter(vm_offset_t va, vm_offset_t pa)
955 {
956 	u_int		pte_lo;
957 	int		error;
958 	int		i;
959 
960 #if 0
961 	if (va < VM_MIN_KERNEL_ADDRESS)
962 		panic("pmap_kenter: attempt to enter non-kernel address %#x",
963 		    va);
964 #endif
965 
966 	pte_lo = PTE_I | PTE_G | PTE_BW;
967 	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
968 		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
969 			pte_lo &= ~(PTE_I | PTE_G);
970 			break;
971 		}
972 	}
973 
974 	critical_enter();
975 
976 	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
977 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
978 
979 	critical_exit();
980 
981 	if (error != 0 && error != ENOENT)
982 		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
983 		    pa, error);
984 
985 	/*
986 	 * Flush the real memory from the instruction cache.
987 	 */
988 	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
989 		pmap_syncicache(pa, PAGE_SIZE);
990 	}
991 }
992 
993 vm_offset_t
994 pmap_kextract(vm_offset_t va)
995 {
996 	TODO;
997 	return (0);
998 }
999 
1000 void
1001 pmap_kremove(vm_offset_t va)
1002 {
1003 	TODO;
1004 }
1005 
1006 /*
1007  * Map a range of physical addresses into kernel virtual address space.
1008  *
1009  * The value passed in *virt is a suggested virtual address for the mapping.
1010  * Architectures which can support a direct-mapped physical to virtual region
1011  * can return the appropriate address within that region, leaving '*virt'
1012  * unchanged.  We cannot and therefore do not; *virt is updated with the
1013  * first usable address after the mapped region.
1014  */
1015 vm_offset_t
1016 pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
1017 {
1018 	vm_offset_t	sva, va;
1019 
1020 	sva = *virt;
1021 	va = sva;
1022 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1023 		pmap_kenter(va, pa_start);
1024 	*virt = va;
1025 	return (sva);
1026 }
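
/*
 * Usage sketch for pmap_map() (illustrative only, kept out of the build):
 * wire a physical range into KVA and advance the free-space pointer.
 */
#if 0
	vm_offset_t	va, mapped;

	va = virtual_avail;
	mapped = pmap_map(&va, msgbuf_phys, msgbuf_phys + MSGBUF_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);
	virtual_avail = va;	/* first usable address after the mapping */
#endif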
1027 
1028 int
1029 pmap_mincore(pmap_t pmap, vm_offset_t addr)
1030 {
1031 	TODO;
1032 	return (0);
1033 }
1034 
1035 /*
1036  * Create the uarea for a new process.
1037  * This routine directly affects the fork perf for a process.
1038  */
1039 void
1040 pmap_new_proc(struct proc *p)
1041 {
1042 	vm_object_t	upobj;
1043 	vm_offset_t	up;
1044 	vm_page_t	m;
1045 	u_int		i;
1046 
1047 	/*
1048 	 * Allocate the object for the upages.
1049 	 */
1050 	upobj = p->p_upages_obj;
1051 	if (upobj == NULL) {
1052 		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
1053 		p->p_upages_obj = upobj;
1054 	}
1055 
1056 	/*
1057 	 * Get a kernel virtual address for the uarea for this process.
1058 	 */
1059 	up = (vm_offset_t)p->p_uarea;
1060 	if (up == 0) {
1061 		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
1062 		if (up == 0)
1063 			panic("pmap_new_proc: upage allocation failed");
1064 		p->p_uarea = (struct user *)up;
1065 	}
1066 
1067 	for (i = 0; i < UAREA_PAGES; i++) {
1068 		/*
1069 		 * Get a uarea page.
1070 		 */
1071 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1072 
1073 		/*
1074 		 * Wire the page.
1075 		 */
1076 		m->wire_count++;
1077 
1078 		/*
1079 		 * Enter the page into the kernel address space.
1080 		 */
1081 		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1082 
1083 		vm_page_wakeup(m);
1084 		vm_page_flag_clear(m, PG_ZERO);
1085 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1086 		m->valid = VM_PAGE_BITS_ALL;
1087 	}
1088 }
1089 
1090 void
1091 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
1092 		    vm_pindex_t pindex, vm_size_t size, int limit)
1093 {
1094 	TODO;
1095 }
1096 
1097 /*
1098  * Lower the permission for all mappings to a given page.
1099  */
1100 void
1101 pmap_page_protect(vm_page_t m, vm_prot_t prot)
1102 {
1103 	struct	pvo_head *pvo_head;
1104 	struct	pvo_entry *pvo, *next_pvo;
1105 	struct	pte *pt;
1106 
1107 	/*
1108 	 * Since the routine only downgrades protection, if the
1109 	 * maximal protection is desired, there isn't any change
1110 	 * to be made.
1111 	 */
1112 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
1113 	    (VM_PROT_READ|VM_PROT_WRITE))
1114 		return;
1115 
1116 	critical_enter();
1117 
1118 	pvo_head = vm_page_to_pvoh(m);
1119 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1120 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1121 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1122 
1123 		/*
1124 		 * Downgrading to no mapping at all, we just remove the entry.
1125 		 */
1126 		if ((prot & VM_PROT_READ) == 0) {
1127 			pmap_pvo_remove(pvo, -1);
1128 			continue;
1129 		}
1130 
1131 		/*
1132 		 * If EXEC permission is being revoked, just clear the flag
1133 		 * in the PVO.
1134 		 */
1135 		if ((prot & VM_PROT_EXECUTE) == 0)
1136 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1137 
1138 		/*
1139 		 * If this entry is already RO, don't diddle with the page
1140 		 * table.
1141 		 */
1142 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
1143 			PMAP_PVO_CHECK(pvo);
1144 			continue;
1145 		}
1146 
1147 		/*
1148 		 * Grab the PTE before we diddle the bits so pvo_to_pte can
1149 		 * verify the pte contents are as expected.
1150 		 */
1151 		pt = pmap_pvo_to_pte(pvo, -1);
1152 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1153 		pvo->pvo_pte.pte_lo |= PTE_BR;
1154 		if (pt != NULL)
1155 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1156 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1157 	}
1158 
1159 	critical_exit();
1160 }
1161 
1162 /*
1163  * Make the specified page pageable (or not).  Unneeded.
1164  */
1165 void
1166 pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1167 	      boolean_t pageable)
1168 {
1169 }
1170 
1171 boolean_t
1172 pmap_page_exists(pmap_t pmap, vm_page_t m)
1173 {
1174 	TODO;
1175 	return (0);
1176 }
1177 
1178 static u_int	pmap_vsidcontext;
1179 
1180 void
1181 pmap_pinit(pmap_t pmap)
1182 {
1183 	int	i, mask;
1184 	u_int	entropy;
1185 
1186 	entropy = 0;
1187 	__asm __volatile("mftb %0" : "=r"(entropy));
1188 
1189 	/*
1190 	 * Allocate some segment registers for this pmap.
1191 	 */
1192 	pmap->pm_count = 1;
1193 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1194 		u_int	hash, n;
1195 
1196 		/*
1197 		 * Create a new value by multiplying by a prime and adding in
1198 		 * entropy from the timebase register.  This is to make the
1199 		 * VSID more random so that the PT hash function collides
1200 		 * less often.  (Note that the prime causes gcc to do shifts
1201 		 * instead of a multiply.)
1202 		 */
1203 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1204 		hash = pmap_vsidcontext & (NPMAPS - 1);
1205 		if (hash == 0)		/* 0 is special, avoid it */
1206 			continue;
1207 		n = hash >> 5;
1208 		mask = 1 << (hash & (VSID_NBPW - 1));
1209 		hash = (pmap_vsidcontext & 0xfffff);
1210 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1211 			/* anything free in this bucket? */
1212 			if (pmap_vsid_bitmap[n] == 0xffffffff) {
1213 				entropy = (pmap_vsidcontext >> 20);
1214 				continue;
1215 			}
1216 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
1217 			mask = 1 << i;
1218 			hash &= 0xfffff & ~(VSID_NBPW - 1);
1219 			hash |= i;
1220 		}
1221 		pmap_vsid_bitmap[n] |= mask;
1222 		for (i = 0; i < 16; i++)
1223 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1224 		return;
1225 	}
1226 
1227 	panic("pmap_pinit: out of segments");
1228 }
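
/*
 * A worked example of the allocation above, with illustrative values: if
 * the low 20 bits of pmap_vsidcontext come out as 0x002c5, the bitmap
 * slot is word n = (0x2c5 & (NPMAPS - 1)) >> 5 = 22, bit 0x2c5 & 31 = 5;
 * if that bit is free, segment register i receives VSID_MAKE(i, 0x2c5),
 * i.e. pm_sr[0] = 0x2c50, pm_sr[1] = 0x2c51, and so on.
 */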
1229 
1230 /*
1231  * Initialize the pmap associated with process 0.
1232  */
1233 void
1234 pmap_pinit0(pmap_t pm)
1235 {
1236 
1237 	pmap_pinit(pm);
1238 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1239 }
1240 
1241 void
1242 pmap_pinit2(pmap_t pmap)
1243 {
1244 	/* XXX: Remove this stub when no longer called */
1245 }
1246 
1247 void
1248 pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
1249 {
1250 	TODO;
1251 }
1252 
1253 void
1254 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1255 {
1256 	TODO;
1257 }
1258 
1259 vm_offset_t
1260 pmap_phys_address(int ppn)
1261 {
1262 	TODO;
1263 	return (0);
1264 }
1265 
1266 void
1267 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1268 {
1269 	int	i;
1270 
1271 	for (i = 0; i < count; i++, va += PAGE_SIZE)
1272 		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
1273 }
1274 
1275 void
1276 pmap_qremove(vm_offset_t va, int count)
1277 {
1278 	TODO;
1279 }
1280 
1281 /*
1282  * Add a reference to the specified pmap.
1283  */
1284 void
1285 pmap_reference(pmap_t pm)
1286 {
1287 
1288 	if (pm != NULL)
1289 		pm->pm_count++;
1290 }
1291 
1292 void
1293 pmap_release(pmap_t pmap)
1294 {
1295 	TODO;
1296 }
1297 
1298 void
1299 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1300 {
1301 	TODO;
1302 }
1303 
1304 void
1305 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1306 {
1307 	TODO;
1308 }
1309 
1310 void
1311 pmap_swapin_proc(struct proc *p)
1312 {
1313 	TODO;
1314 }
1315 
1316 void
1317 pmap_swapout_proc(struct proc *p)
1318 {
1319 	TODO;
1320 }
1321 
1322 /*
1323  * Create the kernel stack and pcb for a new thread.
1324  * This routine directly affects the fork perf for a process and
1325  * create performance for a thread.
1326  */
1327 void
1328 pmap_new_thread(struct thread *td)
1329 {
1330 	vm_object_t	ksobj;
1331 	vm_offset_t	ks;
1332 	vm_page_t	m;
1333 	u_int		i;
1334 
1335 	/*
1336 	 * Allocate object for the kstack.
1337 	 */
1338 	ksobj = td->td_kstack_obj;
1339 	if (ksobj == NULL) {
1340 		ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
1341 		td->td_kstack_obj = ksobj;
1342 	}
1343 
1344 	/*
1345 	 * Get a kernel virtual address for the kstack for this thread.
1346 	 */
1347 	ks = td->td_kstack;
1348 	if (ks == 0) {
1349 		ks = kmem_alloc_nofault(kernel_map,
1350 		    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1351 		if (ks == 0)
1352 			panic("pmap_new_thread: kstack allocation failed");
1353 		TLBIE(ks);
1354 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
1355 		td->td_kstack = ks;
1356 	}
1357 
1358 	for (i = 0; i < KSTACK_PAGES; i++) {
1359 		/*
1360 		 * Get a kernel stack page.
1361 		 */
1362 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1363 
1364 		/*
1365 		 * Wire the page.
1366 		 */
1367 		m->wire_count++;
1368 
1369 		/*
1370 		 * Enter the page into the kernel address space.
1371 		 */
1372 		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1373 
1374 		vm_page_wakeup(m);
1375 		vm_page_flag_clear(m, PG_ZERO);
1376 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1377 		m->valid = VM_PAGE_BITS_ALL;
1378 	}
1379 }
1380 
1381 void
1382 pmap_dispose_proc(struct proc *p)
1383 {
1384 	TODO;
1385 }
1386 
1387 void
1388 pmap_dispose_thread(struct thread *td)
1389 {
1390 	TODO;
1391 }
1392 
1393 void
1394 pmap_swapin_thread(struct thread *td)
1395 {
1396 	TODO;
1397 }
1398 
1399 void
1400 pmap_swapout_thread(struct thread *td)
1401 {
1402 	TODO;
1403 }
1404 
1405 /*
1406  * Allocate a physical page of memory directly from the phys_avail map.
1407  * Can only be called from pmap_bootstrap before avail_start and
1408  * avail_end are calculated.
1409  */
1410 static vm_offset_t
1411 pmap_bootstrap_alloc(vm_size_t size, u_int align)
1412 {
1413 	vm_offset_t	s, e;
1414 	int		i, j;
1415 
1416 	size = round_page(size);
1417 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1418 		if (align != 0)
1419 			s = (phys_avail[i] + align - 1) & ~(align - 1);
1420 		else
1421 			s = phys_avail[i];
1422 		e = s + size;
1423 
1424 		if (s < phys_avail[i] || e > phys_avail[i + 1])
1425 			continue;
1426 
1427 		if (s == phys_avail[i]) {
1428 			phys_avail[i] += size;
1429 		} else if (e == phys_avail[i + 1]) {
1430 			phys_avail[i + 1] -= size;
1431 		} else {
1432 			for (j = phys_avail_count * 2; j > i; j -= 2) {
1433 				phys_avail[j] = phys_avail[j - 2];
1434 				phys_avail[j + 1] = phys_avail[j - 1];
1435 			}
1436 
1437 			phys_avail[i + 3] = phys_avail[i + 1];
1438 			phys_avail[i + 1] = s;
1439 			phys_avail[i + 2] = e;
1440 			phys_avail_count++;
1441 		}
1442 
1443 		return (s);
1444 	}
1445 	panic("pmap_bootstrap_alloc: could not allocate memory");
1446 }
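
/*
 * Example of the alignment step above: a request for 0x8000 bytes aligned
 * to 0x10000 when phys_avail[i] is 0x3f000 rounds s up to
 * (0x3f000 + 0xffff) & ~0xffff = 0x40000, carves out [0x40000, 0x48000)
 * and splits the original range around it.
 */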
1447 
1448 /*
1449  * Return an unmapped pvo for a kernel virtual address.
1450  * Used by pmap functions that operate on physical pages.
1451  */
1452 static struct pvo_entry *
1453 pmap_rkva_alloc(void)
1454 {
1455 	struct		pvo_entry *pvo;
1456 	struct		pte *pt;
1457 	vm_offset_t	kva;
1458 	int		pteidx;
1459 
1460 	if (pmap_rkva_count == 0)
1461 		panic("pmap_rkva_alloc: no more reserved KVAs");
1462 
1463 	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
1464 	pmap_kenter(kva, 0);
1465 
1466 	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
1467 
1468 	if (pvo == NULL)
1469 		panic("pmap_kva_alloc: pmap_pvo_find_va failed");
1470 
1471 	pt = pmap_pvo_to_pte(pvo, pteidx);
1472 
1473 	if (pt == NULL)
1474 		panic("pmap_kva_alloc: pmap_pvo_to_pte failed");
1475 
1476 	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1477 	PVO_PTEGIDX_CLR(pvo);
1478 
1479 	pmap_pte_overflow++;
1480 
1481 	return (pvo);
1482 }
1483 
1484 static void
1485 pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
1486     int *depth_p)
1487 {
1488 	struct	pte *pt;
1489 
1490 	critical_enter();
1491 
1492 	/*
1493 	 * If this pvo already has a valid pte, we need to save it so it can
1494 	 * be restored later.  We then just reload the new PTE over the old
1495 	 * slot.
1496 	 */
1497 	if (saved_pt != NULL) {
1498 		pt = pmap_pvo_to_pte(pvo, -1);
1499 
1500 		if (pt != NULL) {
1501 			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1502 			PVO_PTEGIDX_CLR(pvo);
1503 			pmap_pte_overflow++;
1504 		}
1505 
1506 		*saved_pt = pvo->pvo_pte;
1507 
1508 		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1509 	}
1510 
1511 	pvo->pvo_pte.pte_lo |= pa;
1512 
1513 	if (!pmap_pte_spill(pvo->pvo_vaddr))
1514 		panic("pmap_pa_map: could not spill pvo %p", pvo);
1515 
1516 	if (depth_p != NULL)
1517 		(*depth_p)++;
1518 
1519 	critical_exit();
1520 }
1521 
1522 static void
1523 pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
1524 {
1525 	struct	pte *pt;
1526 
1527 	critical_enter();
1528 
1529 	pt = pmap_pvo_to_pte(pvo, -1);
1530 
1531 	if (pt != NULL) {
1532 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1533 		PVO_PTEGIDX_CLR(pvo);
1534 		pmap_pte_overflow++;
1535 	}
1536 
1537 	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1538 
1539 	/*
1540 	 * If there is a saved PTE and it's valid, restore it and return.
1541 	 */
1542 	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
1543 		if (depth_p != NULL && --(*depth_p) == 0)
1544 			panic("pmap_pa_unmap: restoring but depth == 0");
1545 
1546 		pvo->pvo_pte = *saved_pt;
1547 
1548 		if (!pmap_pte_spill(pvo->pvo_vaddr))
1549 			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
1550 	}
1551 
1552 	critical_exit();
1553 }
1554 
1555 static void
1556 pmap_syncicache(vm_offset_t pa, vm_size_t len)
1557 {
1558 	__syncicache((void *)pa, len);
1559 }
1560 
1561 static void
1562 tlbia(void)
1563 {
1564 	caddr_t	i;
1565 
1566 	SYNC();
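	/*
	 * tlbia is optional on 32-bit implementations, so invalidate one
	 * page at a time across the first 256KB of effective addresses;
	 * this is believed to be enough to hit every TLB congruence class,
	 * since tlbie selects the TLB set from the effective address alone.
	 */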
1567 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1568 		TLBIE(i);
1569 		EIEIO();
1570 	}
1571 	TLBSYNC();
1572 	SYNC();
1573 }
1574 
1575 static int
1576 pmap_pvo_enter(pmap_t pm, vm_zone_t zone, struct pvo_head *pvo_head,
1577     vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1578 {
1579 	struct	pvo_entry *pvo;
1580 	u_int	sr;
1581 	int	first;
1582 	u_int	ptegidx;
1583 	int	i;
1584 
1585 	pmap_pvo_enter_calls++;
1586 
1587 	/*
1588 	 * Compute the PTE Group index.
1589 	 */
1590 	va &= ~ADDR_POFF;
1591 	sr = va_to_sr(pm->pm_sr, va);
1592 	ptegidx = va_to_pteg(sr, va);
1593 
1594 	critical_enter();
1595 
1596 	/*
1597 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
1598 	 * there is a mapping.
1599 	 */
1600 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1601 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1602 			pmap_pvo_remove(pvo, -1);
1603 			break;
1604 		}
1605 	}
1606 
1607 	/*
1608 	 * If we aren't overwriting a mapping, try to allocate.
1609 	 */
1610 	critical_exit();
1611 
1612 	pvo = zalloc(zone);
1613 
1614 	critical_enter();
1615 
1616 	if (pvo == NULL) {
1617 		critical_exit();
1618 		return (ENOMEM);
1619 	}
1620 
1621 	pmap_pvo_entries++;
1622 	pvo->pvo_vaddr = va;
1623 	pvo->pvo_pmap = pm;
1624 	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1625 	pvo->pvo_vaddr &= ~ADDR_POFF;
1626 	if (flags & PVO_EXECUTABLE)
1627 		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1628 	if (flags & PVO_WIRED)
1629 		pvo->pvo_vaddr |= PVO_WIRED;
1630 	if (pvo_head != &pmap_pvo_kunmanaged)
1631 		pvo->pvo_vaddr |= PVO_MANAGED;
1632 	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
1633 
1634 	/*
1635 	 * Remember if the list was empty and therefore will be the first
1636 	 * item.
1637 	 */
1638 	first = LIST_FIRST(pvo_head) == NULL;
1639 
1640 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1641 	if (pvo->pvo_vaddr & PVO_WIRED)
1642 		pvo->pvo_pmap->pm_stats.wired_count++;
1643 	pvo->pvo_pmap->pm_stats.resident_count++;
1644 
1645 	/*
1646 	 * We hope this succeeds but it isn't required.
1647 	 */
1648 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1649 	if (i >= 0) {
1650 		PVO_PTEGIDX_SET(pvo, i);
1651 	} else {
1652 		panic("pmap_pvo_enter: overflow");
1653 		pmap_pte_overflow++;
1654 	}
1655 
1656 	critical_exit();
1657 
1658 	return (first ? ENOENT : 0);
1659 }
1660 
1661 static void
1662 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1663 {
1664 	struct	pte *pt;
1665 
1666 	/*
1667 	 * If there is an active pte entry, we need to deactivate it (and
1668 	 * save the ref & chg bits).
1669 	 */
1670 	pt = pmap_pvo_to_pte(pvo, pteidx);
1671 	if (pt != NULL) {
1672 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1673 		PVO_PTEGIDX_CLR(pvo);
1674 	} else {
1675 		pmap_pte_overflow--;
1676 	}
1677 
1678 	/*
1679 	 * Update our statistics.
1680 	 */
1681 	pvo->pvo_pmap->pm_stats.resident_count--;
1682 	if (pvo->pvo_vaddr & PVO_WIRED)
1683 		pvo->pvo_pmap->pm_stats.wired_count--;
1684 
1685 	/*
1686 	 * Save the REF/CHG bits into their cache if the page is managed.
1687 	 */
1688 	if (pvo->pvo_vaddr & PVO_MANAGED) {
1689 		struct	vm_page *pg;
1690 
1691 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1692 		if (pg != NULL) {
1693 			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
1694 			    (PTE_REF | PTE_CHG));
1695 		}
1696 	}
1697 
1698 	/*
1699 	 * Remove this PVO from the PV list.
1700 	 */
1701 	LIST_REMOVE(pvo, pvo_vlink);
1702 
1703 	/*
1704 	 * Remove this from the overflow list and return it to the pool
1705 	 * if we aren't going to reuse it.
1706 	 */
1707 	LIST_REMOVE(pvo, pvo_olink);
1708 	zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone,
1709 	    pvo);
1710 	pmap_pvo_entries--;
1711 	pmap_pvo_remove_calls++;
1712 }
1713 
1714 static __inline int
1715 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1716 {
1717 	int	pteidx;
1718 
1719 	/*
1720 	 * We can find the actual pte entry without searching by grabbing
1721 	 * the PTE slot from 3 unused bits in pvo_vaddr[2:0] and by
1722 	 * noticing the HID bit.
1723 	 */
1724 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1725 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1726 		pteidx ^= pmap_pteg_mask * 8;
1727 
1728 	return (pteidx);
1729 }
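
/*
 * Example with illustrative values: a pvo in PTEG 0x2eea occupying slot 5
 * yields pteidx = 0x2eea * 8 + 5 = 0x17755; if the mapping used the
 * secondary hash (PTE_HID set) and pmap_pteg_mask is 0x3fff, the index is
 * flipped to 0x17755 ^ (0x3fff * 8) = 0x88ad, i.e. slot 5 of PTEG 0x1115.
 */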
1730 
1731 static struct pvo_entry *
1732 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1733 {
1734 	struct	pvo_entry *pvo;
1735 	int	ptegidx;
1736 	u_int	sr;
1737 
1738 	va &= ~ADDR_POFF;
1739 	sr = va_to_sr(pm->pm_sr, va);
1740 	ptegidx = va_to_pteg(sr, va);
1741 
1742 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1743 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1744 			if (pteidx_p)
1745 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1746 			return (pvo);
1747 		}
1748 	}
1749 
1750 	return (NULL);
1751 }
1752 
1753 static struct pte *
1754 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1755 {
1756 	struct	pte *pt;
1757 
1758 	/*
1759 	 * If we haven't been supplied the ptegidx, calculate it.
1760 	 */
1761 	if (pteidx == -1) {
1762 		int	ptegidx;
1763 		u_int	sr;
1764 
1765 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1766 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
1767 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1768 	}
1769 
1770 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1771 
1772 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1773 		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
1774 		    "valid pte index", pvo);
1775 	}
1776 
1777 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1778 		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
1779 		    "pvo but no valid pte", pvo);
1780 	}
1781 
1782 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1783 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1784 			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
1785 			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
1786 		}
1787 
1788 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
1789 		    != 0) {
1790 			panic("pmap_pvo_to_pte: pvo %p pte does not match "
1791 			    "pte %p in pmap_pteg_table", pvo, pt);
1792 		}
1793 
1794 		return (pt);
1795 	}
1796 
1797 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1798 		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
1799 		    "pmap_pteg_table but valid in pvo", pvo, pt);
1800 	}
1801 
1802 	return (NULL);
1803 }
1804 
1805 /*
1806  * XXX: THIS STUFF SHOULD BE IN pte.c?
1807  */
1808 int
1809 pmap_pte_spill(vm_offset_t addr)
1810 {
1811 	struct	pvo_entry *source_pvo, *victim_pvo;
1812 	struct	pvo_entry *pvo;
1813 	int	ptegidx, i, j;
1814 	u_int	sr;
1815 	struct	pteg *pteg;
1816 	struct	pte *pt;
1817 
1818 	pmap_pte_spills++;
1819 
1820 	__asm __volatile("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
1821 	ptegidx = va_to_pteg(sr, addr);
1822 
1823 	/*
1824 	 * Have to substitute some entry.  Use the primary hash for this.
1825 	 * Use low bits of timebase as random generator.
1826 	 */
1827 	pteg = &pmap_pteg_table[ptegidx];
1828 	__asm __volatile("mftb %0" : "=r"(i));
1829 	i &= 7;
1830 	pt = &pteg->pt[i];
1831 
1832 	source_pvo = NULL;
1833 	victim_pvo = NULL;
1834 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1835 		/*
1836 		 * We need to find a pvo entry for this address.
1837 		 */
1838 		PMAP_PVO_CHECK(pvo);
1839 		if (source_pvo == NULL &&
1840 		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
1841 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
1842 			/*
1843 			 * We have found an entry to be spilled into the PTEG.
1844 			 * The PTE is now valid, so we know it's active.
1845 			 */
1846 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1847 
1848 			if (j >= 0) {
1849 				PVO_PTEGIDX_SET(pvo, j);
1850 				pmap_pte_overflow--;
1851 				PMAP_PVO_CHECK(pvo);
1852 				return (1);
1853 			}
1854 
1855 			source_pvo = pvo;
1856 
1857 			if (victim_pvo != NULL)
1858 				break;
1859 		}
1860 
1861 		/*
1862 		 * We also need the pvo entry of the victim we are replacing
1863 		 * so save the R & C bits of the PTE.
1864 		 */
1865 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
1866 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
1867 			victim_pvo = pvo;
1868 			if (source_pvo != NULL)
1869 				break;
1870 		}
1871 	}
1872 
1873 	if (source_pvo == NULL)
1874 		return (0);
1875 
1876 	if (victim_pvo == NULL) {
1877 		if ((pt->pte_hi & PTE_HID) == 0)
1878 			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
1879 			    "entry", pt);
1880 
1881 		/*
1882 		 * If this is a secondary PTE, we need to search its primary
1883 		 * pvo bucket for the matching PVO.
1884 		 */
1885 		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
1886 		    pvo_olink) {
1887 			PMAP_PVO_CHECK(pvo);
1888 			/*
1889 			 * We also need the pvo entry of the victim we are
1890 			 * replacing so save the R & C bits of the PTE.
1891 			 */
1892 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
1893 				victim_pvo = pvo;
1894 				break;
1895 			}
1896 		}
1897 
1898 		if (victim_pvo == NULL)
1899 			panic("pmap_pte_spill: victim s-pte (%p) has no pvo"
1900 			    "entry", pt);
1901 	}
1902 
1903 	/*
1904 	 * We are invalidating the TLB entry for the EA we are replacing even
1905 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
1906 	 * contained in the TLB entry.
1907 	 */
1908 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
1909 
1910 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
1911 	pmap_pte_set(pt, &source_pvo->pvo_pte);
1912 
1913 	PVO_PTEGIDX_CLR(victim_pvo);
1914 	PVO_PTEGIDX_SET(source_pvo, i);
1915 	pmap_pte_replacements++;
1916 
1917 	PMAP_PVO_CHECK(victim_pvo);
1918 	PMAP_PVO_CHECK(source_pvo);
1919 
1920 	return (1);
1921 }
1922 
1923 static int
1924 pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
1925 {
1926 	struct	pte *pt;
1927 	int	i;
1928 
1929 	/*
1930 	 * First try primary hash.
1931 	 */
1932 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
1933 		if ((pt->pte_hi & PTE_VALID) == 0) {
1934 			pvo_pt->pte_hi &= ~PTE_HID;
1935 			pmap_pte_set(pt, pvo_pt);
1936 			return (i);
1937 		}
1938 	}
1939 
1940 	/*
1941 	 * Now try secondary hash.
1942 	 */
1943 	ptegidx ^= pmap_pteg_mask;
1945 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
1946 		if ((pt->pte_hi & PTE_VALID) == 0) {
1947 			pvo_pt->pte_hi |= PTE_HID;
1948 			pmap_pte_set(pt, pvo_pt);
1949 			return (i);
1950 		}
1951 	}
1952 
1953 	panic("pmap_pte_insert: overflow");
1954 	return (-1);
1955 }
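
/*
 * Note on the secondary hash above: the secondary PTEG index is simply the
 * primary index XORed with pmap_pteg_mask, so with a mask of 0x3fff a
 * mapping that finds PTEG 0x2eea full is retried in PTEG 0x1115, and the
 * PTE_HID bit records which of the two hash functions the entry uses.
 */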
1956 
1957 static boolean_t
1958 pmap_query_bit(vm_page_t m, int ptebit)
1959 {
1960 	struct	pvo_entry *pvo;
1961 	struct	pte *pt;
1962 
1963 	if (pmap_attr_fetch(m) & ptebit)
1964 		return (TRUE);
1965 
1966 	critical_enter();
1967 
1968 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1969 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1970 
1971 		/*
1972 		 * See if we saved the bit off.  If so, cache it and return
1973 		 * success.
1974 		 */
1975 		if (pvo->pvo_pte.pte_lo & ptebit) {
1976 			pmap_attr_save(m, ptebit);
1977 			PMAP_PVO_CHECK(pvo);	/* sanity check */
1978 			critical_exit();
1979 			return (TRUE);
1980 		}
1981 	}
1982 
1983 	/*
1984 	 * No luck, now go through the hard part of looking at the PTEs
1985 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
1986 	 * the PTEs.
1987 	 */
1988 	SYNC();
1989 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1990 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1991 
1992 		/*
1993 		 * See if this pvo has a valid PTE.  if so, fetch the
1994 		 * REF/CHG bits from the valid PTE.  If the appropriate
1995 		 * ptebit is set, cache it and return success.
1996 		 */
1997 		pt = pmap_pvo_to_pte(pvo, -1);
1998 		if (pt != NULL) {
1999 			pmap_pte_synch(pt, &pvo->pvo_pte);
2000 			if (pvo->pvo_pte.pte_lo & ptebit) {
2001 				pmap_attr_save(m, ptebit);
2002 				PMAP_PVO_CHECK(pvo);	/* sanity check */
2003 				critical_exit();
2004 				return (TRUE);
2005 			}
2006 		}
2007 	}
2008 
2009 	critical_exit();
2010 	return (FALSE);
2011 }
2012 
2013 static boolean_t
2014 pmap_clear_bit(vm_page_t m, int ptebit)
2015 {
2016 	struct	pvo_entry *pvo;
2017 	struct	pte *pt;
2018 	int	rv;
2019 
2020 	critical_enter();
2021 
2022 	/*
2023 	 * Clear the cached value.
2024 	 */
2025 	rv = pmap_attr_fetch(m);
2026 	pmap_attr_clear(m, ptebit);
2027 
2028 	/*
2029 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2030 	 * we can reset the right ones).  note that since the pvo entries and
2031 	 * list heads are accessed via BAT0 and are never placed in the page
2032 	 * table, we don't have to worry about further accesses setting the
2033 	 * REF/CHG bits.
2034 	 */
2035 	SYNC();
2036 
2037 	/*
2038 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2039 	 * valid pte clear the ptebit from the valid pte.
2040 	 */
2041 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2042 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2043 		pt = pmap_pvo_to_pte(pvo, -1);
2044 		if (pt != NULL) {
2045 			pmap_pte_synch(pt, &pvo->pvo_pte);
2046 			if (pvo->pvo_pte.pte_lo & ptebit)
2047 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2048 		}
2049 		rv |= pvo->pvo_pte.pte_lo;
2050 		pvo->pvo_pte.pte_lo &= ~ptebit;
2051 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2052 	}
2053 
2054 	critical_exit();
2055 	return ((rv & ptebit) != 0);
2056 }
2057