xref: /freebsd/sys/powerpc/aim/mmu_oea.c (revision ee2ea5ceafed78a5bd9810beb9e3ca927180c226)
1 /*
2  * Copyright (c) 2001 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The NetBSD Foundation
6  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *        This product includes software developed by the NetBSD
19  *        Foundation, Inc. and its contributors.
20  * 4. Neither the name of The NetBSD Foundation nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*
37  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38  * Copyright (C) 1995, 1996 TooLs GmbH.
39  * All rights reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. All advertising materials mentioning features or use of this software
50  *    must display the following acknowledgement:
51  *	This product includes software developed by TooLs GmbH.
52  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67  */
68 /*
69  * Copyright (C) 2001 Benno Rice.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions
74  * are met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright
78  *    notice, this list of conditions and the following disclaimer in the
79  *    documentation and/or other materials provided with the distribution.
80  *
81  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 #ifndef lint
94 static const char rcsid[] =
95   "$FreeBSD$";
96 #endif /* not lint */
97 
98 /*
99  * Manages physical address maps.
100  *
101  * In addition to hardware address maps, this module is called upon to
102  * provide software-use-only maps which may or may not be stored in the
103  * same form as hardware maps.  These pseudo-maps are used to store
104  * intermediate results from copy operations to and from address spaces.
105  *
106  * Since the information managed by this module is also stored by the
107  * logical address mapping module, this module may throw away valid virtual
108  * to physical mappings at almost any time.  However, invalidations of
109  * mappings must be done as requested.
110  *
111  * In order to cope with hardware architectures which make virtual to
112  * physical map invalidates expensive, this module may delay invalidate
113  * reduced protection operations until such time as they are actually
114  * necessary.  This module is given full information as to which processors
115  * are currently using which maps, and to when physical maps must be made
116  * correct.
117  */
118 
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
129 
130 #include <dev/ofw/openfirm.h>
131 
132 #include <vm/vm.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/uma.h>
142 
143 #include <machine/bat.h>
144 #include <machine/frame.h>
145 #include <machine/md_var.h>
146 #include <machine/psl.h>
147 #include <machine/pte.h>
148 #include <machine/sr.h>
149 
150 #define	PMAP_DEBUG
151 
152 #define TODO	panic("%s: not implemented", __func__);
153 
154 #define	PMAP_LOCK(pm)
155 #define	PMAP_UNLOCK(pm)
156 
157 #define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
158 #define	TLBSYNC()	__asm __volatile("tlbsync")
159 #define	SYNC()		__asm __volatile("sync")
160 #define	EIEIO()		__asm __volatile("eieio")
161 
162 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
163 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
164 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
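
/*
 * Illustrative example: VSID_MAKE(0xc, 0x12345) == 0x12345c; the low
 * four bits select the segment register and the hash sits above them,
 * so VSID_TO_SR(0x12345c) == 0xc and VSID_TO_HASH(0x12345c) == 0x12345.
 */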
165 
166 #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
167 #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
168 #define	PVO_WIRED		0x0010		/* PVO entry is wired */
169 #define	PVO_MANAGED		0x0020		/* PVO entry is managed */
170 #define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
171 #define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
172 						   bootstrap */
173 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
174 #define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
175 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
176 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
177 #define	PVO_PTEGIDX_CLR(pvo)	\
178 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
179 #define	PVO_PTEGIDX_SET(pvo, i)	\
180 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
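
/*
 * The PVO_* flags and the PTEG slot index are kept in the low-order
 * (page offset) bits of pvo_vaddr, which are free because PVO virtual
 * addresses are always page aligned; PVO_VADDR() masks them back off.
 */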
181 
182 #define	PMAP_PVO_CHECK(pvo)
183 
184 struct mem_region {
185 	vm_offset_t	mr_start;
186 	vm_offset_t	mr_size;
187 };
188 
189 struct ofw_map {
190 	vm_offset_t	om_va;
191 	vm_size_t	om_len;
192 	vm_offset_t	om_pa;
193 	u_int		om_mode;
194 };
195 
196 int	pmap_bootstrapped = 0;
197 
198 /*
199  * Virtual and physical address of message buffer.
200  */
201 struct		msgbuf *msgbufp;
202 vm_offset_t	msgbuf_phys;
203 
204 /*
205  * Physical addresses of first and last available physical page.
206  */
207 vm_offset_t avail_start;
208 vm_offset_t avail_end;
209 
210 /*
211  * Map of physical memory regions.
212  */
213 vm_offset_t	phys_avail[128];
214 u_int		phys_avail_count;
215 static struct	mem_region regions[128];
216 static struct	ofw_map translations[128];
217 static int	translations_size;
218 
219 /*
220  * First and last available kernel virtual addresses.
221  */
222 vm_offset_t virtual_avail;
223 vm_offset_t virtual_end;
224 vm_offset_t kernel_vm_end;
225 
226 /*
227  * Kernel pmap.
228  */
229 struct pmap kernel_pmap_store;
230 extern struct pmap ofw_pmap;
231 
232 /*
233  * PTEG data.
234  */
235 static struct	pteg *pmap_pteg_table;
236 u_int		pmap_pteg_count;
237 u_int		pmap_pteg_mask;
238 
239 /*
240  * PVO data.
241  */
242 struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
243 struct	pvo_head pmap_pvo_kunmanaged =
244     LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
245 struct	pvo_head pmap_pvo_unmanaged =
246     LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
247 
248 uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
249 uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
250 struct		vm_object pmap_upvo_zone_obj;
251 struct		vm_object pmap_mpvo_zone_obj;
252 static vm_object_t	pmap_pvo_obj;
253 static u_int		pmap_pvo_count;
254 
255 #define	PMAP_PVO_SIZE	1024
256 static struct	pvo_entry *pmap_bpvo_pool;
257 static int	pmap_bpvo_pool_index;
258 static int	pmap_bpvo_pool_count;
259 
260 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
261 static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
262 
263 static boolean_t pmap_initialized = FALSE;
264 
265 /*
266  * Statistics.
267  */
268 u_int	pmap_pte_valid = 0;
269 u_int	pmap_pte_overflow = 0;
270 u_int	pmap_pte_replacements = 0;
271 u_int	pmap_pvo_entries = 0;
272 u_int	pmap_pvo_enter_calls = 0;
273 u_int	pmap_pvo_remove_calls = 0;
274 u_int	pmap_pte_spills = 0;
275 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
276     0, "");
277 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
278     &pmap_pte_overflow, 0, "");
279 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
280     &pmap_pte_replacements, 0, "");
281 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
282     0, "");
283 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
284     &pmap_pvo_enter_calls, 0, "");
285 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
286     &pmap_pvo_remove_calls, 0, "");
287 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
288     &pmap_pte_spills, 0, "");
289 
290 struct	pvo_entry *pmap_pvo_zeropage;
291 
292 vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
293 u_int		pmap_rkva_count = 4;
294 
295 /*
296  * Allocate physical memory for use in pmap_bootstrap.
297  */
298 static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
299 
300 /*
301  * PTE calls.
302  */
303 static int		pmap_pte_insert(u_int, struct pte *);
304 
305 /*
306  * PVO calls.
307  */
308 static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
309 		    vm_offset_t, vm_offset_t, u_int, int);
310 static void	pmap_pvo_remove(struct pvo_entry *, int);
311 static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
312 static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
313 
314 /*
315  * Utility routines.
316  */
317 static void *		pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
318 static struct		pvo_entry *pmap_rkva_alloc(void);
319 static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
320 			    struct pte *, int *);
321 static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
322 static void		pmap_syncicache(vm_offset_t, vm_size_t);
323 static boolean_t	pmap_query_bit(vm_page_t, int);
324 static boolean_t	pmap_clear_bit(vm_page_t, int);
325 static void		tlbia(void);
326 
327 static __inline int
328 va_to_sr(u_int *sr, vm_offset_t va)
329 {
330 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
331 }
332 
333 static __inline u_int
334 va_to_pteg(u_int sr, vm_offset_t addr)
335 {
336 	u_int hash;
337 
338 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
339 	    ADDR_PIDX_SHFT);
340 	return (hash & pmap_pteg_mask);
341 }
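
/*
 * va_to_pteg() computes the OEA primary hash: the VSID from the segment
 * register is XORed with the page index bits of the address and the
 * result is reduced modulo the (power of two) PTEG count.  Illustrative
 * example: with pmap_pteg_mask == 0x1fff, a VSID of 0x492 and a page
 * index of 0x3a7 select PTEG (0x492 ^ 0x3a7) & 0x1fff == 0x735.
 */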
342 
343 static __inline struct pvo_head *
344 pa_to_pvoh(vm_offset_t pa)
345 {
346 	struct	vm_page *pg;
347 
348 	pg = PHYS_TO_VM_PAGE(pa);
349 
350 	if (pg == NULL)
351 		return (&pmap_pvo_unmanaged);
352 
353 	return (&pg->md.mdpg_pvoh);
354 }
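
/*
 * Physical addresses without a vm_page behind them (e.g. device memory)
 * all share the catch-all pmap_pvo_unmanaged list; managed pages keep
 * their PVOs on a per-page list hung off the machine-dependent vm_page
 * fields.
 */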
355 
356 static __inline struct pvo_head *
357 vm_page_to_pvoh(vm_page_t m)
358 {
359 
360 	return (&m->md.mdpg_pvoh);
361 }
362 
363 static __inline void
364 pmap_attr_clear(vm_page_t m, int ptebit)
365 {
366 
367 	m->md.mdpg_attrs &= ~ptebit;
368 }
369 
370 static __inline int
371 pmap_attr_fetch(vm_page_t m)
372 {
373 
374 	return (m->md.mdpg_attrs);
375 }
376 
377 static __inline void
378 pmap_attr_save(vm_page_t m, int ptebit)
379 {
380 
381 	m->md.mdpg_attrs |= ptebit;
382 }
383 
384 static __inline int
385 pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
386 {
387 	if (pt->pte_hi == pvo_pt->pte_hi)
388 		return (1);
389 
390 	return (0);
391 }
392 
393 static __inline int
394 pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
395 {
396 	return (pt->pte_hi & ~PTE_VALID) ==
397 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
398 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
399 }
400 
401 static __inline void
402 pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
403 {
404 	/*
405 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
406 	 * set when the real pte is set in memory.
407 	 *
408 	 * Note: Don't set the valid bit for correct operation of tlb update.
409 	 */
410 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
411 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
412 	pt->pte_lo = pte_lo;
413 }
414 
415 static __inline void
416 pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
417 {
418 
419 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
420 }
421 
422 static __inline void
423 pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
424 {
425 
426 	/*
427 	 * As shown in Section 7.6.3.2.3
428 	 */
429 	pt->pte_lo &= ~ptebit;
430 	TLBIE(va);
431 	EIEIO();
432 	TLBSYNC();
433 	SYNC();
434 }
435 
436 static __inline void
437 pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
438 {
439 
440 	pvo_pt->pte_hi |= PTE_VALID;
441 
442 	/*
443 	 * Update the PTE as defined in section 7.6.3.1.
444 	 * Note that the REF/CHG bits are from pvo_pt and thus should have
445 	 * been saved so this routine can restore them (if desired).
446 	 */
447 	pt->pte_lo = pvo_pt->pte_lo;
448 	EIEIO();
449 	pt->pte_hi = pvo_pt->pte_hi;
450 	SYNC();
451 	pmap_pte_valid++;
452 }
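
/*
 * Note the store ordering in pmap_pte_set(): pte_lo is written first,
 * eieio orders it ahead of the pte_hi store that sets PTE_VALID, and the
 * final sync makes the update visible.  The table-search hardware can
 * therefore never observe a valid PTE whose low word is still stale.
 */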
453 
454 static __inline void
455 pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
456 {
457 
458 	pvo_pt->pte_hi &= ~PTE_VALID;
459 
460 	/*
461 	 * Force the ref & chg bits back into the PTEs.
462 	 */
463 	SYNC();
464 
465 	/*
466 	 * Invalidate the pte.
467 	 */
468 	pt->pte_hi &= ~PTE_VALID;
469 
470 	SYNC();
471 	TLBIE(va);
472 	EIEIO();
473 	TLBSYNC();
474 	SYNC();
475 
476 	/*
477 	 * Save the ref & chg bits.
478 	 */
479 	pmap_pte_synch(pt, pvo_pt);
480 	pmap_pte_valid--;
481 }
482 
483 static __inline void
484 pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
485 {
486 
487 	/*
488 	 * Invalidate the PTE, then rewrite it.  The architecture requires
	 * that a valid PTE be invalidated (and the TLB entry flushed)
	 * before any of its other fields may be modified.
489 	 */
490 	pmap_pte_unset(pt, pvo_pt, va);
491 	pmap_pte_set(pt, pvo_pt);
492 }
493 
494 /*
495  * Quick sort callout for comparing memory regions.
496  */
497 static int	mr_cmp(const void *a, const void *b);
498 static int	om_cmp(const void *a, const void *b);
499 
500 static int
501 mr_cmp(const void *a, const void *b)
502 {
503 	const struct	mem_region *regiona;
504 	const struct	mem_region *regionb;
505 
506 	regiona = a;
507 	regionb = b;
508 	if (regiona->mr_start < regionb->mr_start)
509 		return (-1);
510 	else if (regiona->mr_start > regionb->mr_start)
511 		return (1);
512 	else
513 		return (0);
514 }
515 
516 static int
517 om_cmp(const void *a, const void *b)
518 {
519 	const struct	ofw_map *mapa;
520 	const struct	ofw_map *mapb;
521 
522 	mapa = a;
523 	mapb = b;
524 	if (mapa->om_pa < mapb->om_pa)
525 		return (-1);
526 	else if (mapa->om_pa > mapb->om_pa)
527 		return (1);
528 	else
529 		return (0);
530 }
531 
532 void
533 pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
534 {
535 	ihandle_t	pmem, mmui;
536 	phandle_t	chosen, mmu;
537 	int		sz;
538 	int		i, j;
539 	vm_size_t	size, physsz;
540 	vm_offset_t	pa, va, off;
541 	u_int		batl, batu;
542 
543 	/*
544 	 * Use an IBAT and a DBAT to map the bottom segment of memory
545 	 * where we are.
546 	 */
547 	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
548 	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
549 	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
550 	    :: "r"(batu), "r"(batl));
551 #if 0
552 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
553 	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
554 	__asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1"
555 	    :: "r"(batu), "r"(batl));
556 #endif
557 
558 	/*
559 	 * Set the start and end of kva.
560 	 */
561 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
562 	virtual_end = VM_MAX_KERNEL_ADDRESS;
563 
564 	if ((pmem = OF_finddevice("/memory")) == -1)
565 		panic("pmap_bootstrap: can't locate memory device");
566 	if ((sz = OF_getproplen(pmem, "available")) == -1)
567 		panic("pmap_bootstrap: can't get length of available memory");
568 	if (sizeof(phys_avail) < sz)
569 		panic("pmap_bootstrap: phys_avail too small");
570 	if (sizeof(regions) < sz)
571 		panic("pmap_bootstrap: regions too small");
572 	bzero(regions, sz);
573 	if (OF_getprop(pmem, "available", regions, sz) == -1)
574 		panic("pmap_bootstrap: can't get available memory");
575 	sz /= sizeof(*regions);
576 	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
577 	qsort(regions, sz, sizeof(*regions), mr_cmp);
578 	phys_avail_count = 0;
579 	physsz = 0;
580 	for (i = 0, j = 0; i < sz; i++, j += 2) {
581 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
582 		    regions[i].mr_start + regions[i].mr_size,
583 		    regions[i].mr_size);
584 		phys_avail[j] = regions[i].mr_start;
585 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
586 		phys_avail_count++;
587 		physsz += regions[i].mr_size;
588 	}
589 	physmem = btoc(physsz);
590 
591 	/*
592 	 * Allocate PTEG table.
593 	 */
594 #ifdef PTEGCOUNT
595 	pmap_pteg_count = PTEGCOUNT;
596 #else
597 	pmap_pteg_count = 0x1000;
598 
599 	while (pmap_pteg_count < physmem)
600 		pmap_pteg_count <<= 1;
601 
602 	pmap_pteg_count >>= 1;
603 #endif /* PTEGCOUNT */
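
	/*
	 * In the default (no PTEGCOUNT) case the loop above picks the
	 * smallest power of two not below the page count and halves it.
	 * Illustrative example: with 128MB of memory, physmem is 32768
	 * pages, so pmap_pteg_count becomes 16384; at 64 bytes per PTEG
	 * (8 PTEs of 8 bytes each) that is a 1MB table holding 131072
	 * PTEs.
	 */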
604 
605 	size = pmap_pteg_count * sizeof(struct pteg);
606 	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
607 	    size);
608 	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
609 	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
610 	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
611 	pmap_pteg_mask = pmap_pteg_count - 1;
612 
613 	/*
614 	 * Allocate pv/overflow lists.
615 	 */
616 	size = sizeof(struct pvo_head) * pmap_pteg_count;
617 	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
618 	    PAGE_SIZE);
619 	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
620 	for (i = 0; i < pmap_pteg_count; i++)
621 		LIST_INIT(&pmap_pvo_table[i]);
622 
623 	/*
624 	 * Allocate the message buffer.
625 	 */
626 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
627 
628 	/*
629 	 * Initialise the unmanaged pvo pool.
630 	 */
631 	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(PAGE_SIZE, 0);
632 	pmap_bpvo_pool_index = 0;
633 	pmap_bpvo_pool_count = (int)PAGE_SIZE / sizeof(struct pvo_entry);
634 
635 	/*
636 	 * Make sure kernel vsid is allocated as well as VSID 0.
637 	 */
638 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
639 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
640 	pmap_vsid_bitmap[0] |= 1;
641 
642 	/*
643 	 * Set up the OpenFirmware pmap and add its mappings.
644 	 */
645 	pmap_pinit(&ofw_pmap);
646 	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
647 	if ((chosen = OF_finddevice("/chosen")) == -1)
648 		panic("pmap_bootstrap: can't find /chosen");
649 	OF_getprop(chosen, "mmu", &mmui, 4);
650 	if ((mmu = OF_instance_to_package(mmui)) == -1)
651 		panic("pmap_bootstrap: can't get mmu package");
652 	if ((sz = OF_getproplen(mmu, "translations")) == -1)
653 		panic("pmap_bootstrap: can't get ofw translation count");
654 	if (sizeof(translations) < sz)
655 		panic("pmap_bootstrap: translations too small");
656 	bzero(translations, sz);
657 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
658 		panic("pmap_bootstrap: can't get ofw translations");
659 	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	/* OF_getproplen returned bytes; convert sz to an entry count. */
	sz /= sizeof(*translations);
660 	qsort(translations, sz, sizeof (*translations), om_cmp);
661 	for (i = 0; i < sz; i++) {
662 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
663 		    translations[i].om_pa, translations[i].om_va,
664 		    translations[i].om_len);
665 
666 		/* Drop stuff below something? */
667 
668 		/* Enter the pages? */
669 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
670 			struct	vm_page m;
671 
672 			m.phys_addr = translations[i].om_pa + off;
673 			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
674 			    VM_PROT_ALL, 1);
675 		}
676 	}
677 #ifdef SMP
678 	TLBSYNC();
679 #endif
680 
681 	/*
682 	 * Initialize the kernel pmap (which is statically allocated).
683 	 */
684 	for (i = 0; i < 16; i++) {
685 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
686 	}
687 	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
688 	kernel_pmap->pm_active = ~0;
689 
690 	/*
691 	 * Allocate a kernel stack with a guard page for thread0 and map it
692 	 * into the kernel page map.
693 	 */
694 	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
695 	kstack0_phys = pa;
696 	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
697 	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
698 	    kstack0);
699 	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
700 	for (i = 0; i < KSTACK_PAGES; i++) {
701 		pa = kstack0_phys + i * PAGE_SIZE;
702 		va = kstack0 + i * PAGE_SIZE;
703 		pmap_kenter(va, pa);
704 		TLBIE(va);
705 	}
706 
707 	/*
708 	 * Calculate the first and last available physical addresses.
709 	 */
710 	avail_start = phys_avail[0];
711 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
712 		;
713 	avail_end = phys_avail[i + 1];
714 	Maxmem = powerpc_btop(avail_end);
715 
716 	/*
717 	 * Allocate virtual address space for the message buffer.
718 	 */
719 	msgbufp = (struct msgbuf *)virtual_avail;
720 	virtual_avail += round_page(MSGBUF_SIZE);
721 
722 	/*
723 	 * Initialize hardware.
724 	 */
725 	for (i = 0; i < 16; i++) {
726 		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
727 	}
728 	__asm __volatile ("mtsr %0,%1"
729 	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
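	/*
	 * SDR1 takes the physical base of the PTEG table in its high-order
	 * bits and HTABMASK, the mask applied to the upper hash bits, in
	 * its low 9 bits.  pmap_pteg_mask >> 10 is that mask: the minimum
	 * table is 1024 PTEGs (64KB) and each extra mask bit doubles it.
	 */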
730 	__asm __volatile ("sync; mtsdr1 %0; isync"
731 	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
732 	tlbia();
733 
734 	pmap_bootstrapped++;
735 }
736 
737 /*
738  * Activate a user pmap.  The pmap must be activated before its address
739  * space can be accessed in any way.
740  */
741 void
742 pmap_activate(struct thread *td)
743 {
744 	pmap_t	pm;
745 
746 	/*
747 	 * Load all the data we need up front to encourage the compiler to
748 	 * not issue any loads while we have interrupts disabled below.
749 	 */
750 	pm = &td->td_proc->p_vmspace->vm_pmap;
751 
752 	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
753 
754 	pm->pm_active |= PCPU_GET(cpumask);
755 }
756 
757 void
758 pmap_deactivate(struct thread *td)
759 {
760 	pmap_t	pm;
761 
762 	pm = &td->td_proc->p_vmspace->vm_pmap;
763 	pm->pm_active &= ~(PCPU_GET(cpumask));
764 }
765 
766 vm_offset_t
767 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
768 {
769 	TODO;
770 	return (0);
771 }
772 
773 void
774 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
775 {
776 	TODO;
777 }
778 
779 void
780 pmap_clear_modify(vm_page_t m)
781 {
782 
783 	if (m->flags & PG_FICTITIOUS)
784 		return;
785 	pmap_clear_bit(m, PTE_CHG);
786 }
787 
788 void
789 pmap_collect(void)
790 {
791 	TODO;
792 }
793 
794 void
795 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
796 	  vm_size_t len, vm_offset_t src_addr)
797 {
798 	TODO;
799 }
800 
801 void
802 pmap_copy_page(vm_page_t src, vm_page_t dst)
803 {
804 	TODO;
805 }
806 
807 /*
808  * Zero a page of physical memory by temporarily mapping it into the tlb.
809  */
810 void
811 pmap_zero_page(vm_page_t m)
812 {
813 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
814 	caddr_t	va;
815 	int	i;
816 
817 	if (pa < SEGMENT_LENGTH) {
818 		va = (caddr_t) pa;
819 	} else if (pmap_initialized) {
820 		if (pmap_pvo_zeropage == NULL)
821 			pmap_pvo_zeropage = pmap_rkva_alloc();
822 		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
823 		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
824 	} else {
825 		panic("pmap_zero_page: can't zero pa %#x", pa);
826 	}
827 
828 	bzero(va, PAGE_SIZE);
829 
830 	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
831 		__asm __volatile("dcbz 0,%0" :: "r"(va));
832 		va += CACHELINESIZE;
833 	}
834 
835 	if (pa >= SEGMENT_LENGTH)
836 		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
837 }
838 
839 void
840 pmap_zero_page_area(vm_page_t m, int off, int size)
841 {
842 	TODO;
843 }
844 
845 /*
846  * Map the given physical page at the specified virtual address in the
847  * target pmap with the protection requested.  If specified the page
848  * will be wired down.
849  */
850 void
851 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
852 	   boolean_t wired)
853 {
854 	struct		pvo_head *pvo_head;
855 	uma_zone_t	zone;
856 	u_int		pte_lo, pvo_flags;
857 	int		error;
858 
859 	if (!pmap_initialized) {
860 		pvo_head = &pmap_pvo_kunmanaged;
861 		zone = pmap_upvo_zone;
862 		pvo_flags = 0;
863 	} else {
864 		pvo_head = pa_to_pvoh(m->phys_addr);
865 		zone = pmap_mpvo_zone;
866 		pvo_flags = PVO_MANAGED;
867 	}
868 
869 	pte_lo = PTE_I | PTE_G;
870 
871 	if (prot & VM_PROT_WRITE)
872 		pte_lo |= PTE_BW;
873 	else
874 		pte_lo |= PTE_BR;
875 
876 	if (prot & VM_PROT_EXECUTE)
877 		pvo_flags |= PVO_EXECUTABLE;
878 
879 	if (wired)
880 		pvo_flags |= PVO_WIRED;
881 
882 	error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo,
883 	    pvo_flags);
884 
885 	if (error == ENOENT) {
886 		/*
887 		 * Flush the real memory from the cache.
888 		 */
889 		if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) {
890 			pmap_syncicache(m->phys_addr, PAGE_SIZE);
891 		}
892 	}
893 }
894 
895 vm_offset_t
896 pmap_extract(pmap_t pmap, vm_offset_t va)
897 {
898 	TODO;
899 	return (0);
900 }
901 
902 /*
903  * Grow the number of kernel page table entries.  Unneeded.
904  */
905 void
906 pmap_growkernel(vm_offset_t addr)
907 {
908 }
909 
910 void
911 pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
912 {
913 
914 	CTR0(KTR_PMAP, "pmap_init");
915 }
916 
917 void
918 pmap_init2(void)
919 {
920 
921 	CTR0(KTR_PMAP, "pmap_init2");
922 
923 	pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
924 	pmap_pvo_count = 0;
925 	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
926 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
927 	uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
928 	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
929 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
930 	uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
931 	pmap_initialized = TRUE;
932 }
933 
934 boolean_t
935 pmap_is_modified(vm_page_t m)
936 {
937 	TODO;
938 	return (0);
939 }
940 
941 void
942 pmap_clear_reference(vm_page_t m)
943 {
944 	TODO;
945 }
946 
947 /*
948  *	pmap_ts_referenced:
949  *
950  *	Return a count of reference bits for a page, clearing those bits.
951  *	It is not necessary for every reference bit to be cleared, but it
952  *	is necessary that 0 only be returned when there are truly no
953  *	reference bits set.
954  *
955  *	XXX: The exact number of bits to check and clear is a matter that
956  *	should be tested and standardized at some point in the future for
957  *	optimal aging of shared pages.
958  */
959 
960 int
961 pmap_ts_referenced(vm_page_t m)
962 {
963 	TODO;
964 	return (0);
965 }
966 
967 /*
968  * Map a wired page into kernel virtual address space.
969  */
970 void
971 pmap_kenter(vm_offset_t va, vm_offset_t pa)
972 {
973 	u_int		pte_lo;
974 	int		error;
975 	int		i;
976 
977 #if 0
978 	if (va < VM_MIN_KERNEL_ADDRESS)
979 		panic("pmap_kenter: attempt to enter non-kernel address %#x",
980 		    va);
981 #endif
982 
983 	pte_lo = PTE_I | PTE_G | PTE_BW;
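	/*
	 * Default to a cache-inhibited, guarded mapping, which is what
	 * device space wants; allow caching only when the physical address
	 * falls inside a known physical memory region.
	 */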
984 	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
985 		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
986 			pte_lo &= ~(PTE_I | PTE_G);
987 			break;
988 		}
989 	}
990 
991 	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
992 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
993 
994 	if (error != 0 && error != ENOENT)
995 		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
996 		    pa, error);
997 
998 	/*
999 	 * Flush the real memory from the instruction cache.
1000 	 */
1001 	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
1002 		pmap_syncicache(pa, PAGE_SIZE);
1003 	}
1004 }
1005 
1006 /*
1007  * Extract the physical page address associated with the given kernel virtual
1008  * address.
1009  */
1010 vm_offset_t
1011 pmap_kextract(vm_offset_t va)
1012 {
1013 	struct		pvo_entry *pvo;
1014 
1015 	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1016 	if (pvo == NULL) {
1017 		return (0);
1018 	}
1019 
1020 	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1021 }
1022 
1023 /*
1024  * Remove a wired page from kernel virtual address space.
1025  */
1026 void
1027 pmap_kremove(vm_offset_t va)
1028 {
1029 
1030 	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
1031 }
1032 
1033 /*
1034  * Map a range of physical addresses into kernel virtual address space.
1035  *
1036  * The value passed in *virt is a suggested virtual address for the mapping.
1037  * Architectures which can support a direct-mapped physical to virtual region
1038  * can return the appropriate address within that region, leaving '*virt'
1039  * unchanged.  We cannot and therefore do not; *virt is updated with the
1040  * first usable address after the mapped region.
1041  */
1042 vm_offset_t
1043 pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
1044 {
1045 	vm_offset_t	sva, va;
1046 
1047 	sva = *virt;
1048 	va = sva;
1049 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1050 		pmap_kenter(va, pa_start);
1051 	*virt = va;
1052 	return (sva);
1053 }
1054 
1055 int
1056 pmap_mincore(pmap_t pmap, vm_offset_t addr)
1057 {
1058 	TODO;
1059 	return (0);
1060 }
1061 
1062 /*
1063  * Create the uarea for a new process.
1064  * This routine directly affects the fork perf for a process.
1065  */
1066 void
1067 pmap_new_proc(struct proc *p)
1068 {
1069 	vm_object_t	upobj;
1070 	vm_offset_t	up;
1071 	vm_page_t	m;
1072 	u_int		i;
1073 
1074 	/*
1075 	 * Allocate the object for the upages.
1076 	 */
1077 	upobj = p->p_upages_obj;
1078 	if (upobj == NULL) {
1079 		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
1080 		p->p_upages_obj = upobj;
1081 	}
1082 
1083 	/*
1084 	 * Get a kernel virtual address for the uarea for this process.
1085 	 */
1086 	up = (vm_offset_t)p->p_uarea;
1087 	if (up == 0) {
1088 		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
1089 		if (up == 0)
1090 			panic("pmap_new_proc: upage allocation failed");
1091 		p->p_uarea = (struct user *)up;
1092 	}
1093 
1094 	for (i = 0; i < UAREA_PAGES; i++) {
1095 		/*
1096 		 * Get a uarea page.
1097 		 */
1098 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1099 
1100 		/*
1101 		 * Wire the page.
1102 		 */
1103 		m->wire_count++;
1104 
1105 		/*
1106 		 * Enter the page into the kernel address space.
1107 		 */
1108 		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1109 
1110 		vm_page_wakeup(m);
1111 		vm_page_flag_clear(m, PG_ZERO);
1112 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1113 		m->valid = VM_PAGE_BITS_ALL;
1114 	}
1115 }
1116 
1117 void
1118 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1119 		    vm_pindex_t pindex, vm_size_t size, int limit)
1120 {
1121 
1122 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1123 	    ("pmap_object_init_pt: non current pmap"));
1124 	/* XXX */
1125 }
1126 
1127 /*
1128  * Lower the permission for all mappings to a given page.
1129  */
1130 void
1131 pmap_page_protect(vm_page_t m, vm_prot_t prot)
1132 {
1133 	struct	pvo_head *pvo_head;
1134 	struct	pvo_entry *pvo, *next_pvo;
1135 	struct	pte *pt;
1136 
1137 	/*
1138 	 * Since the routine only downgrades protection, if the
1139 	 * maximal protection is desired, there isn't any change
1140 	 * to be made.
1141 	 */
1142 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
1143 	    (VM_PROT_READ|VM_PROT_WRITE))
1144 		return;
1145 
1146 	pvo_head = vm_page_to_pvoh(m);
1147 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1148 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1149 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1150 
1151 		/*
1152 		 * Downgrading to no mapping at all, we just remove the entry.
1153 		 */
1154 		if ((prot & VM_PROT_READ) == 0) {
1155 			pmap_pvo_remove(pvo, -1);
1156 			continue;
1157 		}
1158 
1159 		/*
1160 		 * If EXEC permission is being revoked, just clear the flag
1161 		 * in the PVO.
1162 		 */
1163 		if ((prot & VM_PROT_EXECUTE) == 0)
1164 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1165 
1166 		/*
1167 		 * If this entry is already RO, don't diddle with the page
1168 		 * table.
1169 		 */
1170 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
1171 			PMAP_PVO_CHECK(pvo);
1172 			continue;
1173 		}
1174 
1175 		/*
1176 		 * Grab the PTE before we diddle the bits so pvo_to_pte can
1177 		 * verify the pte contents are as expected.
1178 		 */
1179 		pt = pmap_pvo_to_pte(pvo, -1);
1180 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1181 		pvo->pvo_pte.pte_lo |= PTE_BR;
1182 		if (pt != NULL)
1183 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1184 		PMAP_PVO_CHECK(pvo);	/* sanity check */
1185 	}
1186 }
1187 
1188 /*
1189  * Make the specified page pageable (or not).  Unneeded.
1190  */
1191 void
1192 pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1193 	      boolean_t pageable)
1194 {
1195 }
1196 
1197 /*
1198  * Returns true if the pmap's pv is one of the first
1199  * 16 pvs linked to from this page.  This count may
1200  * be changed upwards or downwards in the future; it
1201  * is only necessary that true be returned for a small
1202  * subset of pmaps for proper page aging.
1203  */
1204 boolean_t
1205 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1206 {
1207 	TODO;
1208 	return (0);
1209 }
1210 
1211 static u_int	pmap_vsidcontext;
1212 
1213 void
1214 pmap_pinit(pmap_t pmap)
1215 {
1216 	int	i, mask;
1217 	u_int	entropy;
1218 
1219 	entropy = 0;
1220 	__asm __volatile("mftb %0" : "=r"(entropy));
1221 
1222 	/*
1223 	 * Allocate some segment registers for this pmap.
1224 	 */
1225 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1226 		u_int	hash, n;
1227 
1228 		/*
1229 		 * Create a new value by multiplying by a prime and adding in
1230 		 * entropy from the timebase register.  This is to make the
1231 		 * VSID more random so that the PT hash function collides
1232 		 * less often.  (Note that the prime causes gcc to do shifts
1233 		 * instead of a multiply.)
1234 		 */
1235 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1236 		hash = pmap_vsidcontext & (NPMAPS - 1);
1237 		if (hash == 0)		/* 0 is special, avoid it */
1238 			continue;
1239 		n = hash >> 5;
1240 		mask = 1 << (hash & (VSID_NBPW - 1));
1241 		hash = (pmap_vsidcontext & 0xfffff);
1242 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1243 			/* anything free in this bucket? */
1244 			if (pmap_vsid_bitmap[n] == 0xffffffff) {
1245 				entropy = (pmap_vsidcontext >> 20);
1246 				continue;
1247 			}
1248 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
1249 			mask = 1 << i;
1250 			hash &= 0xfffff & ~(VSID_NBPW - 1);
1251 			hash |= i;
1252 		}
1253 		pmap_vsid_bitmap[n] |= mask;
1254 		for (i = 0; i < 16; i++)
1255 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1256 		return;
1257 	}
1258 
1259 	panic("pmap_pinit: out of segments");
1260 }
1261 
1262 /*
1263  * Initialize the pmap associated with process 0.
1264  */
1265 void
1266 pmap_pinit0(pmap_t pm)
1267 {
1268 
1269 	pmap_pinit(pm);
1270 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1271 }
1272 
1273 void
1274 pmap_pinit2(pmap_t pmap)
1275 {
1276 	/* XXX: Remove this stub when no longer called */
1277 }
1278 
1279 void
1280 pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
1281 {
1282 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1283 	    ("pmap_prefault: non current pmap"));
1284 	/* XXX */
1285 }
1286 
1287 /*
1288  * Set the physical protection on the specified range of this map as requested.
1289  */
1290 void
1291 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1292 {
1293 	struct	pvo_entry *pvo;
1294 	struct	pte *pt;
1295 	int	pteidx;
1296 
1297 	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1298 	    eva, prot);
1299 
1300 
1301 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1302 	    ("pmap_protect: non current pmap"));
1303 
1304 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1305 		pmap_remove(pm, sva, eva);
1306 		return;
1307 	}
1308 
1309 	for (; sva < eva; sva += PAGE_SIZE) {
1310 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1311 		if (pvo == NULL)
1312 			continue;
1313 
1314 		if ((prot & VM_PROT_EXECUTE) == 0)
1315 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1316 
1317 		/*
1318 		 * Grab the PTE pointer before we diddle with the cached PTE
1319 		 * copy.
1320 		 */
1321 		pt = pmap_pvo_to_pte(pvo, pteidx);
1322 		/*
1323 		 * Change the protection of the page.
1324 		 */
1325 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1326 		pvo->pvo_pte.pte_lo |= PTE_BR;
1327 
1328 		/*
1329 		 * If the PVO is in the page table, update that pte as well.
1330 		 */
1331 		if (pt != NULL)
1332 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1333 	}
1334 }
1335 
1336 vm_offset_t
1337 pmap_phys_address(int ppn)
1338 {
1339 	TODO;
1340 	return (0);
1341 }
1342 
1343 /*
1344  * Map a list of wired pages into kernel virtual address space.  This is
1345  * intended for temporary mappings which do not need page modification or
1346  * references recorded.  Existing mappings in the region are overwritten.
1347  */
1348 void
1349 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1350 {
1351 	int	i;
1352 
1353 	for (i = 0; i < count; i++, va += PAGE_SIZE)
1354 		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
1355 }
1356 
1357 /*
1358  * Remove page mappings from kernel virtual address space.  Intended for
1359  * temporary mappings entered by pmap_qenter.
1360  */
1361 void
1362 pmap_qremove(vm_offset_t va, int count)
1363 {
1364 	int	i;
1365 
1366 	for (i = 0; i < count; i++, va += PAGE_SIZE)
1367 		pmap_kremove(va);
1368 }
1369 
1370 void
1371 pmap_release(pmap_t pmap)
1372 {
1373 	TODO;
1374 }
1375 
1376 /*
1377  * Remove the given range of addresses from the specified map.
1378  */
1379 void
1380 pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1381 {
1382 	struct	pvo_entry *pvo;
1383 	int	pteidx;
1384 
1385 	for (; sva < eva; sva += PAGE_SIZE) {
1386 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1387 		if (pvo != NULL) {
1388 			pmap_pvo_remove(pvo, pteidx);
1389 		}
1390 	}
1391 }
1392 
1393 /*
1394  * Remove all pages from specified address space, this aids process exit
1395  * speeds.  This is much faster than pmap_remove in the case of running down
1396  * an entire address space.  Only works for the current pmap.
1397  */
1398 void
1399 pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1400 {
1401 
1402 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1403 	    ("pmap_remove_pages: non current pmap"));
1404 	pmap_remove(pm, sva, eva);
1405 }
1406 
1407 void
1408 pmap_swapin_proc(struct proc *p)
1409 {
1410 	TODO;
1411 }
1412 
1413 void
1414 pmap_swapout_proc(struct proc *p)
1415 {
1416 	TODO;
1417 }
1418 
1419 /*
1420  * Create the kernel stack and pcb for a new thread.
1421  * This routine directly affects the fork perf for a process and
1422  * create performance for a thread.
1423  */
1424 void
1425 pmap_new_thread(struct thread *td)
1426 {
1427 	vm_object_t	ksobj;
1428 	vm_offset_t	ks;
1429 	vm_page_t	m;
1430 	u_int		i;
1431 
1432 	/*
1433 	 * Allocate object for the kstack.
1434 	 */
1435 	ksobj = td->td_kstack_obj;
1436 	if (ksobj == NULL) {
1437 		ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
1438 		td->td_kstack_obj = ksobj;
1439 	}
1440 
1441 	/*
1442 	 * Get a kernel virtual address for the kstack for this thread.
1443 	 */
1444 	ks = td->td_kstack;
1445 	if (ks == 0) {
1446 		ks = kmem_alloc_nofault(kernel_map,
1447 		    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1448 		if (ks == 0)
1449 			panic("pmap_new_thread: kstack allocation failed");
1450 		TLBIE(ks);
1451 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
1452 		td->td_kstack = ks;
1453 	}
1454 
1455 	for (i = 0; i < KSTACK_PAGES; i++) {
1456 		/*
1457 		 * Get a kernel stack page.
1458 		 */
1459 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1460 
1461 		/*
1462 		 * Wire the page.
1463 		 */
1464 		m->wire_count++;
1465 
1466 		/*
1467 		 * Enter the page into the kernel address space.
1468 		 */
1469 		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1470 
1471 		vm_page_wakeup(m);
1472 		vm_page_flag_clear(m, PG_ZERO);
1473 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1474 		m->valid = VM_PAGE_BITS_ALL;
1475 	}
1476 }
1477 
1478 void
1479 pmap_dispose_proc(struct proc *p)
1480 {
1481 	TODO;
1482 }
1483 
1484 void
1485 pmap_dispose_thread(struct thread *td)
1486 {
1487 	TODO;
1488 }
1489 
1490 void
1491 pmap_swapin_thread(struct thread *td)
1492 {
1493 	TODO;
1494 }
1495 
1496 void
1497 pmap_swapout_thread(struct thread *td)
1498 {
1499 	TODO;
1500 }
1501 
1502 /*
1503  * Allocate a physical page of memory directly from the phys_avail map.
1504  * Can only be called from pmap_bootstrap before avail start and end are
1505  * calculated.
1506  */
1507 static vm_offset_t
1508 pmap_bootstrap_alloc(vm_size_t size, u_int align)
1509 {
1510 	vm_offset_t	s, e;
1511 	int		i, j;
1512 
1513 	size = round_page(size);
1514 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1515 		if (align != 0)
1516 			s = (phys_avail[i] + align - 1) & ~(align - 1);
1517 		else
1518 			s = phys_avail[i];
1519 		e = s + size;
1520 
1521 		if (s < phys_avail[i] || e > phys_avail[i + 1])
1522 			continue;
1523 
1524 		if (s == phys_avail[i]) {
1525 			phys_avail[i] += size;
1526 		} else if (e == phys_avail[i + 1]) {
1527 			phys_avail[i + 1] -= size;
1528 		} else {
1529 			for (j = phys_avail_count * 2; j > i; j -= 2) {
1530 				phys_avail[j] = phys_avail[j - 2];
1531 				phys_avail[j + 1] = phys_avail[j - 1];
1532 			}
1533 
1534 			phys_avail[i + 3] = phys_avail[i + 1];
1535 			phys_avail[i + 1] = s;
1536 			phys_avail[i + 2] = e;
1537 			phys_avail_count++;
1538 		}
1539 
1540 		return (s);
1541 	}
1542 	panic("pmap_bootstrap_alloc: could not allocate memory");
1543 }
1544 
1545 /*
1546  * Return an unmapped pvo for a kernel virtual address.
1547  * Used by pmap functions that operate on physical pages.
1548  */
1549 static struct pvo_entry *
1550 pmap_rkva_alloc(void)
1551 {
1552 	struct		pvo_entry *pvo;
1553 	struct		pte *pt;
1554 	vm_offset_t	kva;
1555 	int		pteidx;
1556 
1557 	if (pmap_rkva_count == 0)
1558 		panic("pmap_rkva_alloc: no more reserved KVAs");
1559 
1560 	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
1561 	pmap_kenter(kva, 0);
1562 
1563 	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
1564 
1565 	if (pvo == NULL)
1566 		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
1567 
1568 	pt = pmap_pvo_to_pte(pvo, pteidx);
1569 
1570 	if (pt == NULL)
1571 		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
1572 
1573 	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1574 	PVO_PTEGIDX_CLR(pvo);
1575 
1576 	pmap_pte_overflow++;
1577 
1578 	return (pvo);
1579 }
1580 
1581 static void
1582 pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
1583     int *depth_p)
1584 {
1585 	struct	pte *pt;
1586 
1587 	/*
1588 	 * If this pvo already has a valid pte, we need to save it so it can
1589 	 * be restored later.  We then just reload the new PTE over the old
1590 	 * slot.
1591 	 */
1592 	if (saved_pt != NULL) {
1593 		pt = pmap_pvo_to_pte(pvo, -1);
1594 
1595 		if (pt != NULL) {
1596 			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1597 			PVO_PTEGIDX_CLR(pvo);
1598 			pmap_pte_overflow++;
1599 		}
1600 
1601 		*saved_pt = pvo->pvo_pte;
1602 
1603 		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1604 	}
1605 
1606 	pvo->pvo_pte.pte_lo |= pa;
1607 
1608 	if (!pmap_pte_spill(pvo->pvo_vaddr))
1609 		panic("pmap_pa_map: could not spill pvo %p", pvo);
1610 
1611 	if (depth_p != NULL)
1612 		(*depth_p)++;
1613 }
1614 
1615 static void
1616 pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
1617 {
1618 	struct	pte *pt;
1619 
1620 	pt = pmap_pvo_to_pte(pvo, -1);
1621 
1622 	if (pt != NULL) {
1623 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1624 		PVO_PTEGIDX_CLR(pvo);
1625 		pmap_pte_overflow++;
1626 	}
1627 
1628 	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1629 
1630 	/*
1631 	 * If there is a saved PTE and it's valid, restore it and return.
1632 	 */
1633 	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
1634 		if (depth_p != NULL && --(*depth_p) == 0)
1635 			panic("pmap_pa_unmap: restoring but depth == 0");
1636 
1637 		pvo->pvo_pte = *saved_pt;
1638 
1639 		if (!pmap_pte_spill(pvo->pvo_vaddr))
1640 			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
1641 	}
1642 }
1643 
1644 static void
1645 pmap_syncicache(vm_offset_t pa, vm_size_t len)
1646 {
1647 	__syncicache((void *)pa, len);
1648 }
1649 
1650 static void
1651 tlbia(void)
1652 {
1653 	caddr_t	i;
1654 
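	/*
	 * tlbie selects the TLB congruence class from the effective
	 * address, so stepping a dummy EA through 256KB in page-sized
	 * strides (64 iterations) invalidates every class on MMUs with up
	 * to 64 TLB sets, which should cover the 603/604-class parts this
	 * pmap drives.
	 */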
1655 	SYNC();
1656 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1657 		TLBIE(i);
1658 		EIEIO();
1659 	}
1660 	TLBSYNC();
1661 	SYNC();
1662 }
1663 
1664 static int
1665 pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1666     vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1667 {
1668 	struct	pvo_entry *pvo;
1669 	u_int	sr;
1670 	int	first;
1671 	u_int	ptegidx;
1672 	int	i;
1673 
1674 	pmap_pvo_enter_calls++;
1675 
1676 	/*
1677 	 * Compute the PTE Group index.
1678 	 */
1679 	va &= ~ADDR_POFF;
1680 	sr = va_to_sr(pm->pm_sr, va);
1681 	ptegidx = va_to_pteg(sr, va);
1682 
1683 	/*
1684 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
1685 	 * there is a mapping.
1686 	 */
1687 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1688 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1689 			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa)
1690 				return (0);
1691 			pmap_pvo_remove(pvo, -1);
1692 			break;
1693 		}
1694 	}
1695 
1696 	/*
1697 	 * If we aren't overwriting a mapping, try to allocate.
1698 	 */
1699 	if (pmap_initialized) {
1700 		pvo = uma_zalloc(zone, M_NOWAIT);
1701 	} else {
1702 		if (pmap_bpvo_pool_index >= pmap_bpvo_pool_count) {
1703 			pmap_bpvo_pool = (struct pvo_entry *)
1704 			    pmap_bootstrap_alloc(PAGE_SIZE, 0);
1705 			pmap_bpvo_pool_index = 0;
1706 		}
1707 		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
1708 		pmap_bpvo_pool_index++;
1710 	}
1711 
1712 	if (pvo == NULL) {
1713 		return (ENOMEM);
1714 	}
1715 
1716 	pmap_pvo_entries++;
1717 	pvo->pvo_vaddr = va;
1718 	pvo->pvo_pmap = pm;
1719 	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1720 	pvo->pvo_vaddr &= ~ADDR_POFF;
	/*
	 * Flag bootstrap-pool entries only after the pvo_vaddr store above;
	 * setting PVO_BOOTSTRAP earlier would be undone by that store and
	 * pmap_pvo_remove() would then uma_zfree() a pool entry.
	 */
	if (!pmap_initialized)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1721 	if (flags & PVO_EXECUTABLE)
1722 		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1723 	if (flags & PVO_WIRED)
1724 		pvo->pvo_vaddr |= PVO_WIRED;
1725 	if (pvo_head != &pmap_pvo_kunmanaged)
1726 		pvo->pvo_vaddr |= PVO_MANAGED;
1727 	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
1728 
1729 	/*
1730 	 * Remember if the list was empty and therefore will be the first
1731 	 * item.
1732 	 */
1733 	first = LIST_FIRST(pvo_head) == NULL;
1734 
1735 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1736 	if (pvo->pvo_vaddr & PVO_WIRED)
1737 		pvo->pvo_pmap->pm_stats.wired_count++;
1738 	pvo->pvo_pmap->pm_stats.resident_count++;
1739 
1740 	/*
1741 	 * We hope this succeeds but it isn't required.
1742 	 */
1743 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1744 	if (i >= 0) {
1745 		PVO_PTEGIDX_SET(pvo, i);
1746 	} else {
1747 		panic("pmap_pvo_enter: overflow");
1748 		pmap_pte_overflow++;
1749 	}
1750 
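	/*
	 * ENOENT tells the caller that this was the first mapping for the
	 * page: pmap_enter() uses it as the cue to flush the instruction
	 * cache for executable mappings, and pmap_kenter() treats it as
	 * success.
	 */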
1751 	return (first ? ENOENT : 0);
1752 }
1753 
1754 static void
1755 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1756 {
1757 	struct	pte *pt;
1758 
1759 	/*
1760 	 * If there is an active pte entry, we need to deactivate it (and
1761 	 * save the ref & chg bits).
1762 	 */
1763 	pt = pmap_pvo_to_pte(pvo, pteidx);
1764 	if (pt != NULL) {
1765 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1766 		PVO_PTEGIDX_CLR(pvo);
1767 	} else {
1768 		pmap_pte_overflow--;
1769 	}
1770 
1771 	/*
1772 	 * Update our statistics.
1773 	 */
1774 	pvo->pvo_pmap->pm_stats.resident_count--;
1775 	if (pvo->pvo_vaddr & PVO_WIRED)
1776 		pvo->pvo_pmap->pm_stats.wired_count--;
1777 
1778 	/*
1779 	 * Save the REF/CHG bits into their cache if the page is managed.
1780 	 */
1781 	if (pvo->pvo_vaddr & PVO_MANAGED) {
1782 		struct	vm_page *pg;
1783 
1784 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1785 		if (pg != NULL) {
1786 			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
1787 			    (PTE_REF | PTE_CHG));
1788 		}
1789 	}
1790 
1791 	/*
1792 	 * Remove this PVO from the PV list.
1793 	 */
1794 	LIST_REMOVE(pvo, pvo_vlink);
1795 
1796 	/*
1797 	 * Remove this from the overflow list and return it to the pool
1798 	 * if we aren't going to reuse it.
1799 	 */
1800 	LIST_REMOVE(pvo, pvo_olink);
1801 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1802 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
1803 		    pmap_upvo_zone, pvo);
1804 	pmap_pvo_entries--;
1805 	pmap_pvo_remove_calls++;
1806 }
1807 
1808 static __inline int
1809 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1810 {
1811 	int	pteidx;
1812 
1813 	/*
1814 	 * We can find the actual pte entry without searching by grabbing
1815 	 * the PTEG slot index saved in the low bits of pvo_vaddr and by
1816 	 * noticing the HID bit.
1817 	 */
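	/*
	 * Illustrative example: with pmap_pteg_mask == 0x1fff, a PVO hashed
	 * to primary PTEG 0x135 and sitting in slot 2 gives
	 * pteidx == 0x135 * 8 + 2 == 0x9aa.  If PTE_HID is set, the PTE
	 * really lives in the secondary PTEG 0x135 ^ 0x1fff == 0x1eca, and
	 * the XOR below yields exactly that:
	 * 0x9aa ^ 0xfff8 == 0xf652 == 0x1eca * 8 + 2.
	 */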
1818 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1819 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1820 		pteidx ^= pmap_pteg_mask * 8;
1821 
1822 	return (pteidx);
1823 }
1824 
1825 static struct pvo_entry *
1826 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1827 {
1828 	struct	pvo_entry *pvo;
1829 	int	ptegidx;
1830 	u_int	sr;
1831 
1832 	va &= ~ADDR_POFF;
1833 	sr = va_to_sr(pm->pm_sr, va);
1834 	ptegidx = va_to_pteg(sr, va);
1835 
1836 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1837 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1838 			if (pteidx_p)
1839 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1840 			return (pvo);
1841 		}
1842 	}
1843 
1844 	return (NULL);
1845 }
1846 
1847 static struct pte *
1848 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1849 {
1850 	struct	pte *pt;
1851 
1852 	/*
1853 	 * If we haven't been supplied the ptegidx, calculate it.
1854 	 */
1855 	if (pteidx == -1) {
1856 		int	ptegidx;
1857 		u_int	sr;
1858 
1859 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1860 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
1861 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1862 	}
1863 
1864 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1865 
1866 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1867 		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
1868 		    "valid pte index", pvo);
1869 	}
1870 
1871 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1872 		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
1873 		    "but no valid pte", pvo);
1874 	}
1875 
1876 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1877 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1878 			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
1879 			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
1880 		}
1881 
1882 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
1883 		    != 0) {
1884 			panic("pmap_pvo_to_pte: pvo %p pte does not match "
1885 			    "pte %p in pmap_pteg_table", pvo, pt);
1886 		}
1887 
1888 		return (pt);
1889 	}
1890 
1891 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1892 		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
1893 		    "pmap_pteg_table but valid in pvo", pvo, pt);
1894 	}
1895 
1896 	return (NULL);
1897 }
1898 
1899 static void *
1900 pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1901 {
1902 	vm_page_t	m;
1903 
1904 	if (bytes != PAGE_SIZE)
1905 		panic("pmap_pvo_allocf: benno was shortsighted.  hit him.");
1906 
1907 	*flags = UMA_SLAB_PRIV;
1908 	m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM);
1909 	if (m == NULL)
1910 		return (NULL);
1911 	pmap_pvo_count++;
1912 	return ((void *)VM_PAGE_TO_PHYS(m));
1913 }
1914 
1915 /*
1916  * XXX: THIS STUFF SHOULD BE IN pte.c?
1917  */
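/*
 * Handle a hash-table miss for an EA that has a PVO but no resident PTE:
 * pick a pseudo-random slot in the primary PTEG (low timebase bits),
 * evict whatever PTE occupies it (saving its REF/CHG bits in the victim's
 * PVO) and install the faulting mapping in its place.
 */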
1918 int
1919 pmap_pte_spill(vm_offset_t addr)
1920 {
1921 	struct	pvo_entry *source_pvo, *victim_pvo;
1922 	struct	pvo_entry *pvo;
1923 	int	ptegidx, i, j;
1924 	u_int	sr;
1925 	struct	pteg *pteg;
1926 	struct	pte *pt;
1927 
1928 	pmap_pte_spills++;
1929 
1930 	sr = mfsrin(addr);
1931 	ptegidx = va_to_pteg(sr, addr);
1932 
1933 	/*
1934 	 * Have to substitute some entry.  Use the primary hash for this.
1935 	 * Use low bits of timebase as random generator.
1936 	 */
1937 	pteg = &pmap_pteg_table[ptegidx];
1938 	__asm __volatile("mftb %0" : "=r"(i));
1939 	i &= 7;
1940 	pt = &pteg->pt[i];
1941 
1942 	source_pvo = NULL;
1943 	victim_pvo = NULL;
1944 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1945 		/*
1946 		 * We need to find a pvo entry for this address.
1947 		 */
1948 		PMAP_PVO_CHECK(pvo);
1949 		if (source_pvo == NULL &&
1950 		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
1951 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
1952 			/*
1953 			 * Now found an entry to be spilled into the pteg.
1954 			 * The PTE is now valid, so we know it's active.
1955 			 */
1956 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1957 
1958 			if (j >= 0) {
1959 				PVO_PTEGIDX_SET(pvo, j);
1960 				pmap_pte_overflow--;
1961 				PMAP_PVO_CHECK(pvo);
1962 				return (1);
1963 			}
1964 
1965 			source_pvo = pvo;
1966 
1967 			if (victim_pvo != NULL)
1968 				break;
1969 		}
1970 
1971 		/*
1972 		 * We also need the pvo entry of the victim we are replacing
1973 		 * so save the R & C bits of the PTE.
1974 		 */
1975 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
1976 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
1977 			victim_pvo = pvo;
1978 			if (source_pvo != NULL)
1979 				break;
1980 		}
1981 	}
1982 
1983 	if (source_pvo == NULL)
1984 		return (0);
1985 
1986 	if (victim_pvo == NULL) {
1987 		if ((pt->pte_hi & PTE_HID) == 0)
1988 			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
1989 			    "entry", pt);
1990 
1991 		/*
1992 		 * If this is a secondary PTE, we need to search its primary
1993 		 * pvo bucket for the matching PVO.
1994 		 */
1995 		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
1996 		    pvo_olink) {
1997 			PMAP_PVO_CHECK(pvo);
1998 			/*
1999 			 * We also need the pvo entry of the victim we are
2000 			 * replacing so save the R & C bits of the PTE.
2001 			 */
2002 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
2003 				victim_pvo = pvo;
2004 				break;
2005 			}
2006 		}
2007 
2008 		if (victim_pvo == NULL)
2009 			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
2010 			    "entry", pt);
2011 	}
2012 
2013 	/*
2014 	 * We are invalidating the TLB entry for the EA we are replacing even
2015 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2016 	 * contained in the TLB entry.
2017 	 */
2018 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
2019 
2020 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
2021 	pmap_pte_set(pt, &source_pvo->pvo_pte);
2022 
2023 	PVO_PTEGIDX_CLR(victim_pvo);
2024 	PVO_PTEGIDX_SET(source_pvo, i);
2025 	pmap_pte_replacements++;
2026 
2027 	PMAP_PVO_CHECK(victim_pvo);
2028 	PMAP_PVO_CHECK(source_pvo);
2029 
2030 	return (1);
2031 }
2032 
2033 static int
2034 pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2035 {
2036 	struct	pte *pt;
2037 	int	i;
2038 
2039 	/*
2040 	 * First try primary hash.
2041 	 */
2042 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2043 		if ((pt->pte_hi & PTE_VALID) == 0) {
2044 			pvo_pt->pte_hi &= ~PTE_HID;
2045 			pmap_pte_set(pt, pvo_pt);
2046 			return (i);
2047 		}
2048 	}
2049 
2050 	/*
2051 	 * Now try secondary hash.  The secondary PTEG index is the one's
	 * complement of the primary index under the mask, which is also
	 * how pmap_pvo_pte_index() recovers the slot.
2052 	 */
2053 	ptegidx ^= pmap_pteg_mask;
2055 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2056 		if ((pt->pte_hi & PTE_VALID) == 0) {
2057 			pvo_pt->pte_hi |= PTE_HID;
2058 			pmap_pte_set(pt, pvo_pt);
2059 			return (i);
2060 		}
2061 	}
2062 
2063 	panic("pmap_pte_insert: overflow");
2064 	return (-1);
2065 }
2066 
2067 static boolean_t
2068 pmap_query_bit(vm_page_t m, int ptebit)
2069 {
2070 	struct	pvo_entry *pvo;
2071 	struct	pte *pt;
2072 
2073 	if (pmap_attr_fetch(m) & ptebit)
2074 		return (TRUE);
2075 
2076 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2077 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2078 
2079 		/*
2080 		 * See if we saved the bit off.  If so, cache it and return
2081 		 * success.
2082 		 */
2083 		if (pvo->pvo_pte.pte_lo & ptebit) {
2084 			pmap_attr_save(m, ptebit);
2085 			PMAP_PVO_CHECK(pvo);	/* sanity check */
2086 			return (TRUE);
2087 		}
2088 	}
2089 
2090 	/*
2091 	 * No luck, now go through the hard part of looking at the PTEs
2092 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2093 	 * the PTEs.
2094 	 */
2095 	SYNC();
2096 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2097 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2098 
2099 		/*
2100 		 * See if this pvo has a valid PTE.  If so, fetch the
2101 		 * REF/CHG bits from the valid PTE.  If the appropriate
2102 		 * ptebit is set, cache it and return success.
2103 		 */
2104 		pt = pmap_pvo_to_pte(pvo, -1);
2105 		if (pt != NULL) {
2106 			pmap_pte_synch(pt, &pvo->pvo_pte);
2107 			if (pvo->pvo_pte.pte_lo & ptebit) {
2108 				pmap_attr_save(m, ptebit);
2109 				PMAP_PVO_CHECK(pvo);	/* sanity check */
2110 				return (TRUE);
2111 			}
2112 		}
2113 	}
2114 
2115 	return (FALSE);
2116 }
2117 
2118 static boolean_t
2119 pmap_clear_bit(vm_page_t m, int ptebit)
2120 {
2121 	struct	pvo_entry *pvo;
2122 	struct	pte *pt;
2123 	int	rv;
2124 
2125 	/*
2126 	 * Clear the cached value.
2127 	 */
2128 	rv = pmap_attr_fetch(m);
2129 	pmap_attr_clear(m, ptebit);
2130 
2131 	/*
2132 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2133 	 * we can reset the right ones).  Note that since the pvo entries and
2134 	 * list heads are accessed via BAT0 and are never placed in the page
2135 	 * table, we don't have to worry about further accesses setting the
2136 	 * REF/CHG bits.
2137 	 */
2138 	SYNC();
2139 
2140 	/*
2141 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2142 	 * valid pte clear the ptebit from the valid pte.
2143 	 */
2144 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2145 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2146 		pt = pmap_pvo_to_pte(pvo, -1);
2147 		if (pt != NULL) {
2148 			pmap_pte_synch(pt, &pvo->pvo_pte);
2149 			if (pvo->pvo_pte.pte_lo & ptebit)
2150 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2151 		}
2152 		rv |= pvo->pvo_pte.pte_lo;
2153 		pvo->pvo_pte.pte_lo &= ~ptebit;
2154 		PMAP_PVO_CHECK(pvo);	/* sanity check */
2155 	}
2156 
2157 	return ((rv & ptebit) != 0);
2158 }
2159