xref: /freebsd/sys/powerpc/aim/mmu_oea.c (revision 8bbfa33a7969d98ee18609c767c5637f6c845c41)
1f9bac91bSBenno Rice /*
25244eac9SBenno Rice  * Copyright (c) 2001 The NetBSD Foundation, Inc.
35244eac9SBenno Rice  * All rights reserved.
45244eac9SBenno Rice  *
55244eac9SBenno Rice  * This code is derived from software contributed to The NetBSD Foundation
65244eac9SBenno Rice  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
75244eac9SBenno Rice  *
85244eac9SBenno Rice  * Redistribution and use in source and binary forms, with or without
95244eac9SBenno Rice  * modification, are permitted provided that the following conditions
105244eac9SBenno Rice  * are met:
115244eac9SBenno Rice  * 1. Redistributions of source code must retain the above copyright
125244eac9SBenno Rice  *    notice, this list of conditions and the following disclaimer.
135244eac9SBenno Rice  * 2. Redistributions in binary form must reproduce the above copyright
145244eac9SBenno Rice  *    notice, this list of conditions and the following disclaimer in the
155244eac9SBenno Rice  *    documentation and/or other materials provided with the distribution.
165244eac9SBenno Rice  * 3. All advertising materials mentioning features or use of this software
175244eac9SBenno Rice  *    must display the following acknowledgement:
185244eac9SBenno Rice  *        This product includes software developed by the NetBSD
195244eac9SBenno Rice  *        Foundation, Inc. and its contributors.
205244eac9SBenno Rice  * 4. Neither the name of The NetBSD Foundation nor the names of its
215244eac9SBenno Rice  *    contributors may be used to endorse or promote products derived
225244eac9SBenno Rice  *    from this software without specific prior written permission.
235244eac9SBenno Rice  *
245244eac9SBenno Rice  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
255244eac9SBenno Rice  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
265244eac9SBenno Rice  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
275244eac9SBenno Rice  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
285244eac9SBenno Rice  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
295244eac9SBenno Rice  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
305244eac9SBenno Rice  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
315244eac9SBenno Rice  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
325244eac9SBenno Rice  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
335244eac9SBenno Rice  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
345244eac9SBenno Rice  * POSSIBILITY OF SUCH DAMAGE.
355244eac9SBenno Rice  */
365244eac9SBenno Rice /*
37f9bac91bSBenno Rice  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38f9bac91bSBenno Rice  * Copyright (C) 1995, 1996 TooLs GmbH.
39f9bac91bSBenno Rice  * All rights reserved.
40f9bac91bSBenno Rice  *
41f9bac91bSBenno Rice  * Redistribution and use in source and binary forms, with or without
42f9bac91bSBenno Rice  * modification, are permitted provided that the following conditions
43f9bac91bSBenno Rice  * are met:
44f9bac91bSBenno Rice  * 1. Redistributions of source code must retain the above copyright
45f9bac91bSBenno Rice  *    notice, this list of conditions and the following disclaimer.
46f9bac91bSBenno Rice  * 2. Redistributions in binary form must reproduce the above copyright
47f9bac91bSBenno Rice  *    notice, this list of conditions and the following disclaimer in the
48f9bac91bSBenno Rice  *    documentation and/or other materials provided with the distribution.
49f9bac91bSBenno Rice  * 3. All advertising materials mentioning features or use of this software
50f9bac91bSBenno Rice  *    must display the following acknowledgement:
51f9bac91bSBenno Rice  *	This product includes software developed by TooLs GmbH.
52f9bac91bSBenno Rice  * 4. The name of TooLs GmbH may not be used to endorse or promote products
53f9bac91bSBenno Rice  *    derived from this software without specific prior written permission.
54f9bac91bSBenno Rice  *
55f9bac91bSBenno Rice  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56f9bac91bSBenno Rice  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57f9bac91bSBenno Rice  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58f9bac91bSBenno Rice  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59f9bac91bSBenno Rice  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60f9bac91bSBenno Rice  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61f9bac91bSBenno Rice  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62f9bac91bSBenno Rice  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63f9bac91bSBenno Rice  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64f9bac91bSBenno Rice  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65f9bac91bSBenno Rice  *
66111c77dcSBenno Rice  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67f9bac91bSBenno Rice  */
68f9bac91bSBenno Rice /*
69f9bac91bSBenno Rice  * Copyright (C) 2001 Benno Rice.
70f9bac91bSBenno Rice  * All rights reserved.
71f9bac91bSBenno Rice  *
72f9bac91bSBenno Rice  * Redistribution and use in source and binary forms, with or without
73f9bac91bSBenno Rice  * modification, are permitted provided that the following conditions
74f9bac91bSBenno Rice  * are met:
75f9bac91bSBenno Rice  * 1. Redistributions of source code must retain the above copyright
76f9bac91bSBenno Rice  *    notice, this list of conditions and the following disclaimer.
77f9bac91bSBenno Rice  * 2. Redistributions in binary form must reproduce the above copyright
78f9bac91bSBenno Rice  *    notice, this list of conditions and the following disclaimer in the
79f9bac91bSBenno Rice  *    documentation and/or other materials provided with the distribution.
80f9bac91bSBenno Rice  *
81f9bac91bSBenno Rice  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82f9bac91bSBenno Rice  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83f9bac91bSBenno Rice  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84f9bac91bSBenno Rice  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85f9bac91bSBenno Rice  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86f9bac91bSBenno Rice  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87f9bac91bSBenno Rice  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88f9bac91bSBenno Rice  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89f9bac91bSBenno Rice  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90f9bac91bSBenno Rice  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91f9bac91bSBenno Rice  */
92f9bac91bSBenno Rice 
93f9bac91bSBenno Rice #ifndef lint
94f9bac91bSBenno Rice static const char rcsid[] =
95f9bac91bSBenno Rice   "$FreeBSD$";
96f9bac91bSBenno Rice #endif /* not lint */
97f9bac91bSBenno Rice 
985244eac9SBenno Rice /*
995244eac9SBenno Rice  * Manages physical address maps.
1005244eac9SBenno Rice  *
1015244eac9SBenno Rice  * In addition to hardware address maps, this module is called upon to
1025244eac9SBenno Rice  * provide software-use-only maps which may or may not be stored in the
1035244eac9SBenno Rice  * same form as hardware maps.  These pseudo-maps are used to store
1045244eac9SBenno Rice  * intermediate results from copy operations to and from address spaces.
1055244eac9SBenno Rice  *
1065244eac9SBenno Rice  * Since the information managed by this module is also stored by the
1075244eac9SBenno Rice  * logical address mapping module, this module may throw away valid virtual
1085244eac9SBenno Rice  * to physical mappings at almost any time.  However, invalidations of
1095244eac9SBenno Rice  * mappings must be done as requested.
1105244eac9SBenno Rice  *
1115244eac9SBenno Rice  * In order to cope with hardware architectures which make virtual to
1125244eac9SBenno Rice  * physical map invalidates expensive, this module may delay invalidate
1135244eac9SBenno Rice  * reduced protection operations until such time as they are actually
1145244eac9SBenno Rice  * necessary.  This module is given full information as to which processors
1155244eac9SBenno Rice  * are currently using which maps, and to when physical maps must be made
1165244eac9SBenno Rice  * correct.
1175244eac9SBenno Rice  */
1185244eac9SBenno Rice 
119f9bac91bSBenno Rice #include <sys/param.h>
1200b27d710SPeter Wemm #include <sys/kernel.h>
1215244eac9SBenno Rice #include <sys/ktr.h>
12294e0b85eSMark Peek #include <sys/lock.h>
1235244eac9SBenno Rice #include <sys/msgbuf.h>
124f9bac91bSBenno Rice #include <sys/mutex.h>
1255244eac9SBenno Rice #include <sys/proc.h>
1265244eac9SBenno Rice #include <sys/sysctl.h>
1275244eac9SBenno Rice #include <sys/systm.h>
1285244eac9SBenno Rice #include <sys/vmmeter.h>
1295244eac9SBenno Rice 
1305244eac9SBenno Rice #include <dev/ofw/openfirm.h>
131f9bac91bSBenno Rice 
132f9bac91bSBenno Rice #include <vm/vm.h>
133f9bac91bSBenno Rice #include <vm/vm_param.h>
134f9bac91bSBenno Rice #include <vm/vm_kern.h>
135f9bac91bSBenno Rice #include <vm/vm_page.h>
136f9bac91bSBenno Rice #include <vm/vm_map.h>
137f9bac91bSBenno Rice #include <vm/vm_object.h>
138f9bac91bSBenno Rice #include <vm/vm_extern.h>
139f9bac91bSBenno Rice #include <vm/vm_pageout.h>
140f9bac91bSBenno Rice #include <vm/vm_pager.h>
141378862a7SJeff Roberson #include <vm/uma.h>
142f9bac91bSBenno Rice 
14331c82d03SBenno Rice #include <machine/powerpc.h>
144d699b539SMark Peek #include <machine/bat.h>
1455244eac9SBenno Rice #include <machine/frame.h>
1465244eac9SBenno Rice #include <machine/md_var.h>
1475244eac9SBenno Rice #include <machine/psl.h>
148f9bac91bSBenno Rice #include <machine/pte.h>
1495244eac9SBenno Rice #include <machine/sr.h>
150f9bac91bSBenno Rice 
1515244eac9SBenno Rice #define	PMAP_DEBUG
152f9bac91bSBenno Rice 
1535244eac9SBenno Rice #define TODO	panic("%s: not implemented", __func__);
154f9bac91bSBenno Rice 
1555244eac9SBenno Rice #define	PMAP_LOCK(pm)
1565244eac9SBenno Rice #define	PMAP_UNLOCK(pm)
1575244eac9SBenno Rice 
1585244eac9SBenno Rice #define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
1595244eac9SBenno Rice #define	TLBSYNC()	__asm __volatile("tlbsync");
1605244eac9SBenno Rice #define	SYNC()		__asm __volatile("sync");
1615244eac9SBenno Rice #define	EIEIO()		__asm __volatile("eieio");
1625244eac9SBenno Rice 
1635244eac9SBenno Rice #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
1645244eac9SBenno Rice #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
1655244eac9SBenno Rice #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
1665244eac9SBenno Rice 
1675244eac9SBenno Rice #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
1685244eac9SBenno Rice #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
1695244eac9SBenno Rice #define	PVO_WIRED		0x0010		/* PVO entry is wired */
1705244eac9SBenno Rice #define	PVO_MANAGED		0x0020		/* PVO entry is managed */
1715244eac9SBenno Rice #define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
172a8aaf02cSBenno Rice #define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
17349f8f727SBenno Rice 						   bootstrap */
1745244eac9SBenno Rice #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
1755244eac9SBenno Rice #define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
1765244eac9SBenno Rice #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
1775244eac9SBenno Rice #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
1785244eac9SBenno Rice #define	PVO_PTEGIDX_CLR(pvo)	\
1795244eac9SBenno Rice 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
1805244eac9SBenno Rice #define	PVO_PTEGIDX_SET(pvo, i)	\
1815244eac9SBenno Rice 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
1825244eac9SBenno Rice 
1835244eac9SBenno Rice #define	PMAP_PVO_CHECK(pvo)
1845244eac9SBenno Rice 
1855244eac9SBenno Rice struct ofw_map {
1865244eac9SBenno Rice 	vm_offset_t	om_va;
1875244eac9SBenno Rice 	vm_size_t	om_len;
1885244eac9SBenno Rice 	vm_offset_t	om_pa;
1895244eac9SBenno Rice 	u_int		om_mode;
1905244eac9SBenno Rice };
191f9bac91bSBenno Rice 
1925244eac9SBenno Rice int	pmap_bootstrapped = 0;
193f9bac91bSBenno Rice 
1945244eac9SBenno Rice /*
1955244eac9SBenno Rice  * Virtual and physical address of message buffer.
1965244eac9SBenno Rice  */
1975244eac9SBenno Rice struct		msgbuf *msgbufp;
1985244eac9SBenno Rice vm_offset_t	msgbuf_phys;
199f9bac91bSBenno Rice 
2005244eac9SBenno Rice /*
2015244eac9SBenno Rice  * Physical addresses of first and last available physical page.
2025244eac9SBenno Rice  */
203f9bac91bSBenno Rice vm_offset_t avail_start;
204f9bac91bSBenno Rice vm_offset_t avail_end;
2055244eac9SBenno Rice 
2065244eac9SBenno Rice /*
2075244eac9SBenno Rice  * Map of physical memory regions.
2085244eac9SBenno Rice  */
2095244eac9SBenno Rice vm_offset_t	phys_avail[128];
2105244eac9SBenno Rice u_int		phys_avail_count;
21131c82d03SBenno Rice static struct	mem_region *regions;
21231c82d03SBenno Rice static struct	mem_region *pregions;
21331c82d03SBenno Rice int		regions_sz, pregions_sz;
2145244eac9SBenno Rice static struct	ofw_map translations[128];
2155244eac9SBenno Rice static int	translations_size;
2165244eac9SBenno Rice 
2175244eac9SBenno Rice /*
2185244eac9SBenno Rice  * First and last available kernel virtual addresses.
2195244eac9SBenno Rice  */
220f9bac91bSBenno Rice vm_offset_t virtual_avail;
221f9bac91bSBenno Rice vm_offset_t virtual_end;
222f9bac91bSBenno Rice vm_offset_t kernel_vm_end;
223f9bac91bSBenno Rice 
2245244eac9SBenno Rice /*
2255244eac9SBenno Rice  * Kernel pmap.
2265244eac9SBenno Rice  */
2275244eac9SBenno Rice struct pmap kernel_pmap_store;
2285244eac9SBenno Rice extern struct pmap ofw_pmap;
229f9bac91bSBenno Rice 
230f9bac91bSBenno Rice /*
2315244eac9SBenno Rice  * PTEG data.
232f9bac91bSBenno Rice  */
2335244eac9SBenno Rice static struct	pteg *pmap_pteg_table;
2345244eac9SBenno Rice u_int		pmap_pteg_count;
2355244eac9SBenno Rice u_int		pmap_pteg_mask;
2365244eac9SBenno Rice 
2375244eac9SBenno Rice /*
2385244eac9SBenno Rice  * PVO data.
2395244eac9SBenno Rice  */
2405244eac9SBenno Rice struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
2415244eac9SBenno Rice struct	pvo_head pmap_pvo_kunmanaged =
2425244eac9SBenno Rice     LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
2435244eac9SBenno Rice struct	pvo_head pmap_pvo_unmanaged =
2445244eac9SBenno Rice     LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
2455244eac9SBenno Rice 
246378862a7SJeff Roberson uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
247378862a7SJeff Roberson uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
2485244eac9SBenno Rice struct		vm_object pmap_upvo_zone_obj;
2495244eac9SBenno Rice struct		vm_object pmap_mpvo_zone_obj;
2508355f576SJeff Roberson static vm_object_t	pmap_pvo_obj;
2518355f576SJeff Roberson static u_int		pmap_pvo_count;
2525244eac9SBenno Rice 
2530d290675SBenno Rice #define	BPVO_POOL_SIZE	32768
25449f8f727SBenno Rice static struct	pvo_entry *pmap_bpvo_pool;
2550d290675SBenno Rice static int	pmap_bpvo_pool_index = 0;
2565244eac9SBenno Rice 
2575244eac9SBenno Rice #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
2585244eac9SBenno Rice static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
2595244eac9SBenno Rice 
2605244eac9SBenno Rice static boolean_t pmap_initialized = FALSE;
2615244eac9SBenno Rice 
2625244eac9SBenno Rice /*
2635244eac9SBenno Rice  * Statistics.
2645244eac9SBenno Rice  */
2655244eac9SBenno Rice u_int	pmap_pte_valid = 0;
2665244eac9SBenno Rice u_int	pmap_pte_overflow = 0;
2675244eac9SBenno Rice u_int	pmap_pte_replacements = 0;
2685244eac9SBenno Rice u_int	pmap_pvo_entries = 0;
2695244eac9SBenno Rice u_int	pmap_pvo_enter_calls = 0;
2705244eac9SBenno Rice u_int	pmap_pvo_remove_calls = 0;
2715244eac9SBenno Rice u_int	pmap_pte_spills = 0;
2725244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
2735244eac9SBenno Rice     0, "");
2745244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
2755244eac9SBenno Rice     &pmap_pte_overflow, 0, "");
2765244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
2775244eac9SBenno Rice     &pmap_pte_replacements, 0, "");
2785244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
2795244eac9SBenno Rice     0, "");
2805244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
2815244eac9SBenno Rice     &pmap_pvo_enter_calls, 0, "");
2825244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
2835244eac9SBenno Rice     &pmap_pvo_remove_calls, 0, "");
2845244eac9SBenno Rice SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
2855244eac9SBenno Rice     &pmap_pte_spills, 0, "");
2865244eac9SBenno Rice 
2875244eac9SBenno Rice struct	pvo_entry *pmap_pvo_zeropage;
2885244eac9SBenno Rice 
2895244eac9SBenno Rice vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
2905244eac9SBenno Rice u_int		pmap_rkva_count = 4;
2915244eac9SBenno Rice 
2925244eac9SBenno Rice /*
2935244eac9SBenno Rice  * Allocate physical memory for use in pmap_bootstrap.
2945244eac9SBenno Rice  */
2955244eac9SBenno Rice static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
2965244eac9SBenno Rice 
2975244eac9SBenno Rice /*
2985244eac9SBenno Rice  * PTE calls.
2995244eac9SBenno Rice  */
3005244eac9SBenno Rice static int		pmap_pte_insert(u_int, struct pte *);
3015244eac9SBenno Rice 
3025244eac9SBenno Rice /*
3035244eac9SBenno Rice  * PVO calls.
3045244eac9SBenno Rice  */
305378862a7SJeff Roberson static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
3065244eac9SBenno Rice 		    vm_offset_t, vm_offset_t, u_int, int);
3075244eac9SBenno Rice static void	pmap_pvo_remove(struct pvo_entry *, int);
3085244eac9SBenno Rice static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
3095244eac9SBenno Rice static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
3105244eac9SBenno Rice 
3115244eac9SBenno Rice /*
3125244eac9SBenno Rice  * Utility routines.
3135244eac9SBenno Rice  */
3148355f576SJeff Roberson static void *		pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
3155244eac9SBenno Rice static struct		pvo_entry *pmap_rkva_alloc(void);
3165244eac9SBenno Rice static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
3175244eac9SBenno Rice 			    struct pte *, int *);
3185244eac9SBenno Rice static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
3195244eac9SBenno Rice static void		pmap_syncicache(vm_offset_t, vm_size_t);
3205244eac9SBenno Rice static boolean_t	pmap_query_bit(vm_page_t, int);
3215244eac9SBenno Rice static boolean_t	pmap_clear_bit(vm_page_t, int);
3225244eac9SBenno Rice static void		tlbia(void);
3235244eac9SBenno Rice 
3245244eac9SBenno Rice static __inline int
3255244eac9SBenno Rice va_to_sr(u_int *sr, vm_offset_t va)
3265244eac9SBenno Rice {
3275244eac9SBenno Rice 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
3285244eac9SBenno Rice }
3295244eac9SBenno Rice 
3305244eac9SBenno Rice static __inline u_int
3315244eac9SBenno Rice va_to_pteg(u_int sr, vm_offset_t addr)
3325244eac9SBenno Rice {
3335244eac9SBenno Rice 	u_int hash;
3345244eac9SBenno Rice 
3355244eac9SBenno Rice 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
3365244eac9SBenno Rice 	    ADDR_PIDX_SHFT);
3375244eac9SBenno Rice 	return (hash & pmap_pteg_mask);
3385244eac9SBenno Rice }
3395244eac9SBenno Rice 
3405244eac9SBenno Rice static __inline struct pvo_head *
3418207b362SBenno Rice pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
3425244eac9SBenno Rice {
3435244eac9SBenno Rice 	struct	vm_page *pg;
3445244eac9SBenno Rice 
3455244eac9SBenno Rice 	pg = PHYS_TO_VM_PAGE(pa);
3465244eac9SBenno Rice 
3478207b362SBenno Rice 	if (pg_p != NULL)
3488207b362SBenno Rice 		*pg_p = pg;
3498207b362SBenno Rice 
3505244eac9SBenno Rice 	if (pg == NULL)
3515244eac9SBenno Rice 		return (&pmap_pvo_unmanaged);
3525244eac9SBenno Rice 
3535244eac9SBenno Rice 	return (&pg->md.mdpg_pvoh);
3545244eac9SBenno Rice }
3555244eac9SBenno Rice 
3565244eac9SBenno Rice static __inline struct pvo_head *
3575244eac9SBenno Rice vm_page_to_pvoh(vm_page_t m)
358f9bac91bSBenno Rice {
359f9bac91bSBenno Rice 
3605244eac9SBenno Rice 	return (&m->md.mdpg_pvoh);
361f9bac91bSBenno Rice }
362f9bac91bSBenno Rice 
363f9bac91bSBenno Rice static __inline void
3645244eac9SBenno Rice pmap_attr_clear(vm_page_t m, int ptebit)
365f9bac91bSBenno Rice {
366f9bac91bSBenno Rice 
3675244eac9SBenno Rice 	m->md.mdpg_attrs &= ~ptebit;
3685244eac9SBenno Rice }
3695244eac9SBenno Rice 
3705244eac9SBenno Rice static __inline int
3715244eac9SBenno Rice pmap_attr_fetch(vm_page_t m)
3725244eac9SBenno Rice {
3735244eac9SBenno Rice 
3745244eac9SBenno Rice 	return (m->md.mdpg_attrs);
375f9bac91bSBenno Rice }
376f9bac91bSBenno Rice 
377f9bac91bSBenno Rice static __inline void
3785244eac9SBenno Rice pmap_attr_save(vm_page_t m, int ptebit)
379f9bac91bSBenno Rice {
380f9bac91bSBenno Rice 
3815244eac9SBenno Rice 	m->md.mdpg_attrs |= ptebit;
382f9bac91bSBenno Rice }
383f9bac91bSBenno Rice 
384f9bac91bSBenno Rice static __inline int
3855244eac9SBenno Rice pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
386f9bac91bSBenno Rice {
3875244eac9SBenno Rice 	if (pt->pte_hi == pvo_pt->pte_hi)
3885244eac9SBenno Rice 		return (1);
389f9bac91bSBenno Rice 
3905244eac9SBenno Rice 	return (0);
391f9bac91bSBenno Rice }
392f9bac91bSBenno Rice 
393f9bac91bSBenno Rice static __inline int
3945244eac9SBenno Rice pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
395f9bac91bSBenno Rice {
3965244eac9SBenno Rice 	return (pt->pte_hi & ~PTE_VALID) ==
3975244eac9SBenno Rice 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
3985244eac9SBenno Rice 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
399f9bac91bSBenno Rice }
400f9bac91bSBenno Rice 
4015244eac9SBenno Rice static __inline void
4025244eac9SBenno Rice pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
403f9bac91bSBenno Rice {
404f9bac91bSBenno Rice 	/*
4055244eac9SBenno Rice 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
4065244eac9SBenno Rice 	 * set when the real pte is set in memory.
407f9bac91bSBenno Rice 	 *
408f9bac91bSBenno Rice 	 * Note: Don't set the valid bit for correct operation of tlb update.
409f9bac91bSBenno Rice 	 */
4105244eac9SBenno Rice 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
4115244eac9SBenno Rice 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
4125244eac9SBenno Rice 	pt->pte_lo = pte_lo;
413f9bac91bSBenno Rice }
414f9bac91bSBenno Rice 
4155244eac9SBenno Rice static __inline void
4165244eac9SBenno Rice pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
417f9bac91bSBenno Rice {
418f9bac91bSBenno Rice 
4195244eac9SBenno Rice 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
420f9bac91bSBenno Rice }
421f9bac91bSBenno Rice 
4225244eac9SBenno Rice static __inline void
4235244eac9SBenno Rice pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
424f9bac91bSBenno Rice {
4255244eac9SBenno Rice 
4265244eac9SBenno Rice 	/*
4275244eac9SBenno Rice 	 * As shown in Section 7.6.3.2.3
4285244eac9SBenno Rice 	 */
4295244eac9SBenno Rice 	pt->pte_lo &= ~ptebit;
4305244eac9SBenno Rice 	TLBIE(va);
4315244eac9SBenno Rice 	EIEIO();
4325244eac9SBenno Rice 	TLBSYNC();
4335244eac9SBenno Rice 	SYNC();
4345244eac9SBenno Rice }
4355244eac9SBenno Rice 
4365244eac9SBenno Rice static __inline void
4375244eac9SBenno Rice pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
4385244eac9SBenno Rice {
4395244eac9SBenno Rice 
4405244eac9SBenno Rice 	pvo_pt->pte_hi |= PTE_VALID;
4415244eac9SBenno Rice 
4425244eac9SBenno Rice 	/*
4435244eac9SBenno Rice 	 * Update the PTE as defined in section 7.6.3.1.
4445244eac9SBenno Rice 	 * Note that the REF/CHG bits are from pvo_pt and thus should havce
4455244eac9SBenno Rice 	 * been saved so this routine can restore them (if desired).
4465244eac9SBenno Rice 	 */
4475244eac9SBenno Rice 	pt->pte_lo = pvo_pt->pte_lo;
4485244eac9SBenno Rice 	EIEIO();
4495244eac9SBenno Rice 	pt->pte_hi = pvo_pt->pte_hi;
4505244eac9SBenno Rice 	SYNC();
4515244eac9SBenno Rice 	pmap_pte_valid++;
4525244eac9SBenno Rice }
4535244eac9SBenno Rice 
4545244eac9SBenno Rice static __inline void
4555244eac9SBenno Rice pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
4565244eac9SBenno Rice {
4575244eac9SBenno Rice 
4585244eac9SBenno Rice 	pvo_pt->pte_hi &= ~PTE_VALID;
4595244eac9SBenno Rice 
4605244eac9SBenno Rice 	/*
4615244eac9SBenno Rice 	 * Force the reg & chg bits back into the PTEs.
4625244eac9SBenno Rice 	 */
4635244eac9SBenno Rice 	SYNC();
4645244eac9SBenno Rice 
4655244eac9SBenno Rice 	/*
4665244eac9SBenno Rice 	 * Invalidate the pte.
4675244eac9SBenno Rice 	 */
4685244eac9SBenno Rice 	pt->pte_hi &= ~PTE_VALID;
4695244eac9SBenno Rice 
4705244eac9SBenno Rice 	SYNC();
4715244eac9SBenno Rice 	TLBIE(va);
4725244eac9SBenno Rice 	EIEIO();
4735244eac9SBenno Rice 	TLBSYNC();
4745244eac9SBenno Rice 	SYNC();
4755244eac9SBenno Rice 
4765244eac9SBenno Rice 	/*
4775244eac9SBenno Rice 	 * Save the reg & chg bits.
4785244eac9SBenno Rice 	 */
4795244eac9SBenno Rice 	pmap_pte_synch(pt, pvo_pt);
4805244eac9SBenno Rice 	pmap_pte_valid--;
4815244eac9SBenno Rice }
4825244eac9SBenno Rice 
4835244eac9SBenno Rice static __inline void
4845244eac9SBenno Rice pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
4855244eac9SBenno Rice {
4865244eac9SBenno Rice 
4875244eac9SBenno Rice 	/*
4885244eac9SBenno Rice 	 * Invalidate the PTE
4895244eac9SBenno Rice 	 */
4905244eac9SBenno Rice 	pmap_pte_unset(pt, pvo_pt, va);
4915244eac9SBenno Rice 	pmap_pte_set(pt, pvo_pt);
492f9bac91bSBenno Rice }
493f9bac91bSBenno Rice 
494f9bac91bSBenno Rice /*
4955244eac9SBenno Rice  * Quick sort callout for comparing memory regions.
496f9bac91bSBenno Rice  */
4975244eac9SBenno Rice static int	mr_cmp(const void *a, const void *b);
4985244eac9SBenno Rice static int	om_cmp(const void *a, const void *b);
4995244eac9SBenno Rice 
5005244eac9SBenno Rice static int
5015244eac9SBenno Rice mr_cmp(const void *a, const void *b)
502f9bac91bSBenno Rice {
5035244eac9SBenno Rice 	const struct	mem_region *regiona;
5045244eac9SBenno Rice 	const struct	mem_region *regionb;
505f9bac91bSBenno Rice 
5065244eac9SBenno Rice 	regiona = a;
5075244eac9SBenno Rice 	regionb = b;
5085244eac9SBenno Rice 	if (regiona->mr_start < regionb->mr_start)
5095244eac9SBenno Rice 		return (-1);
5105244eac9SBenno Rice 	else if (regiona->mr_start > regionb->mr_start)
5115244eac9SBenno Rice 		return (1);
5125244eac9SBenno Rice 	else
513f9bac91bSBenno Rice 		return (0);
514f9bac91bSBenno Rice }
5155244eac9SBenno Rice 
5165244eac9SBenno Rice static int
5175244eac9SBenno Rice om_cmp(const void *a, const void *b)
5185244eac9SBenno Rice {
5195244eac9SBenno Rice 	const struct	ofw_map *mapa;
5205244eac9SBenno Rice 	const struct	ofw_map *mapb;
5215244eac9SBenno Rice 
5225244eac9SBenno Rice 	mapa = a;
5235244eac9SBenno Rice 	mapb = b;
5245244eac9SBenno Rice 	if (mapa->om_pa < mapb->om_pa)
5255244eac9SBenno Rice 		return (-1);
5265244eac9SBenno Rice 	else if (mapa->om_pa > mapb->om_pa)
5275244eac9SBenno Rice 		return (1);
5285244eac9SBenno Rice 	else
5295244eac9SBenno Rice 		return (0);
530f9bac91bSBenno Rice }
531f9bac91bSBenno Rice 
532f9bac91bSBenno Rice void
5335244eac9SBenno Rice pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
534f9bac91bSBenno Rice {
53531c82d03SBenno Rice 	ihandle_t	mmui;
5365244eac9SBenno Rice 	phandle_t	chosen, mmu;
5375244eac9SBenno Rice 	int		sz;
5385244eac9SBenno Rice 	int		i, j;
539d2c1f576SBenno Rice 	vm_size_t	size, physsz;
5405244eac9SBenno Rice 	vm_offset_t	pa, va, off;
5415244eac9SBenno Rice 	u_int		batl, batu;
542f9bac91bSBenno Rice 
543f9bac91bSBenno Rice         /*
5440d290675SBenno Rice          * Set up BAT0 to only map the lowest 256 MB area
5450d290675SBenno Rice          */
5460d290675SBenno Rice         battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
5470d290675SBenno Rice         battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
5480d290675SBenno Rice 
5490d290675SBenno Rice         /*
5500d290675SBenno Rice          * Map PCI memory space.
5510d290675SBenno Rice          */
5520d290675SBenno Rice         battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
5530d290675SBenno Rice         battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
5540d290675SBenno Rice 
5550d290675SBenno Rice         battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
5560d290675SBenno Rice         battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
5570d290675SBenno Rice 
5580d290675SBenno Rice         battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
5590d290675SBenno Rice         battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
5600d290675SBenno Rice 
5610d290675SBenno Rice         battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
5620d290675SBenno Rice         battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
5630d290675SBenno Rice 
5640d290675SBenno Rice         /*
5650d290675SBenno Rice          * Map obio devices.
5660d290675SBenno Rice          */
5670d290675SBenno Rice         battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
5680d290675SBenno Rice         battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
5690d290675SBenno Rice 
5700d290675SBenno Rice 	/*
5715244eac9SBenno Rice 	 * Use an IBAT and a DBAT to map the bottom segment of memory
5725244eac9SBenno Rice 	 * where we are.
573f9bac91bSBenno Rice 	 */
5745244eac9SBenno Rice 	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
5755244eac9SBenno Rice 	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
5765244eac9SBenno Rice 	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
5775244eac9SBenno Rice 	    :: "r"(batu), "r"(batl));
5780d290675SBenno Rice 
5795244eac9SBenno Rice #if 0
5800d290675SBenno Rice 	/* map frame buffer */
5810d290675SBenno Rice 	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
5820d290675SBenno Rice 	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
5830d290675SBenno Rice 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
5840d290675SBenno Rice 	    :: "r"(batu), "r"(batl));
5850d290675SBenno Rice #endif
5860d290675SBenno Rice 
5870d290675SBenno Rice #if 1
5880d290675SBenno Rice 	/* map pci space */
5895244eac9SBenno Rice 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
5900d290675SBenno Rice 	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
5910d290675SBenno Rice 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
5925244eac9SBenno Rice 	    :: "r"(batu), "r"(batl));
5935244eac9SBenno Rice #endif
594f9bac91bSBenno Rice 
595f9bac91bSBenno Rice 	/*
5965244eac9SBenno Rice 	 * Set the start and end of kva.
597f9bac91bSBenno Rice 	 */
5985244eac9SBenno Rice 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
5995244eac9SBenno Rice 	virtual_end = VM_MAX_KERNEL_ADDRESS;
600f9bac91bSBenno Rice 
60131c82d03SBenno Rice 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
6025244eac9SBenno Rice 	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
60331c82d03SBenno Rice 
60431c82d03SBenno Rice 	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
60531c82d03SBenno Rice 	for (i = 0; i < pregions_sz; i++) {
60631c82d03SBenno Rice 		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
60731c82d03SBenno Rice 			pregions[i].mr_start,
60831c82d03SBenno Rice 			pregions[i].mr_start + pregions[i].mr_size,
60931c82d03SBenno Rice 			pregions[i].mr_size);
61031c82d03SBenno Rice 	}
61131c82d03SBenno Rice 
61231c82d03SBenno Rice 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
61331c82d03SBenno Rice 		panic("pmap_bootstrap: phys_avail too small");
61431c82d03SBenno Rice 	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
6155244eac9SBenno Rice 	phys_avail_count = 0;
616d2c1f576SBenno Rice 	physsz = 0;
61731c82d03SBenno Rice 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
6185244eac9SBenno Rice 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
6195244eac9SBenno Rice 		    regions[i].mr_start + regions[i].mr_size,
6205244eac9SBenno Rice 		    regions[i].mr_size);
6215244eac9SBenno Rice 		phys_avail[j] = regions[i].mr_start;
6225244eac9SBenno Rice 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
6235244eac9SBenno Rice 		phys_avail_count++;
624d2c1f576SBenno Rice 		physsz += regions[i].mr_size;
625f9bac91bSBenno Rice 	}
626d2c1f576SBenno Rice 	physmem = btoc(physsz);
627f9bac91bSBenno Rice 
628f9bac91bSBenno Rice 	/*
6295244eac9SBenno Rice 	 * Allocate PTEG table.
630f9bac91bSBenno Rice 	 */
6315244eac9SBenno Rice #ifdef PTEGCOUNT
6325244eac9SBenno Rice 	pmap_pteg_count = PTEGCOUNT;
6335244eac9SBenno Rice #else
6345244eac9SBenno Rice 	pmap_pteg_count = 0x1000;
635f9bac91bSBenno Rice 
6365244eac9SBenno Rice 	while (pmap_pteg_count < physmem)
6375244eac9SBenno Rice 		pmap_pteg_count <<= 1;
638f9bac91bSBenno Rice 
6395244eac9SBenno Rice 	pmap_pteg_count >>= 1;
6405244eac9SBenno Rice #endif /* PTEGCOUNT */
641f9bac91bSBenno Rice 
6425244eac9SBenno Rice 	size = pmap_pteg_count * sizeof(struct pteg);
6435244eac9SBenno Rice 	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
6445244eac9SBenno Rice 	    size);
6455244eac9SBenno Rice 	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
6465244eac9SBenno Rice 	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
6475244eac9SBenno Rice 	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
6485244eac9SBenno Rice 	pmap_pteg_mask = pmap_pteg_count - 1;
649f9bac91bSBenno Rice 
6505244eac9SBenno Rice 	/*
651864bc520SBenno Rice 	 * Allocate pv/overflow lists.
6525244eac9SBenno Rice 	 */
6535244eac9SBenno Rice 	size = sizeof(struct pvo_head) * pmap_pteg_count;
6545244eac9SBenno Rice 	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
6555244eac9SBenno Rice 	    PAGE_SIZE);
6565244eac9SBenno Rice 	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
6575244eac9SBenno Rice 	for (i = 0; i < pmap_pteg_count; i++)
6585244eac9SBenno Rice 		LIST_INIT(&pmap_pvo_table[i]);
6595244eac9SBenno Rice 
6605244eac9SBenno Rice 	/*
6615244eac9SBenno Rice 	 * Allocate the message buffer.
6625244eac9SBenno Rice 	 */
6635244eac9SBenno Rice 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
6645244eac9SBenno Rice 
6655244eac9SBenno Rice 	/*
6665244eac9SBenno Rice 	 * Initialise the unmanaged pvo pool.
6675244eac9SBenno Rice 	 */
6680d290675SBenno Rice 	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
6690d290675SBenno Rice 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
67049f8f727SBenno Rice 	pmap_bpvo_pool_index = 0;
6715244eac9SBenno Rice 
6725244eac9SBenno Rice 	/*
6735244eac9SBenno Rice 	 * Make sure kernel vsid is allocated as well as VSID 0.
6745244eac9SBenno Rice 	 */
6755244eac9SBenno Rice 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
6765244eac9SBenno Rice 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
6775244eac9SBenno Rice 	pmap_vsid_bitmap[0] |= 1;
6785244eac9SBenno Rice 
6795244eac9SBenno Rice 	/*
6805244eac9SBenno Rice 	 * Set up the OpenFirmware pmap and add it's mappings.
6815244eac9SBenno Rice 	 */
6825244eac9SBenno Rice 	pmap_pinit(&ofw_pmap);
6835244eac9SBenno Rice 	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
6845244eac9SBenno Rice 	if ((chosen = OF_finddevice("/chosen")) == -1)
6855244eac9SBenno Rice 		panic("pmap_bootstrap: can't find /chosen");
6865244eac9SBenno Rice 	OF_getprop(chosen, "mmu", &mmui, 4);
6875244eac9SBenno Rice 	if ((mmu = OF_instance_to_package(mmui)) == -1)
6885244eac9SBenno Rice 		panic("pmap_bootstrap: can't get mmu package");
6895244eac9SBenno Rice 	if ((sz = OF_getproplen(mmu, "translations")) == -1)
6905244eac9SBenno Rice 		panic("pmap_bootstrap: can't get ofw translation count");
6915244eac9SBenno Rice 	if (sizeof(translations) < sz)
6925244eac9SBenno Rice 		panic("pmap_bootstrap: translations too small");
6935244eac9SBenno Rice 	bzero(translations, sz);
6945244eac9SBenno Rice 	if (OF_getprop(mmu, "translations", translations, sz) == -1)
6955244eac9SBenno Rice 		panic("pmap_bootstrap: can't get ofw translations");
6965244eac9SBenno Rice 	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
69731c82d03SBenno Rice 	sz /= sizeof(*translations);
6985244eac9SBenno Rice 	qsort(translations, sz, sizeof (*translations), om_cmp);
6995244eac9SBenno Rice 	for (i = 0; i < sz; i++) {
7005244eac9SBenno Rice 		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
7015244eac9SBenno Rice 		    translations[i].om_pa, translations[i].om_va,
7025244eac9SBenno Rice 		    translations[i].om_len);
7035244eac9SBenno Rice 
7045244eac9SBenno Rice 		/* Drop stuff below something? */
7055244eac9SBenno Rice 
7065244eac9SBenno Rice 		/* Enter the pages? */
7075244eac9SBenno Rice 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
7085244eac9SBenno Rice 			struct	vm_page m;
7095244eac9SBenno Rice 
7105244eac9SBenno Rice 			m.phys_addr = translations[i].om_pa + off;
7115244eac9SBenno Rice 			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
7125244eac9SBenno Rice 			    VM_PROT_ALL, 1);
713f9bac91bSBenno Rice 		}
714f9bac91bSBenno Rice 	}
7155244eac9SBenno Rice #ifdef SMP
7165244eac9SBenno Rice 	TLBSYNC();
7175244eac9SBenno Rice #endif
7185244eac9SBenno Rice 
7195244eac9SBenno Rice 	/*
7205244eac9SBenno Rice 	 * Initialize the kernel pmap (which is statically allocated).
7215244eac9SBenno Rice 	 */
7225244eac9SBenno Rice 	for (i = 0; i < 16; i++) {
7235244eac9SBenno Rice 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
724f9bac91bSBenno Rice 	}
7255244eac9SBenno Rice 	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
7265244eac9SBenno Rice 	kernel_pmap->pm_active = ~0;
7275244eac9SBenno Rice 
7285244eac9SBenno Rice 	/*
7295244eac9SBenno Rice 	 * Allocate a kernel stack with a guard page for thread0 and map it
7305244eac9SBenno Rice 	 * into the kernel page map.
7315244eac9SBenno Rice 	 */
7325244eac9SBenno Rice 	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
7335244eac9SBenno Rice 	kstack0_phys = pa;
7345244eac9SBenno Rice 	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
7355244eac9SBenno Rice 	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
7365244eac9SBenno Rice 	    kstack0);
7375244eac9SBenno Rice 	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
7385244eac9SBenno Rice 	for (i = 0; i < KSTACK_PAGES; i++) {
7395244eac9SBenno Rice 		pa = kstack0_phys + i * PAGE_SIZE;
7405244eac9SBenno Rice 		va = kstack0 + i * PAGE_SIZE;
7415244eac9SBenno Rice 		pmap_kenter(va, pa);
7425244eac9SBenno Rice 		TLBIE(va);
743f9bac91bSBenno Rice 	}
744f9bac91bSBenno Rice 
745f9bac91bSBenno Rice 	/*
7465244eac9SBenno Rice 	 * Calculate the first and last available physical addresses.
7475244eac9SBenno Rice 	 */
7485244eac9SBenno Rice 	avail_start = phys_avail[0];
7495244eac9SBenno Rice 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
7505244eac9SBenno Rice 		;
7515244eac9SBenno Rice 	avail_end = phys_avail[i + 1];
7525244eac9SBenno Rice 	Maxmem = powerpc_btop(avail_end);
7535244eac9SBenno Rice 
7545244eac9SBenno Rice 	/*
7555244eac9SBenno Rice 	 * Allocate virtual address space for the message buffer.
7565244eac9SBenno Rice 	 */
7575244eac9SBenno Rice 	msgbufp = (struct msgbuf *)virtual_avail;
7585244eac9SBenno Rice 	virtual_avail += round_page(MSGBUF_SIZE);
7595244eac9SBenno Rice 
7605244eac9SBenno Rice 	/*
7615244eac9SBenno Rice 	 * Initialize hardware.
7625244eac9SBenno Rice 	 */
7635244eac9SBenno Rice 	for (i = 0; i < 16; i++) {
764d080d5fdSBenno Rice 		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
7655244eac9SBenno Rice 	}
7665244eac9SBenno Rice 	__asm __volatile ("mtsr %0,%1"
7675244eac9SBenno Rice 	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
7685244eac9SBenno Rice 	__asm __volatile ("sync; mtsdr1 %0; isync"
7695244eac9SBenno Rice 	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
7705244eac9SBenno Rice 	tlbia();
7715244eac9SBenno Rice 
7725244eac9SBenno Rice 	pmap_bootstrapped++;
7735244eac9SBenno Rice }
7745244eac9SBenno Rice 
7755244eac9SBenno Rice /*
7765244eac9SBenno Rice  * Activate a user pmap.  The pmap must be activated before it's address
7775244eac9SBenno Rice  * space can be accessed in any way.
778f9bac91bSBenno Rice  */
779f9bac91bSBenno Rice void
780b40ce416SJulian Elischer pmap_activate(struct thread *td)
781f9bac91bSBenno Rice {
7828207b362SBenno Rice 	pmap_t	pm, pmr;
783f9bac91bSBenno Rice 
784f9bac91bSBenno Rice 	/*
7855244eac9SBenno Rice 	 * Load all the data we need up front to encourasge the compiler to
7865244eac9SBenno Rice 	 * not issue any loads while we have interrupts disabled below.
787f9bac91bSBenno Rice 	 */
7885244eac9SBenno Rice 	pm = &td->td_proc->p_vmspace->vm_pmap;
789f9bac91bSBenno Rice 
7905244eac9SBenno Rice 	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
791f9bac91bSBenno Rice 
7928207b362SBenno Rice 	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
7938207b362SBenno Rice 		pmr = pm;
7948207b362SBenno Rice 
7955244eac9SBenno Rice 	pm->pm_active |= PCPU_GET(cpumask);
7968207b362SBenno Rice 	PCPU_SET(curpmap, pmr);
797ac6ba8bdSBenno Rice }
798ac6ba8bdSBenno Rice 
799ac6ba8bdSBenno Rice void
800ac6ba8bdSBenno Rice pmap_deactivate(struct thread *td)
801ac6ba8bdSBenno Rice {
802ac6ba8bdSBenno Rice 	pmap_t	pm;
803ac6ba8bdSBenno Rice 
804ac6ba8bdSBenno Rice 	pm = &td->td_proc->p_vmspace->vm_pmap;
805ac6ba8bdSBenno Rice 	pm->pm_active &= ~(PCPU_GET(cpumask));
8068207b362SBenno Rice 	PCPU_SET(curpmap, NULL);
807f9bac91bSBenno Rice }
808f9bac91bSBenno Rice 
809f9bac91bSBenno Rice vm_offset_t
8105244eac9SBenno Rice pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
811f9bac91bSBenno Rice {
8120f92104cSBenno Rice 
8130f92104cSBenno Rice 	return (va);
814f9bac91bSBenno Rice }
815f9bac91bSBenno Rice 
816f9bac91bSBenno Rice void
8170f92104cSBenno Rice pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
818f9bac91bSBenno Rice {
8190f92104cSBenno Rice 	struct	pvo_entry *pvo;
8200f92104cSBenno Rice 
8210f92104cSBenno Rice 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
8220f92104cSBenno Rice 
8230f92104cSBenno Rice 	if (pvo != NULL) {
8240f92104cSBenno Rice 		if (wired) {
8250f92104cSBenno Rice 			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
8260f92104cSBenno Rice 				pm->pm_stats.wired_count++;
8270f92104cSBenno Rice 			pvo->pvo_vaddr |= PVO_WIRED;
8280f92104cSBenno Rice 		} else {
8290f92104cSBenno Rice 			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
8300f92104cSBenno Rice 				pm->pm_stats.wired_count--;
8310f92104cSBenno Rice 			pvo->pvo_vaddr &= ~PVO_WIRED;
8320f92104cSBenno Rice 		}
8330f92104cSBenno Rice 	}
834f9bac91bSBenno Rice }
835f9bac91bSBenno Rice 
836f9bac91bSBenno Rice void
8375244eac9SBenno Rice pmap_clear_modify(vm_page_t m)
838f9bac91bSBenno Rice {
839f9bac91bSBenno Rice 
8405244eac9SBenno Rice 	if (m->flags * PG_FICTITIOUS)
841f9bac91bSBenno Rice 		return;
8425244eac9SBenno Rice 	pmap_clear_bit(m, PTE_CHG);
843f9bac91bSBenno Rice }
844f9bac91bSBenno Rice 
845f9bac91bSBenno Rice void
8465244eac9SBenno Rice pmap_collect(void)
847f9bac91bSBenno Rice {
8485244eac9SBenno Rice 	TODO;
849f9bac91bSBenno Rice }
850f9bac91bSBenno Rice 
851f9bac91bSBenno Rice void
8525244eac9SBenno Rice pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
8535244eac9SBenno Rice 	  vm_size_t len, vm_offset_t src_addr)
854f9bac91bSBenno Rice {
85525e2288dSBenno Rice 
85625e2288dSBenno Rice 	/*
85725e2288dSBenno Rice 	 * This is not needed as it's mainly an optimisation.
85825e2288dSBenno Rice 	 * It may want to be implemented later though.
85925e2288dSBenno Rice 	 */
860f9bac91bSBenno Rice }
861f9bac91bSBenno Rice 
862f9bac91bSBenno Rice void
86325e2288dSBenno Rice pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
864f9bac91bSBenno Rice {
86525e2288dSBenno Rice 	vm_offset_t	dst;
86625e2288dSBenno Rice 	vm_offset_t	src;
86725e2288dSBenno Rice 
86825e2288dSBenno Rice 	dst = VM_PAGE_TO_PHYS(mdst);
86925e2288dSBenno Rice 	src = VM_PAGE_TO_PHYS(msrc);
87025e2288dSBenno Rice 
87125e2288dSBenno Rice 	kcopy((void *)src, (void *)dst, PAGE_SIZE);
872f9bac91bSBenno Rice }
873111c77dcSBenno Rice 
874111c77dcSBenno Rice /*
8755244eac9SBenno Rice  * Zero a page of physical memory by temporarily mapping it into the tlb.
8765244eac9SBenno Rice  */
8775244eac9SBenno Rice void
8781a87a0daSPeter Wemm pmap_zero_page(vm_page_t m)
8795244eac9SBenno Rice {
8801a87a0daSPeter Wemm 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
8815244eac9SBenno Rice 	caddr_t	va;
8825244eac9SBenno Rice 	int	i;
8835244eac9SBenno Rice 
8845244eac9SBenno Rice 	if (pa < SEGMENT_LENGTH) {
8855244eac9SBenno Rice 		va = (caddr_t) pa;
8865244eac9SBenno Rice 	} else if (pmap_initialized) {
8875244eac9SBenno Rice 		if (pmap_pvo_zeropage == NULL)
8885244eac9SBenno Rice 			pmap_pvo_zeropage = pmap_rkva_alloc();
8895244eac9SBenno Rice 		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
8905244eac9SBenno Rice 		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
8915244eac9SBenno Rice 	} else {
8925244eac9SBenno Rice 		panic("pmap_zero_page: can't zero pa %#x", pa);
8935244eac9SBenno Rice 	}
8945244eac9SBenno Rice 
8955244eac9SBenno Rice 	bzero(va, PAGE_SIZE);
8965244eac9SBenno Rice 
8975244eac9SBenno Rice 	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
8985244eac9SBenno Rice 		__asm __volatile("dcbz 0,%0" :: "r"(va));
8995244eac9SBenno Rice 		va += CACHELINESIZE;
9005244eac9SBenno Rice 	}
9015244eac9SBenno Rice 
9025244eac9SBenno Rice 	if (pa >= SEGMENT_LENGTH)
9035244eac9SBenno Rice 		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
9045244eac9SBenno Rice }
9055244eac9SBenno Rice 
9065244eac9SBenno Rice void
9071a87a0daSPeter Wemm pmap_zero_page_area(vm_page_t m, int off, int size)
9085244eac9SBenno Rice {
9095244eac9SBenno Rice 	TODO;
9105244eac9SBenno Rice }
9115244eac9SBenno Rice 
9125244eac9SBenno Rice /*
9135244eac9SBenno Rice  * Map the given physical page at the specified virtual address in the
9145244eac9SBenno Rice  * target pmap with the protection requested.  If specified the page
9155244eac9SBenno Rice  * will be wired down.
9165244eac9SBenno Rice  */
9175244eac9SBenno Rice void
9185244eac9SBenno Rice pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
9195244eac9SBenno Rice 	   boolean_t wired)
9205244eac9SBenno Rice {
9215244eac9SBenno Rice 	struct		pvo_head *pvo_head;
922378862a7SJeff Roberson 	uma_zone_t	zone;
9238207b362SBenno Rice 	vm_page_t	pg;
9248207b362SBenno Rice 	u_int		pte_lo, pvo_flags, was_exec, i;
9255244eac9SBenno Rice 	int		error;
9265244eac9SBenno Rice 
9275244eac9SBenno Rice 	if (!pmap_initialized) {
9285244eac9SBenno Rice 		pvo_head = &pmap_pvo_kunmanaged;
9295244eac9SBenno Rice 		zone = pmap_upvo_zone;
9305244eac9SBenno Rice 		pvo_flags = 0;
9318207b362SBenno Rice 		pg = NULL;
9328207b362SBenno Rice 		was_exec = PTE_EXEC;
9335244eac9SBenno Rice 	} else {
9348207b362SBenno Rice 		pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg);
9355244eac9SBenno Rice 		zone = pmap_mpvo_zone;
9365244eac9SBenno Rice 		pvo_flags = PVO_MANAGED;
9378207b362SBenno Rice 		was_exec = 0;
9385244eac9SBenno Rice 	}
9395244eac9SBenno Rice 
9408207b362SBenno Rice 	/*
9418207b362SBenno Rice 	 * If this is a managed page, and it's the first reference to the page,
9428207b362SBenno Rice 	 * clear the execness of the page.  Otherwise fetch the execness.
9438207b362SBenno Rice 	 */
9448207b362SBenno Rice 	if (pg != NULL) {
9458207b362SBenno Rice 		if (LIST_EMPTY(pvo_head)) {
9468207b362SBenno Rice 			pmap_attr_clear(pg, PTE_EXEC);
9478207b362SBenno Rice 		} else {
9488207b362SBenno Rice 			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
9498207b362SBenno Rice 		}
9508207b362SBenno Rice 	}
9518207b362SBenno Rice 
9528207b362SBenno Rice 
9538207b362SBenno Rice 	/*
9548207b362SBenno Rice 	 * Assume the page is cache inhibited and access is guarded unless
9558207b362SBenno Rice 	 * it's in our available memory array.
9568207b362SBenno Rice 	 */
9575244eac9SBenno Rice 	pte_lo = PTE_I | PTE_G;
95831c82d03SBenno Rice 	for (i = 0; i < pregions_sz; i++) {
95931c82d03SBenno Rice 		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
96031c82d03SBenno Rice 		    (VM_PAGE_TO_PHYS(m) <
96131c82d03SBenno Rice 			(pregions[i].mr_start + pregions[i].mr_size))) {
9628207b362SBenno Rice 			pte_lo &= ~(PTE_I | PTE_G);
9638207b362SBenno Rice 			break;
9648207b362SBenno Rice 		}
9658207b362SBenno Rice 	}
9665244eac9SBenno Rice 
9675244eac9SBenno Rice 	if (prot & VM_PROT_WRITE)
9685244eac9SBenno Rice 		pte_lo |= PTE_BW;
9695244eac9SBenno Rice 	else
9705244eac9SBenno Rice 		pte_lo |= PTE_BR;
9715244eac9SBenno Rice 
9728207b362SBenno Rice 	pvo_flags |= (prot & VM_PROT_EXECUTE);
9735244eac9SBenno Rice 
9745244eac9SBenno Rice 	if (wired)
9755244eac9SBenno Rice 		pvo_flags |= PVO_WIRED;
9765244eac9SBenno Rice 
9778207b362SBenno Rice 	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
9788207b362SBenno Rice 	    pte_lo, pvo_flags);
9795244eac9SBenno Rice 
9808207b362SBenno Rice 	/*
9818207b362SBenno Rice 	 * Flush the real page from the instruction cache if this page is
9828207b362SBenno Rice 	 * mapped executable and cacheable and was not previously mapped (or
9838207b362SBenno Rice 	 * was not mapped executable).
9848207b362SBenno Rice 	 */
9858207b362SBenno Rice 	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
9868207b362SBenno Rice 	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
9875244eac9SBenno Rice 		/*
9885244eac9SBenno Rice 		 * Flush the real memory from the cache.
9895244eac9SBenno Rice 		 */
9908207b362SBenno Rice 		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
9918207b362SBenno Rice 		if (pg != NULL)
9928207b362SBenno Rice 			pmap_attr_save(pg, PTE_EXEC);
9935244eac9SBenno Rice 	}
9945244eac9SBenno Rice }
9955244eac9SBenno Rice 
9965244eac9SBenno Rice vm_offset_t
9970f92104cSBenno Rice pmap_extract(pmap_t pm, vm_offset_t va)
9985244eac9SBenno Rice {
9990f92104cSBenno Rice 	struct	pvo_entry *pvo;
10000f92104cSBenno Rice 
10010f92104cSBenno Rice 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
10020f92104cSBenno Rice 
10030f92104cSBenno Rice 	if (pvo != NULL) {
10040f92104cSBenno Rice 		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
10050f92104cSBenno Rice 	}
10060f92104cSBenno Rice 
10075244eac9SBenno Rice 	return (0);
10085244eac9SBenno Rice }
10095244eac9SBenno Rice 
10105244eac9SBenno Rice /*
10115244eac9SBenno Rice  * Grow the number of kernel page table entries.  Unneeded.
10125244eac9SBenno Rice  */
10135244eac9SBenno Rice void
10145244eac9SBenno Rice pmap_growkernel(vm_offset_t addr)
10155244eac9SBenno Rice {
10165244eac9SBenno Rice }
10175244eac9SBenno Rice 
10185244eac9SBenno Rice void
10195244eac9SBenno Rice pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
10205244eac9SBenno Rice {
10215244eac9SBenno Rice 
102252a3cde5SBenno Rice 	CTR0(KTR_PMAP, "pmap_init");
10230d290675SBenno Rice 
10240d290675SBenno Rice 	pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
10250d290675SBenno Rice 	pmap_pvo_count = 0;
10260d290675SBenno Rice 	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
10270d290675SBenno Rice 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
10280d290675SBenno Rice 	uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
10290d290675SBenno Rice 	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
10300d290675SBenno Rice 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
10310d290675SBenno Rice 	uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
10320d290675SBenno Rice 	pmap_initialized = TRUE;
10335244eac9SBenno Rice }
10345244eac9SBenno Rice 
10355244eac9SBenno Rice void
10365244eac9SBenno Rice pmap_init2(void)
10375244eac9SBenno Rice {
10385244eac9SBenno Rice 
103952a3cde5SBenno Rice 	CTR0(KTR_PMAP, "pmap_init2");
10405244eac9SBenno Rice }
10415244eac9SBenno Rice 
10425244eac9SBenno Rice boolean_t
10435244eac9SBenno Rice pmap_is_modified(vm_page_t m)
10445244eac9SBenno Rice {
10450f92104cSBenno Rice 
10460f92104cSBenno Rice 	if (m->flags & PG_FICTITIOUS)
10470f92104cSBenno Rice 		return (FALSE);
10480f92104cSBenno Rice 
10490f92104cSBenno Rice 	return (pmap_query_bit(m, PTE_CHG));
10505244eac9SBenno Rice }
10515244eac9SBenno Rice 
10525244eac9SBenno Rice void
10535244eac9SBenno Rice pmap_clear_reference(vm_page_t m)
10545244eac9SBenno Rice {
10555244eac9SBenno Rice 	TODO;
10565244eac9SBenno Rice }
10575244eac9SBenno Rice 
10587f3a4093SMike Silbersack /*
10597f3a4093SMike Silbersack  *	pmap_ts_referenced:
10607f3a4093SMike Silbersack  *
10617f3a4093SMike Silbersack  *	Return a count of reference bits for a page, clearing those bits.
10627f3a4093SMike Silbersack  *	It is not necessary for every reference bit to be cleared, but it
10637f3a4093SMike Silbersack  *	is necessary that 0 only be returned when there are truly no
10647f3a4093SMike Silbersack  *	reference bits set.
10657f3a4093SMike Silbersack  *
10667f3a4093SMike Silbersack  *	XXX: The exact number of bits to check and clear is a matter that
10677f3a4093SMike Silbersack  *	should be tested and standardized at some point in the future for
10687f3a4093SMike Silbersack  *	optimal aging of shared pages.
10697f3a4093SMike Silbersack  */
10707f3a4093SMike Silbersack 
10715244eac9SBenno Rice int
10725244eac9SBenno Rice pmap_ts_referenced(vm_page_t m)
10735244eac9SBenno Rice {
10745244eac9SBenno Rice 	TODO;
10755244eac9SBenno Rice 	return (0);
10765244eac9SBenno Rice }
10775244eac9SBenno Rice 
10785244eac9SBenno Rice /*
10795244eac9SBenno Rice  * Map a wired page into kernel virtual address space.
10805244eac9SBenno Rice  */
10815244eac9SBenno Rice void
10825244eac9SBenno Rice pmap_kenter(vm_offset_t va, vm_offset_t pa)
10835244eac9SBenno Rice {
10845244eac9SBenno Rice 	u_int		pte_lo;
10855244eac9SBenno Rice 	int		error;
10865244eac9SBenno Rice 	int		i;
10875244eac9SBenno Rice 
10885244eac9SBenno Rice #if 0
10895244eac9SBenno Rice 	if (va < VM_MIN_KERNEL_ADDRESS)
10905244eac9SBenno Rice 		panic("pmap_kenter: attempt to enter non-kernel address %#x",
10915244eac9SBenno Rice 		    va);
10925244eac9SBenno Rice #endif
10935244eac9SBenno Rice 
10945244eac9SBenno Rice 	pte_lo = PTE_I | PTE_G | PTE_BW;
10955244eac9SBenno Rice 	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
10965244eac9SBenno Rice 		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
10975244eac9SBenno Rice 			pte_lo &= ~(PTE_I | PTE_G);
10985244eac9SBenno Rice 			break;
10995244eac9SBenno Rice 		}
11005244eac9SBenno Rice 	}
11015244eac9SBenno Rice 
11025244eac9SBenno Rice 	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
11035244eac9SBenno Rice 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
11045244eac9SBenno Rice 
11055244eac9SBenno Rice 	if (error != 0 && error != ENOENT)
11065244eac9SBenno Rice 		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
11075244eac9SBenno Rice 		    pa, error);
11085244eac9SBenno Rice 
11095244eac9SBenno Rice 	/*
11105244eac9SBenno Rice 	 * Flush the real memory from the instruction cache.
11115244eac9SBenno Rice 	 */
11125244eac9SBenno Rice 	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
11135244eac9SBenno Rice 		pmap_syncicache(pa, PAGE_SIZE);
11145244eac9SBenno Rice 	}
11155244eac9SBenno Rice }
11165244eac9SBenno Rice 
1117e79f59e8SBenno Rice /*
1118e79f59e8SBenno Rice  * Extract the physical page address associated with the given kernel virtual
1119e79f59e8SBenno Rice  * address.
1120e79f59e8SBenno Rice  */
11215244eac9SBenno Rice vm_offset_t
11225244eac9SBenno Rice pmap_kextract(vm_offset_t va)
11235244eac9SBenno Rice {
1124e79f59e8SBenno Rice 	struct		pvo_entry *pvo;
1125e79f59e8SBenno Rice 
1126e79f59e8SBenno Rice 	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1127e79f59e8SBenno Rice 	if (pvo == NULL) {
11285244eac9SBenno Rice 		return (0);
11295244eac9SBenno Rice 	}
11305244eac9SBenno Rice 
1131e79f59e8SBenno Rice 	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1132e79f59e8SBenno Rice }
1133e79f59e8SBenno Rice 
113488afb2a3SBenno Rice /*
113588afb2a3SBenno Rice  * Remove a wired page from kernel virtual address space.
113688afb2a3SBenno Rice  */
11375244eac9SBenno Rice void
11385244eac9SBenno Rice pmap_kremove(vm_offset_t va)
11395244eac9SBenno Rice {
114088afb2a3SBenno Rice 
114188afb2a3SBenno Rice 	pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE));
11425244eac9SBenno Rice }
11435244eac9SBenno Rice 
11445244eac9SBenno Rice /*
11455244eac9SBenno Rice  * Map a range of physical addresses into kernel virtual address space.
11465244eac9SBenno Rice  *
11475244eac9SBenno Rice  * The value passed in *virt is a suggested virtual address for the mapping.
11485244eac9SBenno Rice  * Architectures which can support a direct-mapped physical to virtual region
11495244eac9SBenno Rice  * can return the appropriate address within that region, leaving '*virt'
11505244eac9SBenno Rice  * unchanged.  We cannot and therefore do not; *virt is updated with the
11515244eac9SBenno Rice  * first usable address after the mapped region.
11525244eac9SBenno Rice  */
11535244eac9SBenno Rice vm_offset_t
11545244eac9SBenno Rice pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
11555244eac9SBenno Rice {
11565244eac9SBenno Rice 	vm_offset_t	sva, va;
11575244eac9SBenno Rice 
11585244eac9SBenno Rice 	sva = *virt;
11595244eac9SBenno Rice 	va = sva;
11605244eac9SBenno Rice 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
11615244eac9SBenno Rice 		pmap_kenter(va, pa_start);
11625244eac9SBenno Rice 	*virt = va;
11635244eac9SBenno Rice 	return (sva);
11645244eac9SBenno Rice }
11655244eac9SBenno Rice 
11665244eac9SBenno Rice int
11675244eac9SBenno Rice pmap_mincore(pmap_t pmap, vm_offset_t addr)
11685244eac9SBenno Rice {
11695244eac9SBenno Rice 	TODO;
11705244eac9SBenno Rice 	return (0);
11715244eac9SBenno Rice }
11725244eac9SBenno Rice 
11735244eac9SBenno Rice /*
11745244eac9SBenno Rice  * Create the uarea for a new process.
1175111c77dcSBenno Rice  * This routine directly affects the fork perf for a process.
1176111c77dcSBenno Rice  */
1177111c77dcSBenno Rice void
1178111c77dcSBenno Rice pmap_new_proc(struct proc *p)
1179111c77dcSBenno Rice {
1180111c77dcSBenno Rice 	vm_object_t	upobj;
1181b8603f0eSPeter Wemm 	vm_offset_t	up;
1182111c77dcSBenno Rice 	vm_page_t	m;
11835244eac9SBenno Rice 	u_int		i;
1184111c77dcSBenno Rice 
1185111c77dcSBenno Rice 	/*
11865244eac9SBenno Rice 	 * Allocate the object for the upages.
1187111c77dcSBenno Rice 	 */
1188b8603f0eSPeter Wemm 	upobj = p->p_upages_obj;
1189b8603f0eSPeter Wemm 	if (upobj == NULL) {
11905fd2c51eSMark Peek 		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
1191111c77dcSBenno Rice 		p->p_upages_obj = upobj;
1192111c77dcSBenno Rice 	}
1193111c77dcSBenno Rice 
11945244eac9SBenno Rice 	/*
11955244eac9SBenno Rice 	 * Get a kernel virtual address for the uarea for this process.
11965244eac9SBenno Rice 	 */
11975fd2c51eSMark Peek 	up = (vm_offset_t)p->p_uarea;
1198b8603f0eSPeter Wemm 	if (up == 0) {
11995fd2c51eSMark Peek 		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
1200b8603f0eSPeter Wemm 		if (up == 0)
1201b8603f0eSPeter Wemm 			panic("pmap_new_proc: upage allocation failed");
12025fd2c51eSMark Peek 		p->p_uarea = (struct user *)up;
1203111c77dcSBenno Rice 	}
1204111c77dcSBenno Rice 
12055fd2c51eSMark Peek 	for (i = 0; i < UAREA_PAGES; i++) {
1206111c77dcSBenno Rice 		/*
12075244eac9SBenno Rice 		 * Get a uarea page.
1208111c77dcSBenno Rice 		 */
1209111c77dcSBenno Rice 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1210111c77dcSBenno Rice 
1211111c77dcSBenno Rice 		/*
12125244eac9SBenno Rice 		 * Wire the page.
1213111c77dcSBenno Rice 		 */
1214111c77dcSBenno Rice 		m->wire_count++;
1215111c77dcSBenno Rice 
1216111c77dcSBenno Rice 		/*
1217111c77dcSBenno Rice 		 * Enter the page into the kernel address space.
1218111c77dcSBenno Rice 		 */
12195244eac9SBenno Rice 		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1220111c77dcSBenno Rice 
1221111c77dcSBenno Rice 		vm_page_wakeup(m);
1222111c77dcSBenno Rice 		vm_page_flag_clear(m, PG_ZERO);
1223111c77dcSBenno Rice 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1224111c77dcSBenno Rice 		m->valid = VM_PAGE_BITS_ALL;
1225111c77dcSBenno Rice 	}
1226111c77dcSBenno Rice }
1227bdf71f56SBenno Rice 
12285244eac9SBenno Rice void
1229e79f59e8SBenno Rice pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
12305244eac9SBenno Rice 		    vm_pindex_t pindex, vm_size_t size, int limit)
1231bdf71f56SBenno Rice {
1232e79f59e8SBenno Rice 
1233e79f59e8SBenno Rice 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1234e79f59e8SBenno Rice 	    ("pmap_remove_pages: non current pmap"));
1235e79f59e8SBenno Rice 	/* XXX */
1236bdf71f56SBenno Rice }
1237bdf71f56SBenno Rice 
12385244eac9SBenno Rice /*
12395244eac9SBenno Rice  * Lower the permission for all mappings to a given page.
12405244eac9SBenno Rice  */
12415244eac9SBenno Rice void
12425244eac9SBenno Rice pmap_page_protect(vm_page_t m, vm_prot_t prot)
12435244eac9SBenno Rice {
12445244eac9SBenno Rice 	struct	pvo_head *pvo_head;
12455244eac9SBenno Rice 	struct	pvo_entry *pvo, *next_pvo;
12465244eac9SBenno Rice 	struct	pte *pt;
12475244eac9SBenno Rice 
12485244eac9SBenno Rice 	/*
12495244eac9SBenno Rice 	 * Since the routine only downgrades protection, if the
12505244eac9SBenno Rice 	 * maximal protection is desired, there isn't any change
12515244eac9SBenno Rice 	 * to be made.
12525244eac9SBenno Rice 	 */
12535244eac9SBenno Rice 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
12545244eac9SBenno Rice 	    (VM_PROT_READ|VM_PROT_WRITE))
12555244eac9SBenno Rice 		return;
12565244eac9SBenno Rice 
12575244eac9SBenno Rice 	pvo_head = vm_page_to_pvoh(m);
12585244eac9SBenno Rice 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
12595244eac9SBenno Rice 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
12605244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
12615244eac9SBenno Rice 
12625244eac9SBenno Rice 		/*
12635244eac9SBenno Rice 		 * Downgrading to no mapping at all, we just remove the entry.
12645244eac9SBenno Rice 		 */
12655244eac9SBenno Rice 		if ((prot & VM_PROT_READ) == 0) {
12665244eac9SBenno Rice 			pmap_pvo_remove(pvo, -1);
12675244eac9SBenno Rice 			continue;
12685244eac9SBenno Rice 		}
12695244eac9SBenno Rice 
12705244eac9SBenno Rice 		/*
12715244eac9SBenno Rice 		 * If EXEC permission is being revoked, just clear the flag
12725244eac9SBenno Rice 		 * in the PVO.
12735244eac9SBenno Rice 		 */
12745244eac9SBenno Rice 		if ((prot & VM_PROT_EXECUTE) == 0)
12755244eac9SBenno Rice 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
12765244eac9SBenno Rice 
12775244eac9SBenno Rice 		/*
12785244eac9SBenno Rice 		 * If this entry is already RO, don't diddle with the page
12795244eac9SBenno Rice 		 * table.
12805244eac9SBenno Rice 		 */
12815244eac9SBenno Rice 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
12825244eac9SBenno Rice 			PMAP_PVO_CHECK(pvo);
12835244eac9SBenno Rice 			continue;
12845244eac9SBenno Rice 		}
12855244eac9SBenno Rice 
12865244eac9SBenno Rice 		/*
12875244eac9SBenno Rice 		 * Grab the PTE before we diddle the bits so pvo_to_pte can
12885244eac9SBenno Rice 		 * verify the pte contents are as expected.
12895244eac9SBenno Rice 		 */
12905244eac9SBenno Rice 		pt = pmap_pvo_to_pte(pvo, -1);
12915244eac9SBenno Rice 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
12925244eac9SBenno Rice 		pvo->pvo_pte.pte_lo |= PTE_BR;
12935244eac9SBenno Rice 		if (pt != NULL)
12945244eac9SBenno Rice 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
12955244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
12965244eac9SBenno Rice 	}
12975244eac9SBenno Rice }
12985244eac9SBenno Rice 
12995244eac9SBenno Rice /*
13005244eac9SBenno Rice  * Make the specified page pageable (or not).  Unneeded.
13015244eac9SBenno Rice  */
13025244eac9SBenno Rice void
13035244eac9SBenno Rice pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
13045244eac9SBenno Rice 	      boolean_t pageable)
13055244eac9SBenno Rice {
13065244eac9SBenno Rice }
13075244eac9SBenno Rice 
13087f3a4093SMike Silbersack /*
13097f3a4093SMike Silbersack  * Returns true if the pmap's pv is one of the first
13107f3a4093SMike Silbersack  * 16 pvs linked to from this page.  This count may
13117f3a4093SMike Silbersack  * be changed upwards or downwards in the future; it
13127f3a4093SMike Silbersack  * is only necessary that true be returned for a small
13137f3a4093SMike Silbersack  * subset of pmaps for proper page aging.
13147f3a4093SMike Silbersack  */
13155244eac9SBenno Rice boolean_t
13167f3a4093SMike Silbersack pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
13175244eac9SBenno Rice {
13185244eac9SBenno Rice 	TODO;
13195244eac9SBenno Rice 	return (0);
13205244eac9SBenno Rice }
13215244eac9SBenno Rice 
13225244eac9SBenno Rice static u_int	pmap_vsidcontext;
13235244eac9SBenno Rice 
13245244eac9SBenno Rice void
13255244eac9SBenno Rice pmap_pinit(pmap_t pmap)
13265244eac9SBenno Rice {
13275244eac9SBenno Rice 	int	i, mask;
13285244eac9SBenno Rice 	u_int	entropy;
13295244eac9SBenno Rice 
13305244eac9SBenno Rice 	entropy = 0;
13315244eac9SBenno Rice 	__asm __volatile("mftb %0" : "=r"(entropy));
13325244eac9SBenno Rice 
13335244eac9SBenno Rice 	/*
13345244eac9SBenno Rice 	 * Allocate some segment registers for this pmap.
13355244eac9SBenno Rice 	 */
13365244eac9SBenno Rice 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
13375244eac9SBenno Rice 		u_int	hash, n;
13385244eac9SBenno Rice 
13395244eac9SBenno Rice 		/*
13405244eac9SBenno Rice 		 * Create a new value by mutiplying by a prime and adding in
13415244eac9SBenno Rice 		 * entropy from the timebase register.  This is to make the
13425244eac9SBenno Rice 		 * VSID more random so that the PT hash function collides
13435244eac9SBenno Rice 		 * less often.  (Note that the prime casues gcc to do shifts
13445244eac9SBenno Rice 		 * instead of a multiply.)
13455244eac9SBenno Rice 		 */
13465244eac9SBenno Rice 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
13475244eac9SBenno Rice 		hash = pmap_vsidcontext & (NPMAPS - 1);
13485244eac9SBenno Rice 		if (hash == 0)		/* 0 is special, avoid it */
13495244eac9SBenno Rice 			continue;
13505244eac9SBenno Rice 		n = hash >> 5;
13515244eac9SBenno Rice 		mask = 1 << (hash & (VSID_NBPW - 1));
13525244eac9SBenno Rice 		hash = (pmap_vsidcontext & 0xfffff);
13535244eac9SBenno Rice 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
13545244eac9SBenno Rice 			/* anything free in this bucket? */
13555244eac9SBenno Rice 			if (pmap_vsid_bitmap[n] == 0xffffffff) {
13565244eac9SBenno Rice 				entropy = (pmap_vsidcontext >> 20);
13575244eac9SBenno Rice 				continue;
13585244eac9SBenno Rice 			}
13595244eac9SBenno Rice 			i = ffs(~pmap_vsid_bitmap[i]) - 1;
13605244eac9SBenno Rice 			mask = 1 << i;
13615244eac9SBenno Rice 			hash &= 0xfffff & ~(VSID_NBPW - 1);
13625244eac9SBenno Rice 			hash |= i;
13635244eac9SBenno Rice 		}
13645244eac9SBenno Rice 		pmap_vsid_bitmap[n] |= mask;
13655244eac9SBenno Rice 		for (i = 0; i < 16; i++)
13665244eac9SBenno Rice 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
13675244eac9SBenno Rice 		return;
13685244eac9SBenno Rice 	}
13695244eac9SBenno Rice 
13705244eac9SBenno Rice 	panic("pmap_pinit: out of segments");
13715244eac9SBenno Rice }
13725244eac9SBenno Rice 
13735244eac9SBenno Rice /*
13745244eac9SBenno Rice  * Initialize the pmap associated with process 0.
13755244eac9SBenno Rice  */
13765244eac9SBenno Rice void
13775244eac9SBenno Rice pmap_pinit0(pmap_t pm)
13785244eac9SBenno Rice {
13795244eac9SBenno Rice 
13805244eac9SBenno Rice 	pmap_pinit(pm);
13815244eac9SBenno Rice 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
13825244eac9SBenno Rice }
13835244eac9SBenno Rice 
13845244eac9SBenno Rice void
13855244eac9SBenno Rice pmap_pinit2(pmap_t pmap)
13865244eac9SBenno Rice {
13875244eac9SBenno Rice 	/* XXX: Remove this stub when no longer called */
13885244eac9SBenno Rice }
13895244eac9SBenno Rice 
13905244eac9SBenno Rice void
13913e440943SBenno Rice pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
13925244eac9SBenno Rice {
13933e440943SBenno Rice 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
13943e440943SBenno Rice 	    ("pmap_prefault: non current pmap"));
13953e440943SBenno Rice 	/* XXX */
13965244eac9SBenno Rice }
13975244eac9SBenno Rice 
1398e79f59e8SBenno Rice /*
1399e79f59e8SBenno Rice  * Set the physical protection on the specified range of this map as requested.
1400e79f59e8SBenno Rice  */
14015244eac9SBenno Rice void
1402e79f59e8SBenno Rice pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
14035244eac9SBenno Rice {
1404e79f59e8SBenno Rice 	struct	pvo_entry *pvo;
1405e79f59e8SBenno Rice 	struct	pte *pt;
1406e79f59e8SBenno Rice 	int	pteidx;
1407e79f59e8SBenno Rice 
1408e79f59e8SBenno Rice 	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1409e79f59e8SBenno Rice 	    eva, prot);
1410e79f59e8SBenno Rice 
1411e79f59e8SBenno Rice 
1412e79f59e8SBenno Rice 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1413e79f59e8SBenno Rice 	    ("pmap_protect: non current pmap"));
1414e79f59e8SBenno Rice 
1415e79f59e8SBenno Rice 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1416e79f59e8SBenno Rice 		pmap_remove(pm, sva, eva);
1417e79f59e8SBenno Rice 		return;
1418e79f59e8SBenno Rice 	}
1419e79f59e8SBenno Rice 
1420e79f59e8SBenno Rice 	for (; sva < eva; sva += PAGE_SIZE) {
1421e79f59e8SBenno Rice 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1422e79f59e8SBenno Rice 		if (pvo == NULL)
1423e79f59e8SBenno Rice 			continue;
1424e79f59e8SBenno Rice 
1425e79f59e8SBenno Rice 		if ((prot & VM_PROT_EXECUTE) == 0)
1426e79f59e8SBenno Rice 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1427e79f59e8SBenno Rice 
1428e79f59e8SBenno Rice 		/*
1429e79f59e8SBenno Rice 		 * Grab the PTE pointer before we diddle with the cached PTE
1430e79f59e8SBenno Rice 		 * copy.
1431e79f59e8SBenno Rice 		 */
1432e79f59e8SBenno Rice 		pt = pmap_pvo_to_pte(pvo, pteidx);
1433e79f59e8SBenno Rice 		/*
1434e79f59e8SBenno Rice 		 * Change the protection of the page.
1435e79f59e8SBenno Rice 		 */
1436e79f59e8SBenno Rice 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1437e79f59e8SBenno Rice 		pvo->pvo_pte.pte_lo |= PTE_BR;
1438e79f59e8SBenno Rice 
1439e79f59e8SBenno Rice 		/*
1440e79f59e8SBenno Rice 		 * If the PVO is in the page table, update that pte as well.
1441e79f59e8SBenno Rice 		 */
1442e79f59e8SBenno Rice 		if (pt != NULL)
1443e79f59e8SBenno Rice 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1444e79f59e8SBenno Rice 	}
14455244eac9SBenno Rice }
14465244eac9SBenno Rice 
14475244eac9SBenno Rice vm_offset_t
14485244eac9SBenno Rice pmap_phys_address(int ppn)
14495244eac9SBenno Rice {
14505244eac9SBenno Rice 	TODO;
14515244eac9SBenno Rice 	return (0);
14525244eac9SBenno Rice }
14535244eac9SBenno Rice 
145488afb2a3SBenno Rice /*
145588afb2a3SBenno Rice  * Map a list of wired pages into kernel virtual address space.  This is
145688afb2a3SBenno Rice  * intended for temporary mappings which do not need page modification or
145788afb2a3SBenno Rice  * references recorded.  Existing mappings in the region are overwritten.
145888afb2a3SBenno Rice  */
14595244eac9SBenno Rice void
14605244eac9SBenno Rice pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
14615244eac9SBenno Rice {
14625244eac9SBenno Rice 	int	i;
14635244eac9SBenno Rice 
14645244eac9SBenno Rice 	for (i = 0; i < count; i++, va += PAGE_SIZE)
14655244eac9SBenno Rice 		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
14665244eac9SBenno Rice }
14675244eac9SBenno Rice 
146888afb2a3SBenno Rice /*
146988afb2a3SBenno Rice  * Remove page mappings from kernel virtual address space.  Intended for
147088afb2a3SBenno Rice  * temporary mappings entered by pmap_qenter.
147188afb2a3SBenno Rice  */
14725244eac9SBenno Rice void
14735244eac9SBenno Rice pmap_qremove(vm_offset_t va, int count)
14745244eac9SBenno Rice {
147588afb2a3SBenno Rice 	int	i;
147688afb2a3SBenno Rice 
147788afb2a3SBenno Rice 	for (i = 0; i < count; i++, va += PAGE_SIZE)
147888afb2a3SBenno Rice 		pmap_kremove(va);
14795244eac9SBenno Rice }
14805244eac9SBenno Rice 
14815244eac9SBenno Rice void
14825244eac9SBenno Rice pmap_release(pmap_t pmap)
14835244eac9SBenno Rice {
14845244eac9SBenno Rice 	TODO;
14855244eac9SBenno Rice }
14865244eac9SBenno Rice 
148788afb2a3SBenno Rice /*
148888afb2a3SBenno Rice  * Remove the given range of addresses from the specified map.
148988afb2a3SBenno Rice  */
14905244eac9SBenno Rice void
149188afb2a3SBenno Rice pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
14925244eac9SBenno Rice {
149388afb2a3SBenno Rice 	struct	pvo_entry *pvo;
149488afb2a3SBenno Rice 	int	pteidx;
149588afb2a3SBenno Rice 
149688afb2a3SBenno Rice 	for (; sva < eva; sva += PAGE_SIZE) {
149788afb2a3SBenno Rice 		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
149888afb2a3SBenno Rice 		if (pvo != NULL) {
149988afb2a3SBenno Rice 			pmap_pvo_remove(pvo, pteidx);
150088afb2a3SBenno Rice 		}
150188afb2a3SBenno Rice 	}
15025244eac9SBenno Rice }
15035244eac9SBenno Rice 
1504e79f59e8SBenno Rice /*
1505e79f59e8SBenno Rice  * Remove all pages from specified address space, this aids process exit
1506e79f59e8SBenno Rice  * speeds.  This is much faster than pmap_remove in the case of running down
1507e79f59e8SBenno Rice  * an entire address space.  Only works for the current pmap.
1508e79f59e8SBenno Rice  */
15095244eac9SBenno Rice void
1510e79f59e8SBenno Rice pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
15115244eac9SBenno Rice {
1512e79f59e8SBenno Rice 
1513e79f59e8SBenno Rice 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1514e79f59e8SBenno Rice 	    ("pmap_remove_pages: non current pmap"));
1515e79f59e8SBenno Rice 	pmap_remove(pm, sva, eva);
15165244eac9SBenno Rice }
15175244eac9SBenno Rice 
15185244eac9SBenno Rice void
15195244eac9SBenno Rice pmap_swapin_proc(struct proc *p)
15205244eac9SBenno Rice {
15215244eac9SBenno Rice 	TODO;
15225244eac9SBenno Rice }
15235244eac9SBenno Rice 
15245244eac9SBenno Rice void
15255244eac9SBenno Rice pmap_swapout_proc(struct proc *p)
15265244eac9SBenno Rice {
15275244eac9SBenno Rice 	TODO;
15285244eac9SBenno Rice }
15295244eac9SBenno Rice 
15305244eac9SBenno Rice /*
15315244eac9SBenno Rice  * Create the kernel stack and pcb for a new thread.
15325244eac9SBenno Rice  * This routine directly affects the fork perf for a process and
15335244eac9SBenno Rice  * create performance for a thread.
15345244eac9SBenno Rice  */
15355244eac9SBenno Rice void
15365244eac9SBenno Rice pmap_new_thread(struct thread *td)
15375244eac9SBenno Rice {
15385244eac9SBenno Rice 	vm_object_t	ksobj;
15395244eac9SBenno Rice 	vm_offset_t	ks;
15405244eac9SBenno Rice 	vm_page_t	m;
15415244eac9SBenno Rice 	u_int		i;
15425244eac9SBenno Rice 
15435244eac9SBenno Rice 	/*
15445244eac9SBenno Rice 	 * Allocate object for the kstack.
15455244eac9SBenno Rice 	 */
15465244eac9SBenno Rice 	ksobj = td->td_kstack_obj;
15475244eac9SBenno Rice 	if (ksobj == NULL) {
15485244eac9SBenno Rice 		ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
15495244eac9SBenno Rice 		td->td_kstack_obj = ksobj;
15505244eac9SBenno Rice 	}
15515244eac9SBenno Rice 
15525244eac9SBenno Rice 	/*
15535244eac9SBenno Rice 	 * Get a kernel virtual address for the kstack for this thread.
15545244eac9SBenno Rice 	 */
15555244eac9SBenno Rice 	ks = td->td_kstack;
15565244eac9SBenno Rice 	if (ks == 0) {
15575244eac9SBenno Rice 		ks = kmem_alloc_nofault(kernel_map,
15585244eac9SBenno Rice 		    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
15595244eac9SBenno Rice 		if (ks == 0)
15605244eac9SBenno Rice 			panic("pmap_new_thread: kstack allocation failed");
15615244eac9SBenno Rice 		TLBIE(ks);
15625244eac9SBenno Rice 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
15635244eac9SBenno Rice 		td->td_kstack = ks;
15645244eac9SBenno Rice 	}
15655244eac9SBenno Rice 
15665244eac9SBenno Rice 	for (i = 0; i < KSTACK_PAGES; i++) {
15675244eac9SBenno Rice 		/*
15685244eac9SBenno Rice 		 * Get a kernel stack page.
15695244eac9SBenno Rice 		 */
15705244eac9SBenno Rice 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
15715244eac9SBenno Rice 
15725244eac9SBenno Rice 		/*
15735244eac9SBenno Rice 		 * Wire the page.
15745244eac9SBenno Rice 		 */
15755244eac9SBenno Rice 		m->wire_count++;
15765244eac9SBenno Rice 
15775244eac9SBenno Rice 		/*
15785244eac9SBenno Rice 		 * Enter the page into the kernel address space.
15795244eac9SBenno Rice 		 */
15805244eac9SBenno Rice 		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
15815244eac9SBenno Rice 
15825244eac9SBenno Rice 		vm_page_wakeup(m);
15835244eac9SBenno Rice 		vm_page_flag_clear(m, PG_ZERO);
15845244eac9SBenno Rice 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
15855244eac9SBenno Rice 		m->valid = VM_PAGE_BITS_ALL;
15865244eac9SBenno Rice 	}
15875244eac9SBenno Rice }
15885244eac9SBenno Rice 
15895244eac9SBenno Rice void
15905244eac9SBenno Rice pmap_dispose_proc(struct proc *p)
15915244eac9SBenno Rice {
15925244eac9SBenno Rice 	TODO;
15935244eac9SBenno Rice }
15945244eac9SBenno Rice 
15955244eac9SBenno Rice void
15965244eac9SBenno Rice pmap_dispose_thread(struct thread *td)
15975244eac9SBenno Rice {
15985244eac9SBenno Rice 	TODO;
15995244eac9SBenno Rice }
16005244eac9SBenno Rice 
16015244eac9SBenno Rice void
16025244eac9SBenno Rice pmap_swapin_thread(struct thread *td)
16035244eac9SBenno Rice {
16045244eac9SBenno Rice 	TODO;
16055244eac9SBenno Rice }
16065244eac9SBenno Rice 
16075244eac9SBenno Rice void
16085244eac9SBenno Rice pmap_swapout_thread(struct thread *td)
16095244eac9SBenno Rice {
16105244eac9SBenno Rice 	TODO;
16115244eac9SBenno Rice }
16125244eac9SBenno Rice 
16135244eac9SBenno Rice /*
16145244eac9SBenno Rice  * Allocate a physical page of memory directly from the phys_avail map.
16155244eac9SBenno Rice  * Can only be called from pmap_bootstrap before avail start and end are
16165244eac9SBenno Rice  * calculated.
16175244eac9SBenno Rice  */
16185244eac9SBenno Rice static vm_offset_t
16195244eac9SBenno Rice pmap_bootstrap_alloc(vm_size_t size, u_int align)
16205244eac9SBenno Rice {
16215244eac9SBenno Rice 	vm_offset_t	s, e;
16225244eac9SBenno Rice 	int		i, j;
16235244eac9SBenno Rice 
16245244eac9SBenno Rice 	size = round_page(size);
16255244eac9SBenno Rice 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
16265244eac9SBenno Rice 		if (align != 0)
16275244eac9SBenno Rice 			s = (phys_avail[i] + align - 1) & ~(align - 1);
16285244eac9SBenno Rice 		else
16295244eac9SBenno Rice 			s = phys_avail[i];
16305244eac9SBenno Rice 		e = s + size;
16315244eac9SBenno Rice 
16325244eac9SBenno Rice 		if (s < phys_avail[i] || e > phys_avail[i + 1])
16335244eac9SBenno Rice 			continue;
16345244eac9SBenno Rice 
16355244eac9SBenno Rice 		if (s == phys_avail[i]) {
16365244eac9SBenno Rice 			phys_avail[i] += size;
16375244eac9SBenno Rice 		} else if (e == phys_avail[i + 1]) {
16385244eac9SBenno Rice 			phys_avail[i + 1] -= size;
16395244eac9SBenno Rice 		} else {
16405244eac9SBenno Rice 			for (j = phys_avail_count * 2; j > i; j -= 2) {
16415244eac9SBenno Rice 				phys_avail[j] = phys_avail[j - 2];
16425244eac9SBenno Rice 				phys_avail[j + 1] = phys_avail[j - 1];
16435244eac9SBenno Rice 			}
16445244eac9SBenno Rice 
16455244eac9SBenno Rice 			phys_avail[i + 3] = phys_avail[i + 1];
16465244eac9SBenno Rice 			phys_avail[i + 1] = s;
16475244eac9SBenno Rice 			phys_avail[i + 2] = e;
16485244eac9SBenno Rice 			phys_avail_count++;
16495244eac9SBenno Rice 		}
16505244eac9SBenno Rice 
16515244eac9SBenno Rice 		return (s);
16525244eac9SBenno Rice 	}
16535244eac9SBenno Rice 	panic("pmap_bootstrap_alloc: could not allocate memory");
16545244eac9SBenno Rice }
16555244eac9SBenno Rice 
16565244eac9SBenno Rice /*
16575244eac9SBenno Rice  * Return an unmapped pvo for a kernel virtual address.
16585244eac9SBenno Rice  * Used by pmap functions that operate on physical pages.
16595244eac9SBenno Rice  */
16605244eac9SBenno Rice static struct pvo_entry *
16615244eac9SBenno Rice pmap_rkva_alloc(void)
16625244eac9SBenno Rice {
16635244eac9SBenno Rice 	struct		pvo_entry *pvo;
16645244eac9SBenno Rice 	struct		pte *pt;
16655244eac9SBenno Rice 	vm_offset_t	kva;
16665244eac9SBenno Rice 	int		pteidx;
16675244eac9SBenno Rice 
16685244eac9SBenno Rice 	if (pmap_rkva_count == 0)
16695244eac9SBenno Rice 		panic("pmap_rkva_alloc: no more reserved KVAs");
16705244eac9SBenno Rice 
16715244eac9SBenno Rice 	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
16725244eac9SBenno Rice 	pmap_kenter(kva, 0);
16735244eac9SBenno Rice 
16745244eac9SBenno Rice 	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
16755244eac9SBenno Rice 
16765244eac9SBenno Rice 	if (pvo == NULL)
16775244eac9SBenno Rice 		panic("pmap_kva_alloc: pmap_pvo_find_va failed");
16785244eac9SBenno Rice 
16795244eac9SBenno Rice 	pt = pmap_pvo_to_pte(pvo, pteidx);
16805244eac9SBenno Rice 
16815244eac9SBenno Rice 	if (pt == NULL)
16825244eac9SBenno Rice 		panic("pmap_kva_alloc: pmap_pvo_to_pte failed");
16835244eac9SBenno Rice 
16845244eac9SBenno Rice 	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
16855244eac9SBenno Rice 	PVO_PTEGIDX_CLR(pvo);
16865244eac9SBenno Rice 
16875244eac9SBenno Rice 	pmap_pte_overflow++;
16885244eac9SBenno Rice 
16895244eac9SBenno Rice 	return (pvo);
16905244eac9SBenno Rice }
16915244eac9SBenno Rice 
16925244eac9SBenno Rice static void
16935244eac9SBenno Rice pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
16945244eac9SBenno Rice     int *depth_p)
16955244eac9SBenno Rice {
16965244eac9SBenno Rice 	struct	pte *pt;
16975244eac9SBenno Rice 
16985244eac9SBenno Rice 	/*
16995244eac9SBenno Rice 	 * If this pvo already has a valid pte, we need to save it so it can
17005244eac9SBenno Rice 	 * be restored later.  We then just reload the new PTE over the old
17015244eac9SBenno Rice 	 * slot.
17025244eac9SBenno Rice 	 */
17035244eac9SBenno Rice 	if (saved_pt != NULL) {
17045244eac9SBenno Rice 		pt = pmap_pvo_to_pte(pvo, -1);
17055244eac9SBenno Rice 
17065244eac9SBenno Rice 		if (pt != NULL) {
17075244eac9SBenno Rice 			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
17085244eac9SBenno Rice 			PVO_PTEGIDX_CLR(pvo);
17095244eac9SBenno Rice 			pmap_pte_overflow++;
17105244eac9SBenno Rice 		}
17115244eac9SBenno Rice 
17125244eac9SBenno Rice 		*saved_pt = pvo->pvo_pte;
17135244eac9SBenno Rice 
17145244eac9SBenno Rice 		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
17155244eac9SBenno Rice 	}
17165244eac9SBenno Rice 
17175244eac9SBenno Rice 	pvo->pvo_pte.pte_lo |= pa;
17185244eac9SBenno Rice 
17195244eac9SBenno Rice 	if (!pmap_pte_spill(pvo->pvo_vaddr))
17205244eac9SBenno Rice 		panic("pmap_pa_map: could not spill pvo %p", pvo);
17215244eac9SBenno Rice 
17225244eac9SBenno Rice 	if (depth_p != NULL)
17235244eac9SBenno Rice 		(*depth_p)++;
17245244eac9SBenno Rice }
17255244eac9SBenno Rice 
17265244eac9SBenno Rice static void
17275244eac9SBenno Rice pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
17285244eac9SBenno Rice {
17295244eac9SBenno Rice 	struct	pte *pt;
17305244eac9SBenno Rice 
17315244eac9SBenno Rice 	pt = pmap_pvo_to_pte(pvo, -1);
17325244eac9SBenno Rice 
17335244eac9SBenno Rice 	if (pt != NULL) {
17345244eac9SBenno Rice 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
17355244eac9SBenno Rice 		PVO_PTEGIDX_CLR(pvo);
17365244eac9SBenno Rice 		pmap_pte_overflow++;
17375244eac9SBenno Rice 	}
17385244eac9SBenno Rice 
17395244eac9SBenno Rice 	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
17405244eac9SBenno Rice 
17415244eac9SBenno Rice 	/*
17425244eac9SBenno Rice 	 * If there is a saved PTE and it's valid, restore it and return.
17435244eac9SBenno Rice 	 */
17445244eac9SBenno Rice 	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
17455244eac9SBenno Rice 		if (depth_p != NULL && --(*depth_p) == 0)
17465244eac9SBenno Rice 			panic("pmap_pa_unmap: restoring but depth == 0");
17475244eac9SBenno Rice 
17485244eac9SBenno Rice 		pvo->pvo_pte = *saved_pt;
17495244eac9SBenno Rice 
17505244eac9SBenno Rice 		if (!pmap_pte_spill(pvo->pvo_vaddr))
17515244eac9SBenno Rice 			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
17525244eac9SBenno Rice 	}
17535244eac9SBenno Rice }
17545244eac9SBenno Rice 
17555244eac9SBenno Rice static void
17565244eac9SBenno Rice pmap_syncicache(vm_offset_t pa, vm_size_t len)
17575244eac9SBenno Rice {
17585244eac9SBenno Rice 	__syncicache((void *)pa, len);
17595244eac9SBenno Rice }
17605244eac9SBenno Rice 
17615244eac9SBenno Rice static void
17625244eac9SBenno Rice tlbia(void)
17635244eac9SBenno Rice {
17645244eac9SBenno Rice 	caddr_t	i;
17655244eac9SBenno Rice 
17665244eac9SBenno Rice 	SYNC();
17675244eac9SBenno Rice 	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
17685244eac9SBenno Rice 		TLBIE(i);
17695244eac9SBenno Rice 		EIEIO();
17705244eac9SBenno Rice 	}
17715244eac9SBenno Rice 	TLBSYNC();
17725244eac9SBenno Rice 	SYNC();
17735244eac9SBenno Rice }
17745244eac9SBenno Rice 
17755244eac9SBenno Rice static int
1776378862a7SJeff Roberson pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
17775244eac9SBenno Rice     vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
17785244eac9SBenno Rice {
17795244eac9SBenno Rice 	struct	pvo_entry *pvo;
17805244eac9SBenno Rice 	u_int	sr;
17815244eac9SBenno Rice 	int	first;
17825244eac9SBenno Rice 	u_int	ptegidx;
17835244eac9SBenno Rice 	int	i;
17845244eac9SBenno Rice 
17855244eac9SBenno Rice 	pmap_pvo_enter_calls++;
17868207b362SBenno Rice 	first = 0;
17875244eac9SBenno Rice 
17885244eac9SBenno Rice 	/*
17895244eac9SBenno Rice 	 * Compute the PTE Group index.
17905244eac9SBenno Rice 	 */
17915244eac9SBenno Rice 	va &= ~ADDR_POFF;
17925244eac9SBenno Rice 	sr = va_to_sr(pm->pm_sr, va);
17935244eac9SBenno Rice 	ptegidx = va_to_pteg(sr, va);
17945244eac9SBenno Rice 
17955244eac9SBenno Rice 	/*
17965244eac9SBenno Rice 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
17975244eac9SBenno Rice 	 * there is a mapping.
17985244eac9SBenno Rice 	 */
17995244eac9SBenno Rice 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
18005244eac9SBenno Rice 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1801fafc7362SBenno Rice 			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
1802fafc7362SBenno Rice 			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
1803fafc7362SBenno Rice 			    (pte_lo & PTE_PP)) {
180449f8f727SBenno Rice 				return (0);
1805fafc7362SBenno Rice 			}
18065244eac9SBenno Rice 			pmap_pvo_remove(pvo, -1);
18075244eac9SBenno Rice 			break;
18085244eac9SBenno Rice 		}
18095244eac9SBenno Rice 	}
18105244eac9SBenno Rice 
18115244eac9SBenno Rice 	/*
18125244eac9SBenno Rice 	 * If we aren't overwriting a mapping, try to allocate.
18135244eac9SBenno Rice 	 */
181449f8f727SBenno Rice 	if (pmap_initialized) {
1815378862a7SJeff Roberson 		pvo = uma_zalloc(zone, M_NOWAIT);
181649f8f727SBenno Rice 	} else {
18170d290675SBenno Rice 		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
18180d290675SBenno Rice 			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
18190d290675SBenno Rice 			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
18200d290675SBenno Rice 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
182149f8f727SBenno Rice 		}
182249f8f727SBenno Rice 		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
182349f8f727SBenno Rice 		pmap_bpvo_pool_index++;
182449f8f727SBenno Rice 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
182549f8f727SBenno Rice 	}
18265244eac9SBenno Rice 
18275244eac9SBenno Rice 	if (pvo == NULL) {
18285244eac9SBenno Rice 		return (ENOMEM);
18295244eac9SBenno Rice 	}
18305244eac9SBenno Rice 
18315244eac9SBenno Rice 	pmap_pvo_entries++;
18325244eac9SBenno Rice 	pvo->pvo_vaddr = va;
18335244eac9SBenno Rice 	pvo->pvo_pmap = pm;
18345244eac9SBenno Rice 	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
18355244eac9SBenno Rice 	pvo->pvo_vaddr &= ~ADDR_POFF;
18365244eac9SBenno Rice 	if (flags & VM_PROT_EXECUTE)
18375244eac9SBenno Rice 		pvo->pvo_vaddr |= PVO_EXECUTABLE;
18385244eac9SBenno Rice 	if (flags & PVO_WIRED)
18395244eac9SBenno Rice 		pvo->pvo_vaddr |= PVO_WIRED;
18405244eac9SBenno Rice 	if (pvo_head != &pmap_pvo_kunmanaged)
18415244eac9SBenno Rice 		pvo->pvo_vaddr |= PVO_MANAGED;
18425244eac9SBenno Rice 	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
18435244eac9SBenno Rice 
18445244eac9SBenno Rice 	/*
18455244eac9SBenno Rice 	 * Remember if the list was empty and therefore will be the first
18465244eac9SBenno Rice 	 * item.
18475244eac9SBenno Rice 	 */
18488207b362SBenno Rice 	if (LIST_FIRST(pvo_head) == NULL)
18498207b362SBenno Rice 		first = 1;
18505244eac9SBenno Rice 
18515244eac9SBenno Rice 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
18525244eac9SBenno Rice 	if (pvo->pvo_pte.pte_lo & PVO_WIRED)
18535244eac9SBenno Rice 		pvo->pvo_pmap->pm_stats.wired_count++;
18545244eac9SBenno Rice 	pvo->pvo_pmap->pm_stats.resident_count++;
18555244eac9SBenno Rice 
18565244eac9SBenno Rice 	/*
18575244eac9SBenno Rice 	 * We hope this succeeds but it isn't required.
18585244eac9SBenno Rice 	 */
18595244eac9SBenno Rice 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
18605244eac9SBenno Rice 	if (i >= 0) {
18615244eac9SBenno Rice 		PVO_PTEGIDX_SET(pvo, i);
18625244eac9SBenno Rice 	} else {
18635244eac9SBenno Rice 		panic("pmap_pvo_enter: overflow");
18645244eac9SBenno Rice 		pmap_pte_overflow++;
18655244eac9SBenno Rice 	}
18665244eac9SBenno Rice 
18675244eac9SBenno Rice 	return (first ? ENOENT : 0);
18685244eac9SBenno Rice }
18695244eac9SBenno Rice 
18705244eac9SBenno Rice static void
18715244eac9SBenno Rice pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
18725244eac9SBenno Rice {
18735244eac9SBenno Rice 	struct	pte *pt;
18745244eac9SBenno Rice 
18755244eac9SBenno Rice 	/*
18765244eac9SBenno Rice 	 * If there is an active pte entry, we need to deactivate it (and
18775244eac9SBenno Rice 	 * save the ref & cfg bits).
18785244eac9SBenno Rice 	 */
18795244eac9SBenno Rice 	pt = pmap_pvo_to_pte(pvo, pteidx);
18805244eac9SBenno Rice 	if (pt != NULL) {
18815244eac9SBenno Rice 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
18825244eac9SBenno Rice 		PVO_PTEGIDX_CLR(pvo);
18835244eac9SBenno Rice 	} else {
18845244eac9SBenno Rice 		pmap_pte_overflow--;
18855244eac9SBenno Rice 	}
18865244eac9SBenno Rice 
18875244eac9SBenno Rice 	/*
18885244eac9SBenno Rice 	 * Update our statistics.
18895244eac9SBenno Rice 	 */
18905244eac9SBenno Rice 	pvo->pvo_pmap->pm_stats.resident_count--;
18915244eac9SBenno Rice 	if (pvo->pvo_pte.pte_lo & PVO_WIRED)
18925244eac9SBenno Rice 		pvo->pvo_pmap->pm_stats.wired_count--;
18935244eac9SBenno Rice 
18945244eac9SBenno Rice 	/*
18955244eac9SBenno Rice 	 * Save the REF/CHG bits into their cache if the page is managed.
18965244eac9SBenno Rice 	 */
18975244eac9SBenno Rice 	if (pvo->pvo_vaddr & PVO_MANAGED) {
18985244eac9SBenno Rice 		struct	vm_page *pg;
18995244eac9SBenno Rice 
19008862232dSBenno Rice 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
19015244eac9SBenno Rice 		if (pg != NULL) {
19025244eac9SBenno Rice 			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
19035244eac9SBenno Rice 			    (PTE_REF | PTE_CHG));
19045244eac9SBenno Rice 		}
19055244eac9SBenno Rice 	}
19065244eac9SBenno Rice 
19075244eac9SBenno Rice 	/*
19085244eac9SBenno Rice 	 * Remove this PVO from the PV list.
19095244eac9SBenno Rice 	 */
19105244eac9SBenno Rice 	LIST_REMOVE(pvo, pvo_vlink);
19115244eac9SBenno Rice 
19125244eac9SBenno Rice 	/*
19135244eac9SBenno Rice 	 * Remove this from the overflow list and return it to the pool
19145244eac9SBenno Rice 	 * if we aren't going to reuse it.
19155244eac9SBenno Rice 	 */
19165244eac9SBenno Rice 	LIST_REMOVE(pvo, pvo_olink);
191749f8f727SBenno Rice 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1918378862a7SJeff Roberson 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
191949f8f727SBenno Rice 		    pmap_upvo_zone, pvo);
19205244eac9SBenno Rice 	pmap_pvo_entries--;
19215244eac9SBenno Rice 	pmap_pvo_remove_calls++;
19225244eac9SBenno Rice }
19235244eac9SBenno Rice 
19245244eac9SBenno Rice static __inline int
19255244eac9SBenno Rice pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
19265244eac9SBenno Rice {
19275244eac9SBenno Rice 	int	pteidx;
19285244eac9SBenno Rice 
19295244eac9SBenno Rice 	/*
19305244eac9SBenno Rice 	 * We can find the actual pte entry without searching by grabbing
19315244eac9SBenno Rice 	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
19325244eac9SBenno Rice 	 * noticing the HID bit.
19335244eac9SBenno Rice 	 */
19345244eac9SBenno Rice 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
19355244eac9SBenno Rice 	if (pvo->pvo_pte.pte_hi & PTE_HID)
19365244eac9SBenno Rice 		pteidx ^= pmap_pteg_mask * 8;
19375244eac9SBenno Rice 
19385244eac9SBenno Rice 	return (pteidx);
19395244eac9SBenno Rice }
19405244eac9SBenno Rice 
19415244eac9SBenno Rice static struct pvo_entry *
19425244eac9SBenno Rice pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
19435244eac9SBenno Rice {
19445244eac9SBenno Rice 	struct	pvo_entry *pvo;
19455244eac9SBenno Rice 	int	ptegidx;
19465244eac9SBenno Rice 	u_int	sr;
19475244eac9SBenno Rice 
19485244eac9SBenno Rice 	va &= ~ADDR_POFF;
19495244eac9SBenno Rice 	sr = va_to_sr(pm->pm_sr, va);
19505244eac9SBenno Rice 	ptegidx = va_to_pteg(sr, va);
19515244eac9SBenno Rice 
19525244eac9SBenno Rice 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
19535244eac9SBenno Rice 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
19545244eac9SBenno Rice 			if (pteidx_p)
19555244eac9SBenno Rice 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
19565244eac9SBenno Rice 			return (pvo);
19575244eac9SBenno Rice 		}
19585244eac9SBenno Rice 	}
19595244eac9SBenno Rice 
19605244eac9SBenno Rice 	return (NULL);
19615244eac9SBenno Rice }
19625244eac9SBenno Rice 
19635244eac9SBenno Rice static struct pte *
19645244eac9SBenno Rice pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
19655244eac9SBenno Rice {
19665244eac9SBenno Rice 	struct	pte *pt;
19675244eac9SBenno Rice 
19685244eac9SBenno Rice 	/*
19695244eac9SBenno Rice 	 * If we haven't been supplied the ptegidx, calculate it.
19705244eac9SBenno Rice 	 */
19715244eac9SBenno Rice 	if (pteidx == -1) {
19725244eac9SBenno Rice 		int	ptegidx;
19735244eac9SBenno Rice 		u_int	sr;
19745244eac9SBenno Rice 
19755244eac9SBenno Rice 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
19765244eac9SBenno Rice 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
19775244eac9SBenno Rice 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
19785244eac9SBenno Rice 	}
19795244eac9SBenno Rice 
19805244eac9SBenno Rice 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
19815244eac9SBenno Rice 
19825244eac9SBenno Rice 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
19835244eac9SBenno Rice 		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
19845244eac9SBenno Rice 		    "valid pte index", pvo);
19855244eac9SBenno Rice 	}
19865244eac9SBenno Rice 
19875244eac9SBenno Rice 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
19885244eac9SBenno Rice 		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
19895244eac9SBenno Rice 		    "pvo but no valid pte", pvo);
19905244eac9SBenno Rice 	}
19915244eac9SBenno Rice 
19925244eac9SBenno Rice 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
19935244eac9SBenno Rice 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
19945244eac9SBenno Rice 			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
19955244eac9SBenno Rice 			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
19965244eac9SBenno Rice 		}
19975244eac9SBenno Rice 
19985244eac9SBenno Rice 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
19995244eac9SBenno Rice 		    != 0) {
20005244eac9SBenno Rice 			panic("pmap_pvo_to_pte: pvo %p pte does not match "
20015244eac9SBenno Rice 			    "pte %p in pmap_pteg_table", pvo, pt);
20025244eac9SBenno Rice 		}
20035244eac9SBenno Rice 
20045244eac9SBenno Rice 		return (pt);
20055244eac9SBenno Rice 	}
20065244eac9SBenno Rice 
20075244eac9SBenno Rice 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
20085244eac9SBenno Rice 		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
20095244eac9SBenno Rice 		    "pmap_pteg_table but valid in pvo", pvo, pt);
20105244eac9SBenno Rice 	}
20115244eac9SBenno Rice 
20125244eac9SBenno Rice 	return (NULL);
20135244eac9SBenno Rice }
20145244eac9SBenno Rice 
20158355f576SJeff Roberson static void *
20168355f576SJeff Roberson pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
20178355f576SJeff Roberson {
20188355f576SJeff Roberson 	vm_page_t	m;
20198355f576SJeff Roberson 
20208355f576SJeff Roberson 	if (bytes != PAGE_SIZE)
20218355f576SJeff Roberson 		panic("pmap_pvo_allocf: benno was shortsighted.  hit him.");
20228355f576SJeff Roberson 
20238355f576SJeff Roberson 	*flags = UMA_SLAB_PRIV;
20248355f576SJeff Roberson 	m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM);
20258355f576SJeff Roberson 	if (m == NULL)
20268355f576SJeff Roberson 		return (NULL);
202721d7ec89SBenno Rice 	pmap_pvo_count++;
20288355f576SJeff Roberson 	return ((void *)VM_PAGE_TO_PHYS(m));
20298355f576SJeff Roberson }
20308355f576SJeff Roberson 
20315244eac9SBenno Rice /*
20325244eac9SBenno Rice  * XXX: THIS STUFF SHOULD BE IN pte.c?
20335244eac9SBenno Rice  */
20345244eac9SBenno Rice int
20355244eac9SBenno Rice pmap_pte_spill(vm_offset_t addr)
20365244eac9SBenno Rice {
20375244eac9SBenno Rice 	struct	pvo_entry *source_pvo, *victim_pvo;
20385244eac9SBenno Rice 	struct	pvo_entry *pvo;
20395244eac9SBenno Rice 	int	ptegidx, i, j;
20405244eac9SBenno Rice 	u_int	sr;
20415244eac9SBenno Rice 	struct	pteg *pteg;
20425244eac9SBenno Rice 	struct	pte *pt;
20435244eac9SBenno Rice 
20445244eac9SBenno Rice 	pmap_pte_spills++;
20455244eac9SBenno Rice 
2046d080d5fdSBenno Rice 	sr = mfsrin(addr);
20475244eac9SBenno Rice 	ptegidx = va_to_pteg(sr, addr);
20485244eac9SBenno Rice 
20495244eac9SBenno Rice 	/*
20505244eac9SBenno Rice 	 * Have to substitute some entry.  Use the primary hash for this.
20515244eac9SBenno Rice 	 * Use low bits of timebase as random generator.
20525244eac9SBenno Rice 	 */
20535244eac9SBenno Rice 	pteg = &pmap_pteg_table[ptegidx];
20545244eac9SBenno Rice 	__asm __volatile("mftb %0" : "=r"(i));
20555244eac9SBenno Rice 	i &= 7;
20565244eac9SBenno Rice 	pt = &pteg->pt[i];
20575244eac9SBenno Rice 
20585244eac9SBenno Rice 	source_pvo = NULL;
20595244eac9SBenno Rice 	victim_pvo = NULL;
20605244eac9SBenno Rice 	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
20615244eac9SBenno Rice 		/*
20625244eac9SBenno Rice 		 * We need to find a pvo entry for this address.
20635244eac9SBenno Rice 		 */
20645244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);
20655244eac9SBenno Rice 		if (source_pvo == NULL &&
20665244eac9SBenno Rice 		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
20675244eac9SBenno Rice 		    pvo->pvo_pte.pte_hi & PTE_HID)) {
20685244eac9SBenno Rice 			/*
20695244eac9SBenno Rice 			 * Now found an entry to be spilled into the pteg.
20705244eac9SBenno Rice 			 * The PTE is now valid, so we know it's active.
20715244eac9SBenno Rice 			 */
20725244eac9SBenno Rice 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
20735244eac9SBenno Rice 
20745244eac9SBenno Rice 			if (j >= 0) {
20755244eac9SBenno Rice 				PVO_PTEGIDX_SET(pvo, j);
20765244eac9SBenno Rice 				pmap_pte_overflow--;
20775244eac9SBenno Rice 				PMAP_PVO_CHECK(pvo);
20785244eac9SBenno Rice 				return (1);
20795244eac9SBenno Rice 			}
20805244eac9SBenno Rice 
20815244eac9SBenno Rice 			source_pvo = pvo;
20825244eac9SBenno Rice 
20835244eac9SBenno Rice 			if (victim_pvo != NULL)
20845244eac9SBenno Rice 				break;
20855244eac9SBenno Rice 		}
20865244eac9SBenno Rice 
20875244eac9SBenno Rice 		/*
20885244eac9SBenno Rice 		 * We also need the pvo entry of the victim we are replacing
20895244eac9SBenno Rice 		 * so save the R & C bits of the PTE.
20905244eac9SBenno Rice 		 */
20915244eac9SBenno Rice 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
20925244eac9SBenno Rice 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
20935244eac9SBenno Rice 			victim_pvo = pvo;
20945244eac9SBenno Rice 			if (source_pvo != NULL)
20955244eac9SBenno Rice 				break;
20965244eac9SBenno Rice 		}
20975244eac9SBenno Rice 	}
20985244eac9SBenno Rice 
20995244eac9SBenno Rice 	if (source_pvo == NULL)
21005244eac9SBenno Rice 		return (0);
21015244eac9SBenno Rice 
21025244eac9SBenno Rice 	if (victim_pvo == NULL) {
21035244eac9SBenno Rice 		if ((pt->pte_hi & PTE_HID) == 0)
21045244eac9SBenno Rice 			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
21055244eac9SBenno Rice 			    "entry", pt);
21065244eac9SBenno Rice 
21075244eac9SBenno Rice 		/*
21085244eac9SBenno Rice 		 * If this is a secondary PTE, we need to search it's primary
21095244eac9SBenno Rice 		 * pvo bucket for the matching PVO.
21105244eac9SBenno Rice 		 */
21115244eac9SBenno Rice 		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
21125244eac9SBenno Rice 		    pvo_olink) {
21135244eac9SBenno Rice 			PMAP_PVO_CHECK(pvo);
21145244eac9SBenno Rice 			/*
21155244eac9SBenno Rice 			 * We also need the pvo entry of the victim we are
21165244eac9SBenno Rice 			 * replacing so save the R & C bits of the PTE.
21175244eac9SBenno Rice 			 */
21185244eac9SBenno Rice 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
21195244eac9SBenno Rice 				victim_pvo = pvo;
21205244eac9SBenno Rice 				break;
21215244eac9SBenno Rice 			}
21225244eac9SBenno Rice 		}
21235244eac9SBenno Rice 
21245244eac9SBenno Rice 		if (victim_pvo == NULL)
21255244eac9SBenno Rice 			panic("pmap_pte_spill: victim s-pte (%p) has no pvo"
21265244eac9SBenno Rice 			    "entry", pt);
21275244eac9SBenno Rice 	}
21285244eac9SBenno Rice 
21295244eac9SBenno Rice 	/*
21305244eac9SBenno Rice 	 * We are invalidating the TLB entry for the EA we are replacing even
21315244eac9SBenno Rice 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
21325244eac9SBenno Rice 	 * contained in the TLB entry.
21335244eac9SBenno Rice 	 */
21345244eac9SBenno Rice 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
21355244eac9SBenno Rice 
21365244eac9SBenno Rice 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
21375244eac9SBenno Rice 	pmap_pte_set(pt, &source_pvo->pvo_pte);
21385244eac9SBenno Rice 
21395244eac9SBenno Rice 	PVO_PTEGIDX_CLR(victim_pvo);
21405244eac9SBenno Rice 	PVO_PTEGIDX_SET(source_pvo, i);
21415244eac9SBenno Rice 	pmap_pte_replacements++;
21425244eac9SBenno Rice 
21435244eac9SBenno Rice 	PMAP_PVO_CHECK(victim_pvo);
21445244eac9SBenno Rice 	PMAP_PVO_CHECK(source_pvo);
21455244eac9SBenno Rice 
21465244eac9SBenno Rice 	return (1);
21475244eac9SBenno Rice }
21485244eac9SBenno Rice 
21495244eac9SBenno Rice static int
21505244eac9SBenno Rice pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
21515244eac9SBenno Rice {
21525244eac9SBenno Rice 	struct	pte *pt;
21535244eac9SBenno Rice 	int	i;
21545244eac9SBenno Rice 
21555244eac9SBenno Rice 	/*
21565244eac9SBenno Rice 	 * First try primary hash.
21575244eac9SBenno Rice 	 */
21585244eac9SBenno Rice 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
21595244eac9SBenno Rice 		if ((pt->pte_hi & PTE_VALID) == 0) {
21605244eac9SBenno Rice 			pvo_pt->pte_hi &= ~PTE_HID;
21615244eac9SBenno Rice 			pmap_pte_set(pt, pvo_pt);
21625244eac9SBenno Rice 			return (i);
21635244eac9SBenno Rice 		}
21645244eac9SBenno Rice 	}
21655244eac9SBenno Rice 
21665244eac9SBenno Rice 	/*
21675244eac9SBenno Rice 	 * Now try secondary hash.
21685244eac9SBenno Rice 	 */
21695244eac9SBenno Rice 	ptegidx ^= pmap_pteg_mask;
21705244eac9SBenno Rice 	ptegidx++;
21715244eac9SBenno Rice 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
21725244eac9SBenno Rice 		if ((pt->pte_hi & PTE_VALID) == 0) {
21735244eac9SBenno Rice 			pvo_pt->pte_hi |= PTE_HID;
21745244eac9SBenno Rice 			pmap_pte_set(pt, pvo_pt);
21755244eac9SBenno Rice 			return (i);
21765244eac9SBenno Rice 		}
21775244eac9SBenno Rice 	}
21785244eac9SBenno Rice 
21795244eac9SBenno Rice 	panic("pmap_pte_insert: overflow");
21805244eac9SBenno Rice 	return (-1);
21815244eac9SBenno Rice }
21825244eac9SBenno Rice 
21835244eac9SBenno Rice static boolean_t
21845244eac9SBenno Rice pmap_query_bit(vm_page_t m, int ptebit)
21855244eac9SBenno Rice {
21865244eac9SBenno Rice 	struct	pvo_entry *pvo;
21875244eac9SBenno Rice 	struct	pte *pt;
21885244eac9SBenno Rice 
21895244eac9SBenno Rice 	if (pmap_attr_fetch(m) & ptebit)
21905244eac9SBenno Rice 		return (TRUE);
21915244eac9SBenno Rice 
21925244eac9SBenno Rice 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
21935244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
21945244eac9SBenno Rice 
21955244eac9SBenno Rice 		/*
21965244eac9SBenno Rice 		 * See if we saved the bit off.  If so, cache it and return
21975244eac9SBenno Rice 		 * success.
21985244eac9SBenno Rice 		 */
21995244eac9SBenno Rice 		if (pvo->pvo_pte.pte_lo & ptebit) {
22005244eac9SBenno Rice 			pmap_attr_save(m, ptebit);
22015244eac9SBenno Rice 			PMAP_PVO_CHECK(pvo);	/* sanity check */
22025244eac9SBenno Rice 			return (TRUE);
22035244eac9SBenno Rice 		}
22045244eac9SBenno Rice 	}
22055244eac9SBenno Rice 
22065244eac9SBenno Rice 	/*
22075244eac9SBenno Rice 	 * No luck, now go through the hard part of looking at the PTEs
22085244eac9SBenno Rice 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
22095244eac9SBenno Rice 	 * the PTEs.
22105244eac9SBenno Rice 	 */
22115244eac9SBenno Rice 	SYNC();
22125244eac9SBenno Rice 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
22135244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
22145244eac9SBenno Rice 
22155244eac9SBenno Rice 		/*
22165244eac9SBenno Rice 		 * See if this pvo has a valid PTE.  if so, fetch the
22175244eac9SBenno Rice 		 * REF/CHG bits from the valid PTE.  If the appropriate
22185244eac9SBenno Rice 		 * ptebit is set, cache it and return success.
22195244eac9SBenno Rice 		 */
22205244eac9SBenno Rice 		pt = pmap_pvo_to_pte(pvo, -1);
22215244eac9SBenno Rice 		if (pt != NULL) {
22225244eac9SBenno Rice 			pmap_pte_synch(pt, &pvo->pvo_pte);
22235244eac9SBenno Rice 			if (pvo->pvo_pte.pte_lo & ptebit) {
22245244eac9SBenno Rice 				pmap_attr_save(m, ptebit);
22255244eac9SBenno Rice 				PMAP_PVO_CHECK(pvo);	/* sanity check */
22265244eac9SBenno Rice 				return (TRUE);
22275244eac9SBenno Rice 			}
22285244eac9SBenno Rice 		}
22295244eac9SBenno Rice 	}
22305244eac9SBenno Rice 
22315244eac9SBenno Rice 	return (TRUE);
22325244eac9SBenno Rice }
22335244eac9SBenno Rice 
22345244eac9SBenno Rice static boolean_t
22355244eac9SBenno Rice pmap_clear_bit(vm_page_t m, int ptebit)
22365244eac9SBenno Rice {
22375244eac9SBenno Rice 	struct	pvo_entry *pvo;
22385244eac9SBenno Rice 	struct	pte *pt;
22395244eac9SBenno Rice 	int	rv;
22405244eac9SBenno Rice 
22415244eac9SBenno Rice 	/*
22425244eac9SBenno Rice 	 * Clear the cached value.
22435244eac9SBenno Rice 	 */
22445244eac9SBenno Rice 	rv = pmap_attr_fetch(m);
22455244eac9SBenno Rice 	pmap_attr_clear(m, ptebit);
22465244eac9SBenno Rice 
22475244eac9SBenno Rice 	/*
22485244eac9SBenno Rice 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
22495244eac9SBenno Rice 	 * we can reset the right ones).  note that since the pvo entries and
22505244eac9SBenno Rice 	 * list heads are accessed via BAT0 and are never placed in the page
22515244eac9SBenno Rice 	 * table, we don't have to worry about further accesses setting the
22525244eac9SBenno Rice 	 * REF/CHG bits.
22535244eac9SBenno Rice 	 */
22545244eac9SBenno Rice 	SYNC();
22555244eac9SBenno Rice 
22565244eac9SBenno Rice 	/*
22575244eac9SBenno Rice 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
22585244eac9SBenno Rice 	 * valid pte clear the ptebit from the valid pte.
22595244eac9SBenno Rice 	 */
22605244eac9SBenno Rice 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
22615244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
22625244eac9SBenno Rice 		pt = pmap_pvo_to_pte(pvo, -1);
22635244eac9SBenno Rice 		if (pt != NULL) {
22645244eac9SBenno Rice 			pmap_pte_synch(pt, &pvo->pvo_pte);
22655244eac9SBenno Rice 			if (pvo->pvo_pte.pte_lo & ptebit)
22665244eac9SBenno Rice 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
22675244eac9SBenno Rice 		}
22685244eac9SBenno Rice 		rv |= pvo->pvo_pte.pte_lo;
22695244eac9SBenno Rice 		pvo->pvo_pte.pte_lo &= ~ptebit;
22705244eac9SBenno Rice 		PMAP_PVO_CHECK(pvo);	/* sanity check */
22715244eac9SBenno Rice 	}
22725244eac9SBenno Rice 
22735244eac9SBenno Rice 	return ((rv & ptebit) != 0);
2274bdf71f56SBenno Rice }
22758bbfa33aSBenno Rice 
22768bbfa33aSBenno Rice /*
22778bbfa33aSBenno Rice  * Map a set of physical memory pages into the kernel virtual
22788bbfa33aSBenno Rice  * address space. Return a pointer to where it is mapped. This
22798bbfa33aSBenno Rice  * routine is intended to be used for mapping device memory,
22808bbfa33aSBenno Rice  * NOT real memory.
22818bbfa33aSBenno Rice  */
22828bbfa33aSBenno Rice void *
22838bbfa33aSBenno Rice pmap_mapdev(vm_offset_t pa, vm_size_t size)
22848bbfa33aSBenno Rice {
22858bbfa33aSBenno Rice 	vm_offset_t va, tmpva, offset;
22868bbfa33aSBenno Rice 
22878bbfa33aSBenno Rice 	pa = trunc_page(pa);
22888bbfa33aSBenno Rice 	offset = pa & PAGE_MASK;
22898bbfa33aSBenno Rice 	size = roundup(offset + size, PAGE_SIZE);
22908bbfa33aSBenno Rice 
22918bbfa33aSBenno Rice 	GIANT_REQUIRED;
22928bbfa33aSBenno Rice 
22938bbfa33aSBenno Rice 	va = kmem_alloc_pageable(kernel_map, size);
22948bbfa33aSBenno Rice 	if (!va)
22958bbfa33aSBenno Rice 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
22968bbfa33aSBenno Rice 
22978bbfa33aSBenno Rice 	for (tmpva = va; size > 0;) {
22988bbfa33aSBenno Rice 		pmap_kenter(tmpva, pa);
22998bbfa33aSBenno Rice 		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
23008bbfa33aSBenno Rice 		size -= PAGE_SIZE;
23018bbfa33aSBenno Rice 		tmpva += PAGE_SIZE;
23028bbfa33aSBenno Rice 		pa += PAGE_SIZE;
23038bbfa33aSBenno Rice 	}
23048bbfa33aSBenno Rice 
23058bbfa33aSBenno Rice 	return ((void *)(va + offset));
23068bbfa33aSBenno Rice }
23078bbfa33aSBenno Rice 
23088bbfa33aSBenno Rice void
23098bbfa33aSBenno Rice pmap_unmapdev(vm_offset_t va, vm_size_t size)
23108bbfa33aSBenno Rice {
23118bbfa33aSBenno Rice 	vm_offset_t base, offset;
23128bbfa33aSBenno Rice 
23138bbfa33aSBenno Rice 	base = trunc_page(va);
23148bbfa33aSBenno Rice 	offset = va & PAGE_MASK;
23158bbfa33aSBenno Rice 	size = roundup(offset + size, PAGE_SIZE);
23168bbfa33aSBenno Rice 	kmem_free(kernel_map, base, size);
23178bbfa33aSBenno Rice }
2318