xref: /freebsd/sys/powerpc/include/pmap.h (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352)
1 /*-
2  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
3  * All rights reserved.
4  *
5  * Adapted for Freescale's e500 core CPUs.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
21  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 /*-
32  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
33  * Copyright (C) 1995, 1996 TooLs GmbH.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *	This product includes software developed by TooLs GmbH.
47  * 4. The name of TooLs GmbH may not be used to endorse or promote products
48  *    derived from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
51  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
55  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
56  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60  *
61  *	from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
62  */
63 
64 #ifndef	_MACHINE_PMAP_H_
65 #define	_MACHINE_PMAP_H_
66 
67 #include <sys/queue.h>
68 #include <sys/tree.h>
69 #include <sys/_cpuset.h>
70 #include <sys/_lock.h>
71 #include <sys/_mutex.h>
72 #include <machine/sr.h>
73 #include <machine/pte.h>
74 #include <machine/slb.h>
75 #include <machine/tlb.h>
76 
/*
 * AIM (hashed-page-table MMU, desktop/server PowerPC) pmap definitions.
 * The Book-E (Freescale e500) variant follows in the #else branch below.
 */
77 #if defined(AIM)
78 
/*
 * Upper bound on the number of pmaps (address spaces).  NOTE(review):
 * presumably bounds VSID allocation — confirm in the pmap implementation.
 */
79 #if !defined(NPMAPS)
80 #define	NPMAPS		32768
81 #endif /* !defined(NPMAPS) */
82 
/* Forward declarations; struct slbtnode is private to the SLB code. */
83 struct	slbtnode;
84 struct	pmap;
85 typedef	struct pmap *pmap_t;
86 
/*
 * PVO entry: tracks a single virtual-page mapping for the AIM pmap.
 * Each entry is linked into the per-physical-page list (md_page.mdpg_pvoh)
 * via pvo_vlink and into the owning pmap's RB tree (pmap.pmap_pvo) via
 * pvo_plink.
 */
87 struct pvo_entry {
88 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
89 #ifndef __powerpc64__
90 	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
91 #endif
92 	RB_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
93 	struct {
94 #ifndef __powerpc64__
95 		/* 32-bit fields */
96 		struct	pte pte;		/* PTE image for this mapping */
97 #endif
98 		/* 64-bit fields */
99 		uintptr_t   slot;		/* hash-table slot; see PVO_HID below */
100 		vm_paddr_t  pa;			/* physical address mapped */
101 		vm_prot_t   prot;		/* protection of the mapping */
102 	} pvo_pte;
103 	pmap_t		pvo_pmap;		/* Owning pmap */
104 	vm_offset_t	pvo_vaddr;		/* VA of entry */
105 	uint64_t	pvo_vpn;		/* Virtual page number */
106 };
107 LIST_HEAD(pvo_head, pvo_entry);
108 RB_HEAD(pvo_tree, pvo_entry);
/* Orders pvo_entry nodes within a pvo_tree by virtual address. */
109 int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
110 RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
111 
/*
 * PVO flag bits.  The flags live in the low, sub-page bits of pvo_vaddr;
 * PVO_VADDR() masks them off to recover the page-aligned virtual address.
 * Note 0x008UL is shared: PVO_PTEGIDX_VALID on 32-bit, PVO_HID on 64-bit.
 */
112 /* Used by 32-bit PMAP */
113 #define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
114 #define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
115 /* Used by 64-bit PMAP */
116 #define	PVO_HID			0x008UL		/* PVO entry in alternate hash*/
117 /* Used by both */
118 #define	PVO_WIRED		0x010UL		/* PVO entry is wired */
119 #define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
120 #define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
121 						   bootstrap */
122 #define PVO_DEAD		0x100UL		/* waiting to be deleted */
123 #define PVO_LARGE		0x200UL		/* large page */
/* Page-aligned virtual address of the entry (flag bits stripped). */
124 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
/* Accessors for the PTEG slot index cached in the low bits of pvo_vaddr. */
125 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
126 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
127 #define	PVO_PTEGIDX_CLR(pvo)	\
128 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
129 #define	PVO_PTEGIDX_SET(pvo, i)	\
130 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
/* VSID occupies the bits of the virtual page number above bit 16. */
131 #define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
132 
/*
 * AIM per-address-space pmap.  Locked by pm_mtx (see the PMAP_LOCK()
 * family of macros below).
 */
133 struct	pmap {
134 	struct	mtx	pm_mtx;		/* protects this pmap */
135 
136     #ifdef __powerpc64__
	/* 64-bit: SLB entries, kept both as a tree and as a cache array. */
137 	struct slbtnode	*pm_slb_tree_root;
138 	struct slb	**pm_slb;
139 	int		pm_slb_len;
    #else
	/* 32-bit: the 16 segment registers for this address space. */
141 	register_t	pm_sr[16];
142     #endif
143 	cpuset_t	pm_active;	/* CPUs this pmap is active on */
144 
	/* NOTE(review): purpose of pmap_phys not evident here — see pmap.c. */
145 	struct pmap	*pmap_phys;
146 	struct		pmap_statistics	pm_stats;
147 	struct pvo_tree pmap_pvo;	/* RB tree of this pmap's PVO entries */
148 };
149 
/*
 * AIM machine-dependent part of struct vm_page: per-page attribute bits,
 * cache/memory attribute, and the list of PVO entries mapping the page.
 */
150 struct	md_page {
151 	volatile int32_t mdpg_attrs;
152 	vm_memattr_t	 mdpg_cache_attrs;
153 	struct	pvo_head mdpg_pvoh;
154 };
155 
156 #define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
/* A page is mapped iff at least one PVO entry references it. */
157 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
158 
159 /*
160  * Return the VSID corresponding to a given virtual address.
161  * If no VSID is currently defined, it will allocate one, and add
162  * it to a free slot if available.
163  *
164  * NB: The PMAP MUST be locked already.
165  */
166 uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
167 
168 /* Lock-free, non-allocating lookup routines */
169 uint64_t kernel_va_to_slbv(vm_offset_t va);
170 struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);
171 
/* VSID allocation/free for a user ESID; 'large' selects large-page VSIDs. */
172 uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
173 void	free_vsid(pmap_t pm, uint64_t esid, int large);
/* Insert an SLB entry: user entries go via the pmap, kernel by raw values. */
174 void	slb_insert_user(pmap_t pm, struct slb *slb);
175 void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);
176 
/* Lifecycle of the per-pmap SLB tree and the user SLB cache array. */
177 struct slbtnode *slb_alloc_tree(void);
178 void     slb_free_tree(pmap_t pm);
179 struct slb **slb_alloc_user_cache(void);
180 void	slb_free_user_cache(struct slb **);
181 
182 #else
183 
/*
 * Book-E (Freescale e500) per-address-space pmap.  Uses software-managed
 * page-table directories and per-CPU TLB TIDs rather than a hashed page
 * table.  Locked by pm_mtx (see the PMAP_LOCK() family below).
 */
184 struct pmap {
185 	struct mtx		pm_mtx;		/* pmap mutex */
186 	tlbtid_t		pm_tid[MAXCPU];	/* TID to identify this pmap entries in TLB */
187 	cpuset_t		pm_active;	/* active on cpus */
188 	struct pmap_statistics	pm_stats;	/* pmap statistics */
189 
190 	/* Page table directory, array of pointers to page tables. */
191 	pte_t			*pm_pdir[PDIR_NENTRIES];
192 
193 	/* List of allocated ptbl bufs (ptbl kva regions). */
194 	TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
195 };
196 typedef	struct pmap *pmap_t;
197 
/*
 * Book-E physical-to-virtual entry: records one (pmap, va) mapping of a
 * physical page; chained from md_page.pv_list.
 */
198 struct pv_entry {
199 	pmap_t pv_pmap;			/* pmap owning the mapping */
200 	vm_offset_t pv_va;		/* virtual address of the mapping */
201 	TAILQ_ENTRY(pv_entry) pv_link;	/* link in the page's pv_list */
202 };
203 typedef struct pv_entry *pv_entry_t;
204 
/*
 * Book-E machine-dependent part of struct vm_page: just the list of
 * pv_entry mappings of the page.
 */
205 struct md_page {
206 	TAILQ_HEAD(, pv_entry) pv_list;
207 };
208 
/* Book-E has no per-page cache attribute tracking; always default. */
209 #define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
/* A page is mapped iff at least one pv_entry references it. */
210 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
211 
212 #endif /* AIM */
213 
/* The kernel's address space; statically allocated storage. */
214 extern	struct pmap kernel_pmap_store;
215 #define	kernel_pmap	(&kernel_pmap_store)
216 
217 #ifdef _KERNEL
218 
/*
 * Per-pmap lock helpers wrapping pm_mtx.  Common to both the AIM and
 * Book-E struct pmap layouts, which each begin with pm_mtx.
 */
219 #define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
220 #define	PMAP_LOCK_ASSERT(pmap, type) \
221 				mtx_assert(&(pmap)->pm_mtx, (type))
222 #define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
223 #define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
224 				    (pmap == kernel_pmap) ? "kernelpmap" : \
225 				    "pmap", NULL, MTX_DEF)
226 #define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
227 #define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
228 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
229 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
230 
231 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
232 
/* Machine-dependent pmap entry points implemented by the active MMU. */
233 void		pmap_bootstrap(vm_offset_t, vm_offset_t);
/* Enter a wired kernel mapping of pa at va. */
234 void		pmap_kenter(vm_offset_t va, vm_paddr_t pa);
/*
 * Same, with an explicit memory attribute.  NOTE(review): 'pa' is declared
 * vm_offset_t here while pmap_kenter() and pmap_mapdev() take vm_paddr_t;
 * on 32-bit configurations with wider-than-pointer physical addresses this
 * truncates — confirm against the implementation before changing.
 */
235 void		pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);
236 void		pmap_kremove(vm_offset_t);
/* Map a range of device (physical) memory into kernel virtual space. */
237 void		*pmap_mapdev(vm_paddr_t, vm_size_t);
238 void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
239 void		pmap_unmapdev(vm_offset_t, vm_size_t);
240 void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
241 void		pmap_deactivate(struct thread *);
/* Translate a kernel virtual address to its physical address. */
242 vm_paddr_t	pmap_kextract(vm_offset_t);
243 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
/* Register an MMU implementation by name; higher prio wins selection. */
244 boolean_t	pmap_mmu_install(char *name, int prio);
245 
246 #define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
247 
248 #define PHYS_AVAIL_SZ	256	/* Allows up to 16GB Ram on pSeries with
249 				 * logical memory block size of 64MB.
250 				 * For more Ram increase the lmb or this value.
251 				 */
252 
/* Usable physical memory ranges and the kernel virtual address window. */
253 extern	vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
254 extern	vm_offset_t virtual_avail;
255 extern	vm_offset_t virtual_end;
256 
257 extern	vm_offset_t msgbuf_phys;
258 
/* Nonzero once pmap_bootstrap() has completed. */
259 extern	int pmap_bootstrapped;
260 
/* Early (pre-VM) mapping of I/O ranges, before pmap_mapdev() is usable. */
261 vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
262 
263 #endif
264 
265 #endif /* !_MACHINE_PMAP_H_ */
266