xref: /freebsd/sys/i386/include/pmap.h (revision 7660b554bc59a07be0431c17e0e33815818baa69)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large, sparse address
 * spaces, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define PG_RW		0x002	/* R/W	Read/Write		*/
#define PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
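
/*
 * Illustrative sketch (not part of the original header): a leaf pte is the
 * page-aligned physical frame address OR'ed with the flag bits above.  For
 * example, a valid, writable, kernel-only 4K mapping of the physical
 * address "pa" would look roughly like:
 *
 *	pt_entry_t pte = pa | PG_V | PG_RW;
 *
 * The MMU later sets PG_A on the first access and PG_M on the first write
 * made through the mapping.
 */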


/* Our various interpretations of the above */
#define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(~((vm_paddr_t)PAGE_MASK))
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */

/*
 * Size of the kernel address space.  This is the number of page table pages
 * to use for the kernel (each mapping 4MB without PAE, 2MB with PAE), so
 * 256 non-PAE pages or 512 PAE pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g. 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#ifdef PAE
#define KVA_PAGES	512
#else
#define KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
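
/*
 * Illustrative arithmetic (not from the original header), assuming the
 * non-PAE layout where PDRSHIFT == 22 and PAGE_SHIFT == 12:
 *
 *	VADDR(768, 0) == 0xc0000000	(the traditional KERNBASE)
 *	VADDR(768, 1) == 0xc0001000	(one 4K page higher)
 *	VADDR(769, 0) == 0xc0400000	(one page directory slot higher)
 */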

#ifndef NKPT
#ifdef PAE
#define	NKPT		120	/* actual number of kernel page tables */
#else
#define	NKPT		30	/* actual number of kernel page tables */
#endif
#endif
#ifndef NKPDE
#ifdef SMP
#define NKPDE	(KVA_PAGES - 1) /* number of page tables/pde's */
#else
#define NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif
#endif

/*
 * The *PTDI values control the layout of virtual memory.
 *
 * XXX This works for now, but I am not really happy with it; I'll fix it
 * right after I fix locore.s and the magic 28K hole.
 *
 * SMP_PRIVPAGES: the per-cpu address space is 0xff800000 -> 0xffbfffff.
 */
#ifdef SMP
#define MPPTDI		(NPDEPTD-1)	/* per cpu ptd entry */
#define	KPTDI		(MPPTDI-NKPDE)	/* start of kernel virtual pde's */
#else
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#endif	/* SMP */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */
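
/*
 * Illustrative sketch (not from the original header), assuming the non-PAE,
 * non-SMP defaults (NPDEPTD == 1024, NKPDE == KVA_PAGES == 256, NPGPTD == 1):
 *
 *	KPTDI   == 768	-> kernel space starts at VADDR(768, 0) == 0xc0000000
 *	PTDPTDI == 767	-> the recursive page table map occupies
 *			   0xbfc00000 - 0xbfffffff, just below the kernel
 */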

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

#ifdef PAE

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * Translate a virtual address to its page table entry and to a physical
 * address.  Likewise for the alternate address space.
 * Note: these work recursively; taking vtopte of a pte address yields the
 * pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
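
/*
 * Illustrative sketch (not from the original header): because the page
 * directory entry at index PTDPTDI points back at the page directory page
 * itself, every page table appears as ordinary memory inside the window
 * starting at PTmap == VADDR(PTDPTDI, 0).  Assuming the non-PAE defaults
 * (PTDPTDI == 767, 4-byte ptes):
 *
 *	PTmap              == 0xbfc00000
 *	vtopte(0xc0100000) == PTmap + (0xc0100000 >> PAGE_SHIFT)
 *	                   == (pt_entry_t *)0xbff00400
 *
 * so reading or writing a pte is just a memory access through this window.
 */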

/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with a
 *		kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		/* PG_PS superpage: the pde itself maps the large page. */
		pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		/* Regular 4K page: read the pte through the recursive map. */
		pa = *vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))

#ifdef PAE

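/*
 * Descriptive note (not from the original header): under PAE a pte is 64
 * bits wide, but a plain 32-bit store is not atomic for a 64-bit object, so
 * the MMU or another CPU could observe a half-updated entry and hardware
 * updates of PG_A/PG_M could be lost.  The routines below therefore use
 * cmpxchg8b so that a pte is read, and read-modify-written, as a single
 * atomic 64-bit operation.
 */
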
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	/*
	 * cmpxchg8b with a zero comparand and zero new value either writes
	 * 0 over a 0 (no change) or fails and fetches the current value
	 * into edx:eax; either way this is an atomic 64-bit read.
	 */
	__asm __volatile(
	    "lock; cmpxchg8b %1"
	    : "=A" (r)
	    : "m" (*ptep), "a" (0), "d" (0), "b" (0), "c" (0));
	return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	/* Retry until the 64-bit exchange succeeds; return the old pte. */
	r = *ptep;
	__asm __volatile(
	    "1:\n"
	    "\tlock; cmpxchg8b %1\n"
	    "\tjnz 1b"
	    : "+A" (r)
	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
	return (r);
}

#define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)

#else /* PAE */

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	r = *ptep;
	*ptep = pte;
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_int(pte)

#endif /* PAE */

#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
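
/*
 * Minimal usage sketch (not from the original header; "va" and "pa" are
 * hypothetical, page-aligned arguments): establishing and later tearing
 * down a wired kernel mapping typically looks like
 *
 *	pte_store(vtopte(va), pa | PG_RW | PG_V);
 *	pmap_invalidate_page(kernel_pmap, va);
 *	...
 *	pte_clear(vtopte(va));
 *	pmap_invalidate_page(kernel_pmap, va);
 *
 * i.e. write the entry through the recursive map, then flush the TLB entry.
 */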

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	vm_object_t		pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
#ifdef PAE
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
};

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list head lives
 * in the page's md_page (md.pv_list).
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;
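
/*
 * Minimal traversal sketch (not from the original header): to visit every
 * mapping of a page "m" (a vm_page_t), walk its pv list with the queue(3)
 * macros pulled in by <sys/queue.h> above:
 *
 *	pv_entry_t pv;
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		printf("mapped at va %#x in pmap %p\n", pv->pv_va, pv->pv_pmap);
 *
 * pv_plist is the companion linkage for the per-pmap list (pm_pvlist).
 */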

#ifdef	_KERNEL

#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_paddr_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_paddr_t, vm_paddr_t);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte_quick(pmap_t, vm_offset_t) __pure2;
void	pmap_set_opt(void);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */