/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler.  This version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large sparse address
 * spaces, and to reduce the per-process memory cost.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with some of
 * the fields absent in certain entry types, depending on the paging mode
 * and processor features in use.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define PG_RW		0x002	/* R/W	Read/Write		*/
#define PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_NX		(1ull<<63) /* No-execute */
#endif

/* Our various interpretations of the above */
#define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
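
/*
 * For illustration only (editorial example, not part of the kernel
 * interface): a leaf PTE is the page frame address, masked by PG_FRAME,
 * combined with the flag bits above.  A hypothetical valid, writable
 * kernel mapping of physical page "pa" could be composed as
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_RW | PG_V;
 *
 * and made non-cacheable by also or'ing in PG_N.
 */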

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */
#define PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define PGEX_I		0x10	/* during an instruction fetch */

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260, etc.).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#if defined(PAE) || defined(PAE_TABLES)
#define KVA_PAGES	512
#else
#define KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
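
/*
 * Worked example (editorial, assuming the non-PAE values PDRSHIFT == 22 and
 * PAGE_SHIFT == 12): VADDR() composes a virtual address from a page
 * directory index and a page table index, so
 *
 *	VADDR(1, 2) == (1 << 22) | (2 << 12) == 0x00402000
 *
 * i.e. the third 4KB page within the second 4MB directory region.
 */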

/*
 * The initial number of kernel page table pages that are constructed
 * by locore must be sufficient to map vm_page_array.  That number can
 * be calculated as follows:
 *     max_phys / PAGE_SIZE * sizeof(struct vm_page) / NBPDR
 * PAE:      max_phys 16G, sizeof(vm_page) 76, NBPDR 2M, 152 page table pages.
 * PAE_TABLES: max_phys 4G,  sizeof(vm_page) 68, NBPDR 2M, 36 page table pages.
 * Non-PAE:  max_phys 4G,  sizeof(vm_page) 68, NBPDR 4M, 18 page table pages.
 */
#ifndef NKPT
#if defined(PAE)
#define	NKPT		240
#elif defined(PAE_TABLES)
#define	NKPT		60
#else
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory.
 *
 * XXX This works for now, but I am not really happy with it; I'll fix it
 * right after I fix locore.s and the magic 28K hole.
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */

/*
 * XXX doesn't really belong here, I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

#if defined(PAE) || defined(PAE_TABLES)

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#if defined(PAE) || defined(PAE_TABLES)
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
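
/*
 * For example (an illustrative sketch of the recursion described above):
 *
 *	pt_entry_t *pte = vtopte(va);
 *	pd_entry_t *pde = (pd_entry_t *)vtopte((vm_offset_t)pte);
 *
 * The second call resolves the PTE's own mapping, which by construction of
 * the recursive map is the PDE covering "va".
 */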

/*
 * Translate a virtual address to its physical address.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page after the corresponding virtual addresses have
 * been promoted to a 2/4MB page mapping.
 *
 * KPTmap is first initialized by locore to support just NKPT page table
 * pages.  Later, it is reinitialized by pmap_bootstrap() to allow for
 * expansion of the kernel page table.
 */
extern pt_entry_t *KPTmap;

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}
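
/*
 * Worked example (editorial, non-PAE values assumed): if PTD[va >> 22] is a
 * 4MB mapping with PG_PS set and frame base 0x00800000, then for
 * va == 0x00c12345 the superpage path above yields
 *
 *	(0x00800000 & PG_PS_FRAME) | (0x00c12345 & PDRMASK) == 0x00812345
 *
 * i.e. the 4MB frame base plus the offset within the superpage.
 */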

#if (defined(PAE) || defined(PAE_TABLES))

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_64_i586(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_64_i586(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_64_i586(ptep, 0)
#define	pte_store(ptep, pte)		atomic_store_rel_64_i586(ptep, pte)

extern pt_entry_t pg_nx;

#else /* !(PAE || PAE_TABLES) */

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_int(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_int(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_int(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_int *)(ptep) = (u_int)(pte); \
} while (0)

#endif /* !(PAE || PAE_TABLES) */

#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)
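
/*
 * Usage sketch (illustrative only; the real callers live in pmap.c):
 * tearing down a live mapping atomically swaps the old PTE out and then
 * invalidates the stale TLB entry, e.g.
 *
 *	pt_entry_t obits = pte_load_clear(ptep);
 *	if (obits & PG_V)
 *		pmap_invalidate_page(kernel_pmap, va);
 *
 * The atomic swap ensures accessed/dirty bits set concurrently by the
 * hardware page walker are not lost.
 */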

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#if defined(PAE) || defined(PAE_TABLES)
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
	struct vm_radix		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
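
/*
 * Typical locking pattern (an illustrative sketch; the real consumers live
 * in pmap.c and the VM layer):
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	... examine or modify the mapping ...
 *	PMAP_UNLOCK(pmap);
 *
 * MTX_DUPOK in PMAP_LOCK_INIT() permits holding two pmap locks at once,
 * as happens when a single operation must lock a pair of pmaps.
 */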

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
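
/*
 * Sizing check (editorial note): _NPCM * 32 == 352 bitmap bits cover the
 * _NPCPV == 336 entries, so only 336 - 10 * 32 == 16 bits of the last
 * pc_map word are usable; the remaining 16 bits stay zero (never free).
 */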

#ifdef	_KERNEL

extern caddr_t	CADDR3;
extern pt_entry_t *CMAP3;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte().
 */
void	pmap_bootstrap(vm_paddr_t);
int	pmap_cache_bits(int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force);
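
/*
 * Usage sketch (hypothetical driver code, for illustration only; the
 * physical base address is an arbitrary example value): mapping a device
 * register window into KVA and releasing it again.
 *
 *	void *regs = pmap_mapdev(0xfee00000, PAGE_SIZE);
 *	... access device registers through "regs" ...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 */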

void	invltlb_glob(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */