/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define PG_RW		0x002	/* R/W	Read/Write		*/
#define PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/


/* Our various interpretations of the above */
#define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(~((vm_paddr_t)PAGE_MASK))
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
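
/*
 * Example (illustrative only): a writable, global kernel mapping of
 * physical page "pa" could be composed from the bits above as
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_RW | PG_V | PG_G;
 *
 * while a managed userland mapping would use PG_U instead of PG_G
 * and would typically carry PG_MANAGED as well.
 */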

/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */

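/*
 * Sketch (illustrative, not the actual trap handler): decoding the
 * page-fault error code delivered by the hardware using these bits:
 *
 *	if (err & PGEX_P)
 *		... protection violation on a present page ...
 *	else
 *		... page was not present ...
 *	if (err & PGEX_W)
 *		... fault occurred on a write access ...
 *	if (err & PGEX_U)
 *		... access originated in user mode ...
 */
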
/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define VADDR_SIGN(l4) \
	((l4) >= NPML4EPG/2 ? ((unsigned long)-1 << 47) : 0ul)
#define VADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | VADDR_SIGN(l4) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
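
/*
 * Worked examples: with 9-bit indices per level,
 * VADDR(1, 0, 0, 0) == 1ul << PML4SHIFT == 0x0000008000000000, and
 * any l4 index >= NPML4EPG/2 (256) selects the sign-extended upper
 * half, e.g. VADDR(256, 0, 0, 0) == 0xffff800000000000, the lowest
 * canonical kernel-half address.
 */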


#ifndef NKPT
#define	NKPT		120	/* initial number of kernel page tables */
#endif

#define NKPML4E		1		/* number of kernel PML4 slots */
#define NKPDPE		1		/* number of kernel PDP slots */
#define	NKPDE		(NKPDPE*NPDEPG)	/* number of kernel PD slots */

#define	NUPML4E		1		/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define	NDMPML4E	1		/* number of dmap PML4 slots */

/*
 * The *PML4I and *PDPI values control the layout of virtual memory
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-1)	/* kernbase at -1GB */
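
/*
 * Plugging the indices above into VADDR() gives the layout:
 *
 *	VADDR(PML4PML4I, 0, 0, 0)  == 0xffff800000000000  recursive map
 *	VADDR(DMPML4I, 0, 0, 0)    == 0xffffff0000000000  direct map
 *	VADDR(KPML4I, 0, 0, 0)     == 0xffffff8000000000  KVM
 *	VADDR(KPML4I, KPDPI, 0, 0) == 0xffffffffc0000000  kernbase (-1GB)
 */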

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 * XXX it might be saner to just direct map all of physical memory
 * into the kernel using 2MB pages.  We have enough space to do
 * it (2^47 bytes of KVM, while current maximum physical
 * addressability is 40 bits, i.e. 2^40 bytes).  Then we can get rid
 * of the evil hole in the page tables and the evil overlapping.
 */
#ifdef _KERNEL
#define	addr_PTmap	(VADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(VADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(VADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(VADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pdp_entry_t *)(addr_PDPmap))
#define	PML4map		((pml4_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pml4_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

#ifdef _KERNEL
/*
 * Virtual address to page table entry and to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
vm_paddr_t pmap_kextract(vm_offset_t);

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
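
/*
 * Sketch of how vtopte() can be implemented on top of the recursive
 * map (an illustration; the real implementation lives in pmap.c):
 * the recursive slot turns the translation into pure address
 * arithmetic, indexing PTmap by the 36 bits (4 levels x 9 bits) of
 * page-table index in the virtual address:
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = (1ul << (4 * 9)) - 1;
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */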

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

/*
 * Store a new pte value and return the previous one.  This is a
 * plain load/store pair, not an atomic swap.
 */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	r = *ptep;
	*ptep = pte;
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)(pte))

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
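
/*
 * Usage sketch (hypothetical, not the kernel's actual code): wiring
 * a single kernel page could combine vtopte() and pte_store():
 *
 *	void
 *	kenter_sketch(vm_offset_t va, vm_paddr_t pa)
 *	{
 *		pte_store(vtopte(va), pa | PG_RW | PG_V | PG_G);
 *	}
 *
 * pte_load_clear() is the atomic variant for paths that must not
 * lose Accessed/Dirty bit updates made concurrently by the MMU.
 */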

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	vm_object_t		pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	u_long			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
};

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;
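
/*
 * Illustrative traversal (not actual kernel code): every mapping of
 * a page "m" can be visited through its md_page pv list, e.g.:
 *
 *	struct pv_entry *pv;
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		... pv->pv_pmap maps m at pv->pv_va ...
 *
 * while pv_plist threads the same entries onto the owning pmap's
 * pm_pvlist.
 */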

#ifdef	_KERNEL

#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_paddr_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_paddr_t *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte_quick(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */