/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some of
 * the fields are meaningful only at certain paging levels or when the
 * corresponding CPU feature is enabled.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_NX		(1ul<<63) /* No-execute */

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */

/*
 * Pte related macros.  These are complicated by the sign extension of
 * bit 47 (the "48th bit"): a canonical address copies bit 47 into
 * bits 48-63.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
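
/*
 * Worked example, assuming the standard amd64 shifts (PML4SHIFT = 39,
 * PDPSHIFT = 30, PDRSHIFT = 21, PAGE_SHIFT = 12):
 *
 *	KVADDR(511, 510, 0, 0)
 *	    = 0xffff800000000000 | (511ul << 39) | (510ul << 30)
 *	    = 0xffffffff80000000
 *
 * which is KERNBASE, the -2GB kernel text address (see KPML4I and
 * KPDPI below).
 */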

/* Initial number of kernel page tables */
#ifndef NKPT
#define	NKPT		240	/* Enough for 16GB (2MB page tables) */
#endif

#define	NKPML4E		1		/* number of kernel PML4 slots */
#define	NKPDPE		1		/* number of kernel PDP slots */
#define	NKPDE		(NKPDPE*NPDEPG)	/* number of kernel PD slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG) /* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */
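
/*
 * Worked arithmetic: with 512-entry page-table pages, NUPML4E is
 * 512/2 = 256 user PML4 slots.  Each PML4 slot maps 512GB, so
 * userland spans 256 * 512GB = 128TB, i.e. VA 0 through
 * 0x00007fffffffffff.
 */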

#define	NDMPML4E	1		/* number of dmap PML4 slots */

/*
 * The *PDI values control the layout of virtual memory
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
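
/*
 * For illustration: with NPML4EPG = NPDPEPG = 512 these indices
 * evaluate through KVADDR() to
 *
 *	PML4PML4I = 256 -> recursive map at 0xffff800000000000
 *	DMPML4I   = 510 -> direct map at    0xffffff0000000000
 *	KPML4I = 511, KPDPI = 510 -> KERNBASE at 0xffffffff80000000
 */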

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	  0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 * XXX it might be saner to just direct map all of physical memory
 * into the kernel using 2MB pages.  We have enough space to do
 * it (2^47 bits of KVM, while current max physical addressability
 * is 2^40 physical bits).  Then we can get rid of the evil hole
 * in the page tables and the evil overlapping.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pdp_entry_t *)(addr_PDPmap))
#define	PML4map		((pml4_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pml4_entry_t *)(addr_PML4pml4e))
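
/*
 * For illustration, with PML4PML4I = 256 these evaluate to
 *
 *	addr_PTmap   = 0xffff800000000000	(512GB of PTEs)
 *	addr_PDmap   = 0xffff804000000000	(1GB of PDEs)
 *	addr_PDPmap  = 0xffff804020000000	(2MB of PDPEs)
 *	addr_PML4map = 0xffff804020100000	(4KB, the PML4 itself)
 *
 * each level being reached by following the recursive slot one more
 * time.
 */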

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

#ifdef _KERNEL
/*
 * Translate a virtual address to its page table entry and to its
 * physical address.
 * Note: these work recursively, thus vtopte of a pte address gives
 * the pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
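
/*
 * A minimal sketch of how vtopte() can be built on the recursive
 * PTmap; the real implementation lives in pmap.c.  The mask assumes
 * the standard layout of 9 index bits at each of the 4 levels, so a
 * page number has 36 significant bits below the sign extension.
 */
#if 0
static __inline pt_entry_t *
vtopte_sketch(vm_offset_t va)
{
	u_int64_t mask;

	/* 4 levels x 9 index bits = 36 page-number bits. */
	mask = (1ul << 36) - 1;
	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
#endif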

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

/*
 * Atomically exchange *ptep with pte, returning the old entry; on
 * amd64 an xchg with a memory operand carries an implicit lock.
 */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	__asm __volatile(
	    "xchgq %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}
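
/*
 * Illustrative use of pte_load_store(): replace a live mapping
 * without losing hardware A/D bit updates.  "ptep", "newpte" and "m"
 * are hypothetical; this fragment is not compiled.
 */
#if 0
	pt_entry_t obits;

	obits = pte_load_store(ptep, newpte);
	if (obits & PG_M)
		vm_page_dirty(m);	/* propagate the hardware dirty bit */
#endif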

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
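
/*
 * Typical usage, with a hypothetical pmap pointer and body:
 *
 *	PMAP_LOCK(pmap);
 *	(... walk or modify the pmap's page tables ...)
 *	PMAP_UNLOCK(pmap);
 */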
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list hangs
 * off the page's md_page (m->md.pv_list).
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;
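
/*
 * Illustrative walk over every mapping of a hypothetical page "m";
 * this fragment is not compiled.
 */
#if 0
	pv_entry_t pv;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		printf("va %#lx in pmap %p\n", pv->pv_va, pv->pv_pmap);
#endif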

#ifdef	_KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

void	pmap_bootstrap(vm_paddr_t *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
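
/*
 * Illustrative sequence copying a physical page through a transient
 * kernel mapping.  "scratch_va" and "buf" are hypothetical, error
 * handling is omitted, and the fragment is not compiled.
 */
#if 0
	pmap_kenter(scratch_va, pa);	/* map pa at a spare kernel VA */
	bcopy((void *)scratch_va, buf, PAGE_SIZE);
	pmap_kremove(scratch_va);	/* tear down the mapping... */
	pmap_invalidate_page(kernel_pmap, scratch_va);	/* ...and the TLB */
#endif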

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */