/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present in some cases, depending on the processor
 * and the paging features in use.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define PG_RW		0x002	/* R/W	Read/Write		*/
#define PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/


/* Our various interpretations of the above */
#define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

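/*
 * Illustrative sketch only: a pte for a valid, writable, global kernel
 * mapping of a page-aligned physical address pa can be composed from the
 * bits above as
 *
 *	unsigned int pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;
 *
 * and the physical frame recovered again with (pte & PG_FRAME).
 */
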
/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */

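/*
 * Sketch (hypothetical helper, not declared elsewhere): the error code the
 * processor pushes on a page fault can be classified with these bits, e.g.
 *
 *	static __inline int
 *	pgex_user_write_protfault(int err)
 *	{
 *		return ((err & (PGEX_P | PGEX_W | PGEX_U)) ==
 *		    (PGEX_P | PGEX_W | PGEX_U));
 *	}
 *
 * i.e. a protection violation, during a write, from user mode.
 */
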
/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

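/*
 * For example, with the usual i386 values PDRSHIFT == 22 and
 * PAGE_SHIFT == 12, VADDR(2, 3) == (2 << 22) | (3 << 12) == 0x00803000.
 */
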
#ifndef NKPT
#define	NKPT			17	/* actual number of kernel page tables */
#endif
#ifndef NKPDE
#ifdef SMP
#define NKPDE			254	/* addressable number of page tables/pde's */
#else
#define NKPDE			255	/* addressable number of page tables/pde's */
#endif	/* SMP */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 *
 * SMP_PRIVPAGES: The per-cpu address space is 0xff800000 -> 0xffbfffff
 */
#define	APTDPTDI	(NPDEPG-1)	/* alt ptd entry that points to APTD */
#ifdef SMP
#define MPPTDI		(APTDPTDI-1)	/* per cpu ptd entry */
#define	KPTDI		(MPPTDI-NKPDE)	/* start of kernel virtual pde's */
#else
#define	KPTDI		(APTDPTDI-NKPDE)/* start of kernel virtual pde's */
#endif	/* SMP */
#define	PTDPTDI		(KPTDI-1)	/* ptd entry that points to ptd! */
#define	UMAXPTDI	(PTDPTDI-1)	/* ptd entry for user space end */
#define	UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */

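/*
 * Worked example (assuming NPDEPG == 1024, i.e. i386 without PAE, and the
 * SMP value of NKPDE):
 *
 *	APTDPTDI == 1023	alternate recursive slot	VA 0xffc00000
 *	MPPTDI	 == 1022	per-cpu pages			VA 0xff800000
 *	KPTDI	 ==  768	kernel pde's start		VA 0xc0000000
 *	PTDPTDI	 ==  767	recursive PTD slot		VA 0xbfc00000
 *	UMAXPTDI ==  766	last user pde
 */
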
/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;

#define PDESIZE		sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE		sizeof(pt_entry_t) /* for assembly files */

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;

extern pd_entry_t IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * Virtual address to page table entry and to physical address.
 * Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))

#define	avtopte(va)	(APTmap + i386_btop(va))

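/*
 * Illustrative use (sketch only): the raw pte currently mapping a wired
 * kernel virtual address va can be read through the recursive map as
 *
 *	unsigned int pte = *(unsigned int *)vtopte(va);
 *
 * after which (pte & PG_FRAME) | (va & PAGE_MASK) is the physical address,
 * provided the mapping is a valid 4K page (see pmap_kextract() below for
 * the variant that also handles 4M pages).
 */
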
/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with the
 *		given kernel virtual address.
 */
static __inline vm_offset_t
pmap_kextract(vm_offset_t va)
{
	vm_offset_t pa;

	if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PS) {
		/* 4M page: the pde itself holds the physical base. */
		pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		/* 4K page: look up the pte through the recursive map. */
		pa = *(vm_offset_t *)vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

#if 0
#define	vtophys(va)	(((vm_offset_t) (*vtopte(va))&PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))
#else
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
#endif

#define	avtophys(va)	(((vm_offset_t) (*avtopte(va))&PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))
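
/*
 * Usage sketch (hypothetical helper, for illustration only): vtophys() gives
 * the physical address behind a wired kernel mapping, e.g. when handing a
 * buffer to a DMA engine:
 *
 *	vm_offset_t
 *	kbuf_paddr(caddr_t kbuf)
 *	{
 *		return (vtophys(kbuf));
 *	}
 *
 * For multi-page buffers this must be done per page, since physically
 * contiguous backing is not guaranteed.
 */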

#endif

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	vm_object_t		pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	int			pm_count;	/* reference count */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct	vm_page		*pm_ptphint;	/* pmap ptp hint */
	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
};

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern pmap_t		kernel_pmap;
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;

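/*
 * Sketch (assumes the TAILQ_FOREACH() macro from <sys/queue.h>): per-pmap
 * walks use the pv_plist linkage hanging off pm_pvlist, while per-page walks
 * use pv_list from the md_page above, e.g.
 *
 *	pv_entry_t pv;
 *
 *	TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist)
 *		printf("va %#x\n", pv->pv_va);
 */
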
#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */

#ifdef	_KERNEL

#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

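/*
 * Sketch (assumes the rdmsr() inline from <machine/cpufunc.h>): the Pentium
 * Pro variable-range MTRRs can be captured into PPro_vmtrr[], since the
 * base/mask MSR pairs are laid out consecutively:
 *
 *	int i;
 *
 *	for (i = 0; i < NPPROVMTRR; i++) {
 *		PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
 *		PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
 *	}
 */
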
extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_offset_t avail_end;
extern vm_offset_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_offset_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap __P((vm_offset_t, vm_offset_t));
pmap_t	pmap_kernel __P((void));
void	*pmap_mapdev __P((vm_offset_t, vm_size_t));
void	pmap_unmapdev __P((vm_offset_t, vm_size_t));
unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
#ifdef SMP
void	pmap_set_opt __P((void));
#endif

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */