/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version, by William
 * Jolitz, uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields unused in places, depending on the paging mode and the
 * level of the entry.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#ifdef PAE
#define	PG_NX		(1ull<<63) /* No-execute */
#endif
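
/*
 * Illustrative sketch: a valid, writable kernel mapping of physical page
 * "pa" is composed by OR-ing the frame address with flag bits, e.g.
 *
 *	pte = pa | PG_V | PG_RW | PG_A | PG_M;
 *
 * The Xen support below uses exactly this flag combination as PG_KERNEL;
 * PG_G is added when the CPU supports global pages (see pgeflag below).
 */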


/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#ifdef PAE
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/*
 * Promotion to a 2- or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
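
/*
 * A promotion pass can thus check that each PTE in the run agrees with
 * the first one in exactly these fields (an illustrative sketch, not the
 * actual promotion algorithm):
 *
 *	if ((pte & PG_PTE_PROMOTE) != (first_pte & PG_PTE_PROMOTE))
 *		abort the promotion;
 */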

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
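
/*
 * These bits arrive in the error code that the CPU pushes on a page
 * fault.  A handler might decode them along these lines (an illustrative
 * sketch, not the actual trap code):
 *
 *	if (err & PGEX_RSV)
 *		panic("reserved PTE bits set");
 *	write_fault = (err & PGEX_W) != 0;
 *	from_user = (err & PGEX_U) != 0;
 *	if ((err & PGEX_P) == 0)
 *		the page was simply not present (a demand fault);
 */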

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260, etc).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#ifdef PAE
#define	KVA_PAGES	512
#else
#define	KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
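
/*
 * For instance, VADDR(2, 3) is the address of the fourth 4KB page within
 * the third page-directory slot: (2 << PDRSHIFT) | (3 << PAGE_SHIFT).
 */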

/* Initial number of kernel page tables. */
#ifndef NKPT
#ifdef PAE
/* 152 page tables needed to map 16G (76B "struct vm_page", 2M page tables). */
#define	NKPT		240
#else
/* 18 page tables needed to map 4G (72B "struct vm_page", 4M page tables). */
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define	NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory.
 *
 * XXX This works for now, but I am not really happy with it; I'll fix it
 * right after I fix locore.s and the magic 28K hole.
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */
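
/*
 * Worked example of the recursive map (non-PAE values, for illustration):
 * with NPDEPTD == 1024 and NKPDE == 256, KPTDI is 768 and PTDPTDI is 767,
 * so the PDE in slot 767 points back at the page directory itself.  The
 * page tables then appear as a linear array at VADDR(PTDPTDI, 0) (PTmap)
 * and the page directory itself at VADDR(PTDPTDI, PTDPTDI) (PTD).
 */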

/*
 * XXX doesn't really belong here, I guess...
 */
#define	ISA_HOLE_START	  0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef PAE

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * Translate a virtual address to its page table entry and to a
 * physical address.
 * Note: these work recursively; thus, vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
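
/*
 * Usage sketch: for a kernel virtual address va, vtopte(va) yields the
 * address of its PTE within the recursive PTmap window, and vtophys(va)
 * the physical address that va maps, e.g.
 *
 *	pt_entry_t *pte = vtopte(va);
 *	vm_paddr_t pa = vtophys(va);
 */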

#ifdef XEN
#include <sys/param.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenpmap.h>

extern pt_entry_t pg_nx;

#define	PG_KERNEL	(PG_V | PG_A | PG_RW | PG_M)

#define	MACH_TO_VM_PAGE(ma)	PHYS_TO_VM_PAGE(xpmap_mtop((ma)))
#define	VM_PAGE_TO_MACH(m)	xpmap_ptom(VM_PAGE_TO_PHYS((m)))

static __inline vm_paddr_t
pmap_kextract_ma(vm_offset_t va)
{
	vm_paddr_t ma;

	if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) {
		/* 4MB superpage: the PDE itself holds the frame. */
		ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK);
	}
	return (ma);
}

static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	/* Translate the machine address back to a physical address. */
	return (xpmap_mtop(pmap_kextract_ma(va)));
}
#define	vtomach(va)	pmap_kextract_ma(((vm_offset_t)(va)))

vm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va);

void	pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa);
void	pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
void	pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	/* Under Xen the page tables hold machine addresses; translate. */
	v = xpmap_ptom(v);
	r = *ptep;
	PT_SET_VA(ptep, v, TRUE);
	return (r);
}

static __inline pt_entry_t
pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	r = *ptep;
	PT_SET_VA_MA(ptep, v, TRUE);
	return (r);
}

#define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)

#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
#define	pte_store_ma(ptep, pte)	pte_load_store_ma((ptep), (pt_entry_t)pte)
#define	pde_store_ma(ptep, pte)	pte_load_store_ma((ptep), (pt_entry_t)pte)

#elif !defined(XEN)
/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with the
 *		given kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		/* 4MB superpage: frame and offset come from the PDE. */
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		pa = *vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}

#define	PT_UPDATES_FLUSH()
#endif

#if defined(PAE) && !defined(XEN)

#define	pde_cmpset(pdep, old, new) \
				atomic_cmpset_64((pdep), (old), (new))

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	/*
	 * A 64-bit PTE cannot be read atomically with 32-bit loads, so
	 * use cmpxchg8b with a zero comparand and zero new value: the
	 * entry's value is left unchanged and is returned in EDX:EAX
	 * (the "A" constraint).
	 */
	__asm __volatile(
	    "lock; cmpxchg8b %1"
	    : "=A" (r)
	    : "m" (*ptep), "a" (0), "d" (0), "b" (0), "c" (0));
	return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	/* Atomically exchange the 64-bit PTE by looping on cmpxchg8b. */
	r = *ptep;
	__asm __volatile(
	    "1:\n"
	    "\tlock; cmpxchg8b %1\n"
	    "\tjnz 1b"
	    : "+A" (r)
	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
	return (r);
}

/* XXXRU move to atomic.h? */
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	int64_t res = exp;

	__asm __volatile (
	"	lock ;			"
	"	cmpxchg8b %2 ;		"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"# atomic_cmpset_64"
	: "+A" (res),			/* 0 (result) */
	  "=m" (*dst)			/* 1 */
	: "m" (*dst),			/* 2 */
	  "b" ((uint32_t)src),
	  "c" ((uint32_t)(src >> 32)));

	return (res);
}

#define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)

#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)

extern pt_entry_t pg_nx;

#elif !defined(PAE) && !defined(XEN)

#define	pde_cmpset(pdep, old, new) \
				atomic_cmpset_int((pdep), (old), (new))

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	/* A 32-bit PTE can be read atomically with a plain load. */
	r = *ptep;
	return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	/* xchgl with a memory operand is implicitly locked: atomic swap. */
	__asm __volatile(
	    "xchgl %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_int(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#endif /* PAE */

#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#ifdef PAE
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
	vm_page_t		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
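
/*
 * Typical usage (a sketch): callers take the pmap lock around any walk
 * or update of the pmap's structures, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 */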
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_spare[2];
	struct pv_entry		pc_pventry[_NPCPV];
};
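
/*
 * Worked out, a chunk exactly fills one page on i386: 64 bytes of header
 * (4 + 8 + 11 * 4 + 2 * 4) plus 336 pv_entries of 12 bytes each gives
 * 64 + 4032 = 4096 bytes.  The 11 32-bit maps supply 352 free bits, of
 * which 336 are used, one per pv_entry.
 */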

#ifdef	_KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))

void	pmap_bootstrap(vm_paddr_t);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_set_pg(void);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */