/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/kaslr.h>

/*
 * These are used to make use of C type-checking.
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	p4dval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;
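
/*
 * Worked example (illustrative sketch): because pte_t and pmd_t are distinct
 * single-member struct types rather than bare unsigned longs, mixing
 * page-table levels is a compile error:
 *
 *	pte_t pte;
 *	pmd_t pmd;
 *	pte = pmd;			// rejected: incompatible struct types
 *	pte = __pte(pmd_val(pmd));	// conversions must be spelled out
 *
 * With plain integer typedefs the first assignment would compile silently.
 */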

extern unsigned int __pgtable_l5_enabled;

#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */
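
/*
 * Usage sketch: code that runs before cpu_feature_enabled() works (e.g.
 * arch/x86/kernel/head64.c) opts in to the variable-based check by defining
 * the macro before any include:
 *
 *	#define USE_EARLY_PGTABLE_L5
 *	#include <asm/pgtable_64_types.h>
 *
 * Everyone else gets the cpu_feature_enabled(X86_FEATURE_LA57) form, which
 * is resolved through the normal CPU-feature machinery at runtime.
 */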

#define ARCH_PAGE_TABLE_SYNC_MASK \
	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)

extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

#endif	/* !__ASSEMBLER__ */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	pgdir_shift
#define PTRS_PER_PGD	512
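
/*
 * Worked example: pgdir_shift is 39 with 4-level paging and 48 with 5-level
 * paging, so one PGD entry maps 2^39 = 512 GiB or 2^48 = 256 TiB, and the
 * 512-entry PGD spans 256 TiB or 128 PiB of virtual address space
 * respectively.
 */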

/*
 * 4th level page in 5-level paging case
 */
#define P4D_SHIFT		39
#define MAX_PTRS_PER_P4D	512
#define PTRS_PER_P4D		ptrs_per_p4d
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE - 1))
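
/*
 * Worked example: P4D_SHIFT is always 39, so one P4D entry maps 512 GiB.
 * ptrs_per_p4d is the runtime knob for folding: 512 when 5-level paging is
 * active, 1 when the p4d level is folded into the pgd on 4-level hardware,
 * while MAX_PTRS_PER_P4D stays at the compile-time maximum of 512 for
 * sizing static arrays.
 */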

#define MAX_POSSIBLE_PHYSMEM_BITS	52

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
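
/*
 * Worked arithmetic for the fixed shifts above: a PTE maps one 4 KiB page,
 * a PMD entry maps 1 << 21 = 2 MiB, a PUD entry maps 1 << 30 = 1 GiB, and
 * each table holds 512 entries, so a full PMD covers 1 GiB, a full PUD
 * covers 512 GiB, and PGDIR_SIZE is either 512 GiB or 256 TiB depending on
 * pgdir_shift. The *_MASK values simply clear the offset bits, e.g.
 * PMD_MASK = ~(2 MiB - 1) = 0xffffffffffe00000.
 */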

/*
 * See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful vs. KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which
 * is correct as KASAN disables KASLR.
 */
#define MAXMEM			(1UL << MAX_PHYSMEM_BITS)

#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
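
/*
 * Worked example (4-level values, pgdir_shift == 39): PGD entry -256 puts
 * GUARD_HOLE_BASE_ADDR at -256 << 39 = 0xffff800000000000, and the 16-entry
 * hole spans 16 << 39 = 8 TiB, ending at 0xffff880000000000; this is the
 * hypervisor guard hole described in mm.rst.
 */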

#define LDT_PGD_ENTRY		-240UL
#define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
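
/*
 * Worked example (4-level): PGD entry -240 gives LDT_BASE_ADDR =
 * -240 << 39 = 0xffff880000000000, and the remap area is one PGD entry
 * (512 GiB) long; it holds the per-mm LDT mappings used with PTI.
 */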

#define __VMALLOC_BASE_L4	0xffffc90000000000UL
#define __VMALLOC_BASE_L5	0xffa0000000000000UL

#define VMALLOC_SIZE_TB_L4	32UL
#define VMALLOC_SIZE_TB_L5	12800UL

#define __VMEMMAP_BASE_L4	0xffffea0000000000UL
#define __VMEMMAP_BASE_L5	0xffd4000000000000UL

# define VMALLOC_START		vmalloc_base
# define VMALLOC_SIZE_TB	(pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START		vmemmap_base
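
/*
 * Worked example: with 4-level paging the vmalloc area is 32 TiB, running
 * from __VMALLOC_BASE_L4 (0xffffc90000000000 .. 0xffffe8ffffffffff when
 * KASLR leaves it in place); with 5-level paging it grows to 12800 TiB
 * (12.5 PiB) starting at __VMALLOC_BASE_L5. vmalloc_base and vmemmap_base
 * are variables so that CONFIG_RANDOMIZE_MEMORY can shift the regions at
 * boot.
 */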

#ifdef CONFIG_RANDOMIZE_MEMORY
# define DIRECT_MAP_PHYSMEM_END	direct_map_physmem_end
#endif

/*
 * End of the region for which vmalloc page tables are pre-allocated.
 * For non-KMSAN builds, this is the same as VMALLOC_END.
 * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
 * VMALLOC_START..VMALLOC_END (see below).
 */
#define VMEMORY_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)

#ifndef CONFIG_KMSAN
#define VMALLOC_END		VMEMORY_END
#else
/*
 * In KMSAN builds the vmalloc area is four times smaller, and the remaining
 * 3/4 are used to keep the metadata for virtual pages. The memory formerly
 * belonging to the vmalloc area is now laid out as follows:
 *
 * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
 * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
 *              VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
 * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
 *              VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
 * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
 *              - shadow for modules,
 *              KMSAN_MODULES_ORIGIN_START to
 *              KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
 */
#define VMALLOC_QUARTER_SIZE	((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)

/*
 * vmalloc metadata addresses are calculated by adding shadow/origin offsets
 * to the vmalloc address.
 */
#define KMSAN_VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
#define KMSAN_VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE << 1)

#define KMSAN_VMALLOC_SHADOW_START	(VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
#define KMSAN_VMALLOC_ORIGIN_START	(VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)

/*
 * The shadow and origin areas for modules are placed one after the other in
 * the last 1/4 of the vmalloc space.
 */
#define KMSAN_MODULES_SHADOW_START	(VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
#define KMSAN_MODULES_ORIGIN_START	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
#endif /* CONFIG_KMSAN */
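
/*
 * Worked example (4-level, KMSAN enabled): VMALLOC_SIZE_TB is 32, so
 * VMALLOC_QUARTER_SIZE is 8 TiB. Usable vmalloc then ends 8 TiB above
 * VMALLOC_START, the shadow quarter starts at VMALLOC_START + 8 TiB, the
 * origin quarter at VMALLOC_START + 16 TiB, and the module shadow/origin
 * pair sits in the final quarter starting at VMALLOC_START + 24 TiB.
 */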

#define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module section ends at the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
# define MODULES_END		_AC(0xffffffffff000000, UL)
#else
# define MODULES_END		_AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN		(MODULES_END - MODULES_VADDR)
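
/*
 * Worked example (assuming the 1 GiB KERNEL_IMAGE_SIZE used when KASLR is
 * enabled): MODULES_VADDR = __START_KERNEL_map (0xffffffff80000000) + 1 GiB
 * = 0xffffffffc0000000, so in the default layout MODULES_LEN is
 * 0xffffffffff000000 - 0xffffffffc0000000 = 1008 MiB.
 */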

#define ESPFIX_PGD_ENTRY	_AC(-2, UL)
#define ESPFIX_BASE_ADDR	(ESPFIX_PGD_ENTRY << P4D_SHIFT)

#define CPU_ENTRY_AREA_PGD	_AC(-4, UL)
#define CPU_ENTRY_AREA_BASE	(CPU_ENTRY_AREA_PGD << P4D_SHIFT)
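
/*
 * Worked example: both regions are placed via P4D_SHIFT (39), so their
 * addresses do not depend on the paging mode: ESPFIX_BASE_ADDR = -2 << 39 =
 * 0xffffff0000000000 and CPU_ENTRY_AREA_BASE = -4 << 39 = 0xfffffe0000000000,
 * i.e. 1 TiB and 2 TiB below the top of the address space respectively.
 */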

#define EFI_VA_START		( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END		(-68 * (_AC(1, UL) << 30))
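
/*
 * Worked example: EFI_VA_START = -4 GiB = 0xffffffff00000000 and
 * EFI_VA_END = -68 GiB = 0xffffffef00000000, a 64 GiB window. The EFI
 * runtime mappings are handed out top-down, which is why START is
 * numerically above END.
 */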

#define EARLY_DYNAMIC_PAGE_TABLES	64

#define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))
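
/*
 * Worked example: with 4 KiB pages and 8-byte pgd_t entries this evaluates
 * to (4096 / 2) / 8 = 256, i.e. the kernel half of the 512-entry PGD begins
 * at index 256, matching the upper (sign-extended) half of the virtual
 * address space.
 */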

/*
 * We borrow bit 3 to remember PG_anon_exclusive.
 */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_PWT

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */