/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

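/*
 * For the get-and-clear helpers below: on SMP the entry must be cleared
 * with a single atomic xchg(), so that a racing hardware update of the
 * Accessed/Dirty bits cannot be lost between the read and the clear.
 * On UP a plain read-then-clear is sufficient.
 */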
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear(),
	 * but duplicated because of a cyclic dependency.
	 */
	pte_t ret = *xp;

	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear(),
	 * but duplicated because of a cyclic dependency.
	 */
	pmd_t ret = *xp;

	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear(),
	 * but duplicated because of a cyclic dependency.
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

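/*
 * With 4-level paging the p4d level is folded into the pgd: a p4d entry
 * is then really a top-level pgd entry, so under page table isolation
 * the user-space copy of the page table must be updated here as well,
 * via pti_set_user_pgtbl().
 */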
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled() ||
	    !IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION)) {
		WRITE_ONCE(*p4dp, p4d);
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* PGD - Level 4 access */

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| E|F|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * Bits 1-4 are not used in the non-present format and are available
 * for the special uses described below:
 *
 * SD (1) in the swp entry stores the soft-dirty bit, which helps us
 * remember soft-dirty state across page migration.
 *
 * F (2) in the swp entry records that a page table entry has been
 * write-protected by userfaultfd WP support.
 *
 * E (3) in the swp entry remembers PG_anon_exclusive.
 *
 * Bit 7 in the swp entry must be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.  A worked example follows the macros below.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/*
 * We always extract/encode the offset by shifting it all the way up,
 * and then down again.
 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again.
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64 - SWP_TYPE_BITS)) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		(__pte((x).val))
#define __swp_entry_to_pmd(x)		(__pmd((x).val))
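
/*
 * Worked example (a sketch, assuming _PAGE_BIT_PROTNONE == 8, which
 * makes SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14):
 *
 *	__swp_entry(1, 0)
 *		== (~0UL << 14 >> 5) | (1UL << 59)
 *		== 0x0ffffffffffffe00
 *
 * i.e. type 1 lands in bits 59-63, the inverted offset fills bits 9-58
 * (all ones here, since the offset is 0), and the low bits stay clear.
 * Decoding reverses this: __swp_type() yields 1 and __swp_offset()
 * yields 0 for this value.
 */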

extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
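
/*
 * For example (assuming a 48-bit virtual layout, __VIRTUAL_MASK_SHIFT
 * == 47): kc_vaddr_to_offset() strips the sign-extension bits from a
 * canonical kernel address and kc_offset_to_vaddr() restores them, so
 * the two are inverses for kernel-half addresses.
 */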

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

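/*
 * GUP-fast runs without the mmap lock, so it must never walk addresses
 * outside the canonical user half: if any bit at or above
 * __VIRTUAL_MASK_SHIFT is set in 'end', the range could reach
 * non-canonical or kernel space and the fast path refuses it.
 */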
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#include <asm/pgtable-invert.h>

#else /* __ASSEMBLY__ */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
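
/*
 * Hypothetical usage sketch (for illustration only; the table name is
 * made up, and the permission constant is assumed to come from
 * asm/pgtable_types.h): emit 512 2MB identity-mapping entries starting
 * at physical address 0:
 *
 *	SYM_DATA_START_PAGE_ALIGNED(example_pmd_table)
 *		PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 512)
 *	SYM_DATA_END(example_pmd_table)
 */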

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */