/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>

/* duplicate of the declaration in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long physmem_end;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}
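
/*
 * A stand-alone, user-space sketch of the branchless conversion above.
 * Hedged: the constants below are illustrative 4-level-paging values;
 * the real ones come from asm/page_64_types.h, and phys_base is fixed
 * up at boot. When x is a kernel-text address, y does not wrap, so
 * x > y and we add phys_base; when x is a direct-map address, y wraps
 * past x and the sum reduces to x - PAGE_OFFSET. All demo_* names are
 * hypothetical. Kept under #if 0 so the header still compiles.
 */
#if 0
#include <stdio.h>

#define DEMO_START_KERNEL_map	0xffffffff80000000UL	/* assumed example */
#define DEMO_PAGE_OFFSET	0xffff888000000000UL	/* assumed example */

static unsigned long demo_phys_base = 0x1000000UL;	/* kernel at 16 MiB */

static unsigned long demo_phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - DEMO_START_KERNEL_map;

	return y + ((x > y) ? demo_phys_base
			    : (DEMO_START_KERNEL_map - DEMO_PAGE_OFFSET));
}

int main(void)
{
	/* kernel-text address: prints 0x1002000 (phys_base + 0x2000) */
	printf("%#lx\n", demo_phys_addr_nodebug(DEMO_START_KERNEL_map + 0x2000));
	/* direct-map address: prints 0x12345000 (x - PAGE_OFFSET) */
	printf("%#lx\n", demo_phys_addr_nodebug(DEMO_PAGE_OFFSET + 0x12345000));
	return 0;
}
#endif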

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

#define __phys_reloc_hide(x)	(x)
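
/*
 * For context (a paraphrased sketch, not a definition in this header):
 * <asm/page.h> builds the generic helpers on top of the above, roughly
 *
 *	#define __pa(x)		__phys_addr((unsigned long)(x))
 *	#define __pa_symbol(x)	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 *
 * On 64-bit, __phys_reloc_hide() is an identity; the 32-bit variant
 * uses RELOC_HIDE() to keep the compiler from folding symbol
 * arithmetic across relocation.
 */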

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "D" (page)
			   : "cc", "memory", "rax", "rcx");
}
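
/*
 * A user-space sketch of what alternative_call_2() accomplishes: one
 * call site that ends up running the best routine for the CPU. The
 * kernel patches the call target in place during boot; this sketch
 * models that with a function pointer selected once. Hedged: all
 * demo_* names are hypothetical, and memset() stands in for the real
 * assembly routines. Kept under #if 0 so the header still compiles.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

static void demo_clear_orig(void *page) { memset(page, 0, DEMO_PAGE_SIZE); }
static void demo_clear_rep(void *page)  { memset(page, 0, DEMO_PAGE_SIZE); }
static void demo_clear_erms(void *page) { memset(page, 0, DEMO_PAGE_SIZE); }

static void (*demo_clear_page)(void *page) = demo_clear_orig;

static void demo_apply_alternatives(int has_rep_good, int has_erms)
{
	/* Later alternatives win, mirroring the argument order above:
	 * ERMS is preferred over REP_GOOD, which beats the original. */
	if (has_rep_good)
		demo_clear_page = demo_clear_rep;
	if (has_erms)
		demo_clear_page = demo_clear_erms;
}

int main(void)
{
	static char page[DEMO_PAGE_SIZE] = { 42 };

	demo_apply_alternatives(1, 1);	/* pretend CPUID reported both */
	demo_clear_page(page);
	printf("page[0] after clear: %d\n", page[0]);	/* prints 0 */
	return 0;
}
#endif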

void copy_page(void *to, void *from);

#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	alternative_io("movq %[small],%0","movq %[large],%0",
			X86_FEATURE_LA57,
			"=r" (ret),
			[small] "i" ((1ul << 47)-PAGE_SIZE),
			[large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}
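
/*
 * A stand-alone sketch of the two limits task_size_max() selects
 * between: the first address past the user range, minus one guard
 * page, for 47 user VA bits (4-level paging) and 56 (5-level,
 * X86_FEATURE_LA57). The final page is left unmapped per the
 * SYSCALL/SYSRET and AMD speculation issues described above. Hedged:
 * DEMO_PAGE_SIZE is an illustrative 4 KiB constant. Kept under #if 0
 * so the header still compiles.
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	/* !LA57: prints 0x7ffffffff000 */
	printf("4-level TASK_SIZE_MAX: %#lx\n", (1UL << 47) - DEMO_PAGE_SIZE);
	/* LA57: prints 0xfffffffffff000 */
	printf("5-level TASK_SIZE_MAX: %#lx\n", (1UL << 56) - DEMO_PAGE_SIZE);
	return 0;
}
#endif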
#endif	/* CONFIG_X86_5LEVEL */

#endif	/* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */