xref: /linux/arch/x86/include/asm/page_64.h (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>
#include <linux/mmdebug.h>

/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long direct_map_physmem_end;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/*
	 * Use the carry flag to determine if x was < __START_KERNEL_map:
	 * x > y holds exactly when the subtraction did not underflow.
	 * Kernel-image addresses are offset by phys_base; everything else
	 * is assumed to be in the direct map, offset by PAGE_OFFSET.
	 */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}

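/*
 * Worked example (editorial illustration, not part of the original header),
 * assuming the default 4-level, non-KASLR layout with
 * PAGE_OFFSET == 0xffff888000000000, __START_KERNEL_map ==
 * 0xffffffff80000000, and a hypothetical phys_base of 0x1000000:
 *
 *	kernel image:	x = 0xffffffff80001000
 *			y = x - __START_KERNEL_map = 0x1000 (x > y)
 *			phys = y + phys_base = 0x1001000
 *
 *	direct map:	x = 0xffff888000002000
 *			y underflows (x < y), so
 *			phys = y + (__START_KERNEL_map - PAGE_OFFSET)
 *			     = x - PAGE_OFFSET = 0x2000
 */
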
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#endif

static inline unsigned long __phys_addr_symbol(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

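/*
 * Editorial sketch (assumption, modeled on arch/x86/mm/physaddr.c): the
 * CONFIG_DEBUG_VIRTUAL variant performs the same translation out of line
 * but adds sanity checks, roughly:
 *
 *	unsigned long __phys_addr(unsigned long x)
 *	{
 *		unsigned long y = x - __START_KERNEL_map;
 *
 *		if (x > y) {
 *			x = y + phys_base;
 *			VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
 *		} else {
 *			x = y + (__START_KERNEL_map - PAGE_OFFSET);
 *			VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
 *		}
 *		return x;
 *	}
 */
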
	/*
	 * Only check the upper bound: if x was below __START_KERNEL_map,
	 * the subtraction underflows and y lands far above
	 * KERNEL_IMAGE_SIZE, so the same check catches it.
	 */
	VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);

	return y + phys_base;
}

#define __phys_reloc_hide(x)	(x)

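/*
 * Usage sketch (editorial note, not part of the original header):
 * __phys_addr_symbol() is only valid for addresses within the kernel
 * image mapping, typically reached via the __pa_symbol() wrapper in
 * <asm/page.h>, e.g.:
 *
 *	phys_addr_t text_phys = __pa_symbol(_text);
 *
 * Feeding it a direct-map (__va()) address would trip the
 * VIRTUAL_BUG_ON() above when CONFIG_DEBUG_VIRTUAL is enabled.
 */
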
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
KCFI_REFERENCE(clear_page_orig);
KCFI_REFERENCE(clear_page_rep);
KCFI_REFERENCE(clear_page_erms);

static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "D" (page),
			   "cc", "memory", "rax", "rcx");
}

void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);

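/*
 * Usage sketch (editorial illustration, not part of the original header):
 * both helpers take page-aligned kernel virtual addresses and operate on
 * exactly PAGE_SIZE bytes. alternative_call_2() patches in the REP STOSQ
 * (X86_FEATURE_REP_GOOD) or REP STOSB (X86_FEATURE_ERMS) variant at boot,
 * falling back to the unrolled clear_page_orig(). With src being another
 * page-aligned kernel address:
 *
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *	void *dst = page_address(pg);
 *
 *	clear_page(dst);
 *	copy_page(dst, src);
 */
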
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	alternative_io("movq %[small],%0","movq %[large],%0",
			X86_FEATURE_LA57,
			"=r" (ret),
			[small] "i" ((1ul << 47)-PAGE_SIZE),
			[large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}
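
/*
 * Worked values (editorial note): with 4-level paging the limit is one
 * page below the 47-bit user canonical boundary, and with LA57 one page
 * below the 56-bit boundary:
 *
 *	(1ul << 47) - PAGE_SIZE = 0x00007ffffffff000
 *	(1ul << 56) - PAGE_SIZE = 0x00fffffffffff000
 *
 * Leaving that last page unmapped is what avoids the SYSRET and Ryzen
 * hazards described in the comment above.
 */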

#endif	/* !__ASSEMBLER__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */