/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>

/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long direct_map_physmem_end;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

#define __phys_reloc_hide(x)	(x)

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "D" (page),
			   "cc", "memory", "rax", "rcx");
}

void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);

#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	alternative_io("movq %[small],%0","movq %[large],%0",
			X86_FEATURE_LA57,
			"=r" (ret),
			[small] "i" ((1ul << 47)-PAGE_SIZE),
			[large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}
#endif	/* CONFIG_X86_5LEVEL */

#endif	/* !__ASSEMBLER__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */
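
/*
 * Illustrative sketch, not part of this header: a stand-alone user-space
 * model of the branch-free virt-to-phys selection in __phys_addr_nodebug()
 * above.  All demo_* names and constants below are invented stand-ins for
 * __START_KERNEL_map, PAGE_OFFSET and phys_base; in the kernel the real
 * values come from page_64_types.h and early boot.  Build separately
 * (e.g. with gcc) if you want to watch both cases resolve.
 */
#include <stdio.h>

#define DEMO_START_KERNEL_MAP	0xffffffff80000000ul	/* stand-in */
#define DEMO_PAGE_OFFSET	0xffff888000000000ul	/* stand-in */

static unsigned long demo_phys_base = 0x1000000ul;	/* stand-in */

static unsigned long demo_phys_addr(unsigned long x)
{
	unsigned long y = x - DEMO_START_KERNEL_MAP;

	/*
	 * If x >= DEMO_START_KERNEL_MAP the subtraction does not wrap, so
	 * x > y holds and this is a kernel-text address: add phys_base.
	 * Otherwise y wrapped around, x > y is false, and x is a
	 * direct-map address: the correction term reduces the result to
	 * x - DEMO_PAGE_OFFSET.
	 */
	return y + ((x > y) ? demo_phys_base
			    : (DEMO_START_KERNEL_MAP - DEMO_PAGE_OFFSET));
}

int main(void)
{
	/* One kernel-text and one direct-map address, both invented. */
	printf("%#lx\n", demo_phys_addr(DEMO_START_KERNEL_MAP + 0x2000)); /* 0x1002000 */
	printf("%#lx\n", demo_phys_addr(DEMO_PAGE_OFFSET + 0x2000));     /* 0x2000 */
	return 0;
}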
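
/*
 * Illustrative sketch, not part of this header: user-space versions of the
 * two "rep stos" strategies that clear_page() selects between through
 * alternative_call_2().  In the kernel the winning variant is patched into
 * the call site once at boot, so the per-call branch below does not exist
 * there; the bool is a stand-in for the X86_FEATURE_ERMS check.  x86-64
 * with GNU C inline asm only; all demo_* names are invented.
 */
#include <stdbool.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096

/* Roughly clear_page_erms: byte-granular "rep stosb", fast with ERMS. */
static void demo_clear_page_erms(void *page)
{
	unsigned long cnt = DEMO_PAGE_SIZE;

	asm volatile("rep stosb"
		     : "+D" (page), "+c" (cnt)
		     : "a" (0)
		     : "memory");
}

/* Roughly clear_page_rep: qword-granular "rep stosq". */
static void demo_clear_page_rep(void *page)
{
	unsigned long cnt = DEMO_PAGE_SIZE / 8;

	asm volatile("rep stosq"
		     : "+D" (page), "+c" (cnt)
		     : "a" (0UL)
		     : "memory");
}

int main(void)
{
	void *page = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
	bool has_erms = true;	/* stand-in for the boot-time CPUID check */

	if (!page)
		return 1;
	if (has_erms)
		demo_clear_page_erms(page);
	else
		demo_clear_page_rep(page);
	free(page);
	return 0;
}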
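
/*
 * Illustrative sketch, not part of this header: the two constants that
 * task_size_max() selects between.  With 4-level paging user space ends
 * just below bit 47; with LA57 (5-level paging) just below bit 56.  The
 * last canonical page is left unmapped for the SYSCALL/SYSRET and Ryzen
 * issues described in the comment above task_size_max().  The bool is a
 * stand-in for the boot-patched X86_FEATURE_LA57 check.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ul

static unsigned long demo_task_size_max(bool la57)
{
	return (la57 ? (1ul << 56) : (1ul << 47)) - DEMO_PAGE_SIZE;
}

int main(void)
{
	printf("4-level: %#lx\n", demo_task_size_max(false)); /* 0x7ffffffff000 */
	printf("5-level: %#lx\n", demo_task_size_max(true));  /* 0xfffffffffff000 */
	return 0;
}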