xref: /linux/arch/x86/include/asm/page_64.h (revision c9f016e72b5cc7d4d68fac51f8e72c8c7a69c06e)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_PAGE_64_H
31965aae3SH. Peter Anvin #define _ASM_X86_PAGE_64_H
4bb898558SAl Viro 
551c78eb3SJeremy Fitzhardinge #include <asm/page_64_types.h>
6bb898558SAl Viro 
7fb50b020SAlexander Duyck #ifndef __ASSEMBLY__
8e1cd82a3SIngo Molnar #include <asm/cpufeatures.h>
9f25d3847SBorislav Petkov #include <asm/alternative.h>
10fb50b020SAlexander Duyck 
11b073d7f8SAlexander Potapenko #include <linux/kmsan-checks.h>
12b073d7f8SAlexander Potapenko 
/* duplicate of the declaration in bootmem.h */
14fb50b020SAlexander Duyck extern unsigned long max_pfn;
15fb50b020SAlexander Duyck extern unsigned long phys_base;
16fb50b020SAlexander Duyck 
17eedb92abSKirill A. Shutemov extern unsigned long page_offset_base;
18eedb92abSKirill A. Shutemov extern unsigned long vmalloc_base;
19eedb92abSKirill A. Shutemov extern unsigned long vmemmap_base;
20*ea72ce5dSThomas Gleixner extern unsigned long physmem_end;
21eedb92abSKirill A. Shutemov 
/*
 * Translate a kernel virtual address to a physical address, without the
 * CONFIG_DEBUG_VIRTUAL sanity checking.
 *
 * Two virtual ranges are handled:
 *  - kernel text mapping (x >= __START_KERNEL_map): phys = x - __START_KERNEL_map + phys_base
 *  - direct mapping (x >= PAGE_OFFSET):             phys = x - PAGE_OFFSET
 *
 * The subtraction below wraps (unsigned) when x < __START_KERNEL_map, so
 * a single compare distinguishes the two cases without a branch on the
 * address ranges themselves.
 */
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}
310bdf525fSAlexander Duyck 
#ifdef CONFIG_DEBUG_VIRTUAL
/*
 * Out-of-line, checked variants (presumably with extra validation of the
 * virtual address under CONFIG_DEBUG_VIRTUAL — see arch/x86/mm/physaddr.c).
 */
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
/*
 * Kernel symbols always live in the kernel text mapping, so the
 * translation is a fixed offset from __START_KERNEL_map.
 */
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

/* No relocation hiding needed on 64-bit; identity. */
#define __phys_reloc_hide(x)	(x)
42fb50b020SAlexander Duyck 
/* Three zeroing implementations; one is selected at boot via alternatives. */
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

/*
 * Zero one page.
 *
 * alternative_call_2() patches in the best implementation for the CPU:
 * clear_page_orig by default, clear_page_rep when X86_FEATURE_REP_GOOD
 * is set, clear_page_erms when X86_FEATURE_ERMS is set. @page is passed
 * in %rdi ("D") and is also an output because the callee clobbers it.
 */
static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "D" (page)
			   : "cc", "memory", "rax", "rcx");
}
61f25d3847SBorislav Petkov 
62fb50b020SAlexander Duyck void copy_page(void *to, void *from);
63fb50b020SAlexander Duyck 
64025768a9SLinus Torvalds #ifdef CONFIG_X86_5LEVEL
65025768a9SLinus Torvalds /*
66025768a9SLinus Torvalds  * User space process size.  This is the first address outside the user range.
67025768a9SLinus Torvalds  * There are a few constraints that determine this:
68025768a9SLinus Torvalds  *
69025768a9SLinus Torvalds  * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
70025768a9SLinus Torvalds  * address, then that syscall will enter the kernel with a
71025768a9SLinus Torvalds  * non-canonical return address, and SYSRET will explode dangerously.
72025768a9SLinus Torvalds  * We avoid this particular problem by preventing anything
73025768a9SLinus Torvalds  * from being mapped at the maximum canonical address.
74025768a9SLinus Torvalds  *
75025768a9SLinus Torvalds  * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
76025768a9SLinus Torvalds  * CPUs malfunction if they execute code from the highest canonical page.
77025768a9SLinus Torvalds  * They'll speculate right off the end of the canonical space, and
78025768a9SLinus Torvalds  * bad things happen.  This is worked around in the same way as the
79025768a9SLinus Torvalds  * Intel problem.
80025768a9SLinus Torvalds  *
81025768a9SLinus Torvalds  * With page table isolation enabled, we map the LDT in ... [stay tuned]
82025768a9SLinus Torvalds  */
/*
 * Runtime value of the highest user address limit (see the block comment
 * above for why the top page is excluded). Patched via alternatives:
 * (1UL << 47) - PAGE_SIZE with 4-level paging, (1UL << 56) - PAGE_SIZE
 * when the CPU/kernel run with 5-level paging (X86_FEATURE_LA57).
 */
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	/* A single movq with an immediate, selected by the LA57 feature bit. */
	alternative_io("movq %[small],%0","movq %[large],%0",
			X86_FEATURE_LA57,
			"=r" (ret),
			[small] "i" ((1ul << 47)-PAGE_SIZE),
			[large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}
95025768a9SLinus Torvalds #endif	/* CONFIG_X86_5LEVEL */
96025768a9SLinus Torvalds 
97fb50b020SAlexander Duyck #endif	/* !__ASSEMBLY__ */
98fb50b020SAlexander Duyck 
991ad83c85SAndy Lutomirski #ifdef CONFIG_X86_VSYSCALL_EMULATION
100a6c19dfeSAndy Lutomirski # define __HAVE_ARCH_GATE_AREA 1
1011ad83c85SAndy Lutomirski #endif
102a6c19dfeSAndy Lutomirski 
1031965aae3SH. Peter Anvin #endif /* _ASM_X86_PAGE_64_H */
104