/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/*
 * These handle exceptions in both the to and from buffers, but do not
 * perform access_ok() checks; callers must validate user pointers first.
 */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
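
/*
 * Illustrative sketch (not part of the original header): conceptually,
 * the alternative_call_2() above is a boot-time-patched equivalent of
 * the runtime dispatch below, with the call target rewritten once at
 * patch time instead of being re-tested on every call.  ERMS takes
 * precedence over REP_GOOD because its entry comes last.
 */
#if 0
static __always_inline __must_check unsigned long
example_copy_user_dispatch(void *to, const void *from, unsigned len)
{
	if (static_cpu_has(X86_FEATURE_ERMS))		/* highest priority */
		return copy_user_enhanced_fast_string(to, from, len);
	if (static_cpu_has(X86_FEATURE_REP_GOOD))
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len);
}
#endif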

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();
	/*
	 * Note, __memcpy_mcsafe() is explicitly used since it can
	 * handle exceptions / faults.  memcpy_mcsafe() may fall back to
	 * memcpy() which lacks this handling.
	 */
	ret = __memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
}
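
/*
 * Illustrative sketch (assumption, not from this header): like the other
 * copy routines here, copy_to_user_mcsafe() returns the number of bytes
 * it could NOT copy, so a caller reading from potentially-poisoned
 * memory (e.g. pmem) would typically report a short copy:
 */
#if 0
static inline long example_read_to_user(void __user *ubuf, const void *src,
					unsigned len)
{
	unsigned long rem = copy_to_user_mcsafe((__force void *)ubuf, src, len);

	if (rem == len)
		return -EFAULT;		/* nothing was copied */
	return len - rem;		/* bytes successfully copied */
}
#endif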

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
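
/*
 * Illustrative sketch (simplified; assumption, not from this header): the
 * generic uaccess layer builds copy_from_user() on top of the raw helper
 * above, adding the access_ok() check and zeroing the uncopied tail on a
 * fault so uninitialized kernel memory cannot leak to the caller:
 */
#if 0
static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (access_ok(from, n))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;	/* bytes NOT copied; 0 on success */
}
#endif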

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
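
/*
 * Illustrative sketch (assumption, not from this header): the _nocache
 * and _flushcache variants exist for destinations such as persistent
 * memory, where copied data must not linger dirty in the CPU cache.  A
 * simplified pmem-style write path might look like:
 */
#if 0
static inline long example_write_pmem(void *pmem_dst,
				      const void __user *ubuf, unsigned len)
{
	/* copy from user and flush the destination lines from the cache */
	return __copy_from_user_flushcache(pmem_dst, ubuf, len);
}
#endif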

unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */