xref: /linux/arch/x86/include/asm/uaccess_64.h (revision 7cefa5a05dbda1f0bbbd98e9d2861b09a35cc6ea)
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

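/*
 * copy_user_generic() picks the fastest raw copy routine for the running CPU
 * via the alternatives mechanism and, like the routines above, returns the
 * number of bytes that could not be copied (0 on success).
 */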
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

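/* Copy 'len' bytes from one userspace buffer to another; returns bytes not copied. */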
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

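/*
 * Raw kernel <- user copy: no access_ok() and no might_fault().  Small,
 * compile-time-constant sizes are open coded as one or two mov-based
 * __get_user_asm() accesses; everything else falls back to
 * copy_user_generic().  Returns the number of bytes left uncopied.
 */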
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

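/*
 * Kernel <- user copy.  May fault and sleep; the caller is expected to have
 * validated the user range with access_ok().  The KASAN hook checks the
 * kernel destination before the copy.
 */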
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

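/*
 * Raw user <- kernel copy, the mirror image of __copy_from_user_nocheck():
 * constant small sizes become __put_user_asm() stores, other sizes go
 * through copy_user_generic().
 */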
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

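/*
 * User <- kernel copy.  May fault and sleep; the caller is expected to have
 * validated the user range with access_ok().
 */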
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

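/*
 * User <- user copy without access_ok().  Constant sizes up to 8 bytes are
 * bounced through a register-sized temporary; larger or variable sizes use
 * copy_user_generic() directly on the two user pointers.
 */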
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}
	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

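/*
 * Atomic-context variants: identical to __copy_from_user()/__copy_to_user()
 * except that might_fault() is not called, so they may be used with page
 * faults disabled; a fault then simply shows up as a short copy.
 */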
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

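/*
 * Non-temporal copy from userspace that avoids pulling the destination data
 * into the cache.  The wrappers below differ in whether they may sleep and
 * in the 'zerorest' argument they pass: __copy_from_user_nocache() requests
 * tail handling with zerorest=1, the inatomic variant passes 0.
 */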
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

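/*
 * Fault-recovery helper used by the assembly copy routines: it retries the
 * remaining bytes one at a time and returns how many were left uncopied.
 */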
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

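/*
 * Illustrative sketch (not part of this header): typical driver-side use of
 * the checked copy helpers.  The struct and ioctl handler names below are
 * hypothetical; copy_from_user()/copy_to_user() wrap the primitives above
 * with an access_ok() check and return the number of bytes NOT copied.
 *
 *	struct my_params { u32 flags; u64 addr; };	// hypothetical
 *
 *	static long my_ioctl(void __user *argp)		// hypothetical
 *	{
 *		struct my_params p;
 *
 *		if (copy_from_user(&p, argp, sizeof(p)))
 *			return -EFAULT;			// short copy => fault
 *		p.flags |= 1;
 *		if (copy_to_user(argp, &p, sizeof(p)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */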
#endif /* _ASM_X86_UACCESS_64_H */