xref: /linux/arch/x86/include/asm/uaccess.h (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_UACCESS_H
31965aae3SH. Peter Anvin #define _ASM_X86_UACCESS_H
4bb898558SAl Viro /*
5bb898558SAl Viro  * User space memory access functions
6bb898558SAl Viro  */
7bb898558SAl Viro #include <linux/compiler.h>
8888f84a6SAlexander Potapenko #include <linux/instrumented.h>
91771c6e1SAndrey Ryabinin #include <linux/kasan-checks.h>
1074c228d2SKirill A. Shutemov #include <linux/mm_types.h>
11bb898558SAl Viro #include <linux/string.h>
12e0bddc19SKirill A. Shutemov #include <linux/mmap_lock.h>
13bb898558SAl Viro #include <asm/asm.h>
14bb898558SAl Viro #include <asm/page.h>
1563bcff2aSH. Peter Anvin #include <asm/smap.h>
1645caf470SAl Viro #include <asm/extable.h>
1774c228d2SKirill A. Shutemov #include <asm/tlbflush.h>
18bb898558SAl Viro 
19b9bd9f60SLinus Torvalds #ifdef CONFIG_X86_32
20b9bd9f60SLinus Torvalds # include <asm/uaccess_32.h>
2174c228d2SKirill A. Shutemov #else
22b9bd9f60SLinus Torvalds # include <asm/uaccess_64.h>
236014bc27SLinus Torvalds #endif
246014bc27SLinus Torvalds 
2512700c17SArnd Bergmann #include <asm-generic/access_ok.h>
2612700c17SArnd Bergmann 
27bb898558SAl Viro extern int __get_user_1(void);
28bb898558SAl Viro extern int __get_user_2(void);
29bb898558SAl Viro extern int __get_user_4(void);
30bb898558SAl Viro extern int __get_user_8(void);
31ea6f043fSLinus Torvalds extern int __get_user_nocheck_1(void);
32ea6f043fSLinus Torvalds extern int __get_user_nocheck_2(void);
33ea6f043fSLinus Torvalds extern int __get_user_nocheck_4(void);
34ea6f043fSLinus Torvalds extern int __get_user_nocheck_8(void);
35bb898558SAl Viro extern int __get_user_bad(void);
36bb898558SAl Viro 
/*
 * Open/close the user-access window: stac() sets EFLAGS.AC so SMAP
 * permits kernel accesses to user pages, clac() clears it again.
 */
#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * Like __uaccess_begin(), but with a speculation barrier so the CPU
 * cannot speculate past a preceding access_ok() range check before
 * the user access happens (Spectre-v1 style hardening).
 */
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})
4411f1a4b9SLinus Torvalds 
/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

/*
 * Helper for __inttype(): evaluates to '(unsigned type)0' when 'x'
 * fits in 'type', otherwise to the fallback expression 'not'.
 * __builtin_choose_expr() picks at compile time, so only the chosen
 * arm contributes its type.
 */
#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
57bb898558SAl Viro 
58ea6f043fSLinus Torvalds /*
59ea6f043fSLinus Torvalds  * This is used for both get_user() and __get_user() to expand to
60ea6f043fSLinus Torvalds  * the proper special function call that has odd calling conventions
61ea6f043fSLinus Torvalds  * due to returning both a value and an error, and that depends on
62ea6f043fSLinus Torvalds  * the size of the pointer passed in.
63ea6f043fSLinus Torvalds  *
64ea6f043fSLinus Torvalds  * Careful: we have to cast the result to the type of the pointer
65ea6f043fSLinus Torvalds  * for sign reasons.
66ea6f043fSLinus Torvalds  *
67ea6f043fSLinus Torvalds  * The use of _ASM_DX as the register specifier is a bit of a
68ea6f043fSLinus Torvalds  * simplification, as gcc only cares about it as the starting point
69ea6f043fSLinus Torvalds  * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
70ea6f043fSLinus Torvalds  * (%ecx being the next register in gcc's x86 register sequence), and
71ea6f043fSLinus Torvalds  * %rdx on 64 bits.
72ea6f043fSLinus Torvalds  *
73ea6f043fSLinus Torvalds  * Clang/LLVM cares about the size of the register, but still wants
74ea6f043fSLinus Torvalds  * the base register for something that ends up being a pair.
75ea6f043fSLinus Torvalds  */
/*
 * Common expansion for get_user()/__get_user(): calls the out-of-line
 * __{fn}_{1,2,4,8} helper selected by sizeof(*(ptr)).  The pointer
 * goes in via the "0" (i.e. %eax) constraint, the error code comes
 * back in %eax and the value in %edx (see the comment above about the
 * _ASM_DX register-pair trick on 32-bit).
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%c[size]"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), [size] "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
89ea6f043fSLinus Torvalds 
90bb898558SAl Viro /**
91bc8ff3caSMike Rapoport  * get_user - Get a simple variable from user space.
92bb898558SAl Viro  * @x:   Variable to store result.
93bb898558SAl Viro  * @ptr: Source address, in user space.
94bb898558SAl Viro  *
95b3c395efSDavid Hildenbrand  * Context: User context only. This function may sleep if pagefaults are
96b3c395efSDavid Hildenbrand  *          enabled.
97bb898558SAl Viro  *
98bb898558SAl Viro  * This macro copies a single simple variable from user space to kernel
99bb898558SAl Viro  * space.  It supports simple types like char and int, but not larger
100bb898558SAl Viro  * data types like structures or arrays.
101bb898558SAl Viro  *
102bb898558SAl Viro  * @ptr must have pointer-to-simple-variable type, and the result of
103bb898558SAl Viro  * dereferencing @ptr must be assignable to @x without a cast.
104bb898558SAl Viro  *
105bc8ff3caSMike Rapoport  * Return: zero on success, or -EFAULT on error.
106bb898558SAl Viro  * On error, the variable @x is set to zero.
107ff52c3b0SH. Peter Anvin  */
108ea6f043fSLinus Torvalds #define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
109ea6f043fSLinus Torvalds 
110ea6f043fSLinus Torvalds /**
111ea6f043fSLinus Torvalds  * __get_user - Get a simple variable from user space, with less checking.
112ea6f043fSLinus Torvalds  * @x:   Variable to store result.
113ea6f043fSLinus Torvalds  * @ptr: Source address, in user space.
114ff52c3b0SH. Peter Anvin  *
115ea6f043fSLinus Torvalds  * Context: User context only. This function may sleep if pagefaults are
116ea6f043fSLinus Torvalds  *          enabled.
117f69fa9a9SH. Peter Anvin  *
118ea6f043fSLinus Torvalds  * This macro copies a single simple variable from user space to kernel
119ea6f043fSLinus Torvalds  * space.  It supports simple types like char and int, but not larger
120ea6f043fSLinus Torvalds  * data types like structures or arrays.
121ea6f043fSLinus Torvalds  *
122ea6f043fSLinus Torvalds  * @ptr must have pointer-to-simple-variable type, and the result of
123ea6f043fSLinus Torvalds  * dereferencing @ptr must be assignable to @x without a cast.
124ea6f043fSLinus Torvalds  *
125ea6f043fSLinus Torvalds  * Caller must check the pointer with access_ok() before calling this
126ea6f043fSLinus Torvalds  * function.
127ea6f043fSLinus Torvalds  *
128ea6f043fSLinus Torvalds  * Return: zero on success, or -EFAULT on error.
129ea6f043fSLinus Torvalds  * On error, the variable @x is set to zero.
130bb898558SAl Viro  */
131ea6f043fSLinus Torvalds #define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
132bb898558SAl Viro 
133bb898558SAl Viro 
#ifdef CONFIG_X86_32
/*
 * 64-bit store on 32-bit: two movl's.  The "A" constraint places the
 * value in %edx:%eax; either store may fault, so both instructions
 * get an extable entry jumping to 'label'.
 */
#define __put_user_goto_u64(x, addr, label)			\
	asm goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
/* 64-bit: a single faulting quadword store suffices. */
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif
148bb898558SAl Viro 
/* Referenced for non-constant sizes; resolves to a build-time failure. */
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
/* Same convention; the _nocheck variants back __put_user(). */
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);
163d55564cfSLinus Torvalds 
/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 *
 * Common expansion for put_user()/__put_user(): calls the out-of-line
 * __{fn}_{1,2,4,8} helper selected by sizeof(*(ptr)).  Pointer in via
 * the "0" (%ecx) constraint, value in %eax (:%edx for 64-bit values
 * on 32-bit, per _ASM_AX), error code back in %ecx; the helpers
 * clobber %ebx.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%c[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})
190bb898558SAl Viro 
191bb898558SAl Viro /**
192bc8ff3caSMike Rapoport  * put_user - Write a simple value into user space.
193bb898558SAl Viro  * @x:   Value to copy to user space.
194bb898558SAl Viro  * @ptr: Destination address, in user space.
195bb898558SAl Viro  *
196b3c395efSDavid Hildenbrand  * Context: User context only. This function may sleep if pagefaults are
197b3c395efSDavid Hildenbrand  *          enabled.
198bb898558SAl Viro  *
199bb898558SAl Viro  * This macro copies a single simple value from kernel space to user
200bb898558SAl Viro  * space.  It supports simple types like char and int, but not larger
201bb898558SAl Viro  * data types like structures or arrays.
202bb898558SAl Viro  *
203bb898558SAl Viro  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
204bb898558SAl Viro  * to the result of dereferencing @ptr.
205bb898558SAl Viro  *
206bc8ff3caSMike Rapoport  * Return: zero on success, or -EFAULT on error.
207bb898558SAl Viro  */
208d55564cfSLinus Torvalds #define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
209d55564cfSLinus Torvalds 
210d55564cfSLinus Torvalds /**
211d55564cfSLinus Torvalds  * __put_user - Write a simple value into user space, with less checking.
212d55564cfSLinus Torvalds  * @x:   Value to copy to user space.
213d55564cfSLinus Torvalds  * @ptr: Destination address, in user space.
214d55564cfSLinus Torvalds  *
215d55564cfSLinus Torvalds  * Context: User context only. This function may sleep if pagefaults are
216d55564cfSLinus Torvalds  *          enabled.
217d55564cfSLinus Torvalds  *
218d55564cfSLinus Torvalds  * This macro copies a single simple value from kernel space to user
219d55564cfSLinus Torvalds  * space.  It supports simple types like char and int, but not larger
220d55564cfSLinus Torvalds  * data types like structures or arrays.
221d55564cfSLinus Torvalds  *
222d55564cfSLinus Torvalds  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
223d55564cfSLinus Torvalds  * to the result of dereferencing @ptr.
224d55564cfSLinus Torvalds  *
225d55564cfSLinus Torvalds  * Caller must check the pointer with access_ok() before calling this
226d55564cfSLinus Torvalds  * function.
227d55564cfSLinus Torvalds  *
228d55564cfSLinus Torvalds  * Return: zero on success, or -EFAULT on error.
229d55564cfSLinus Torvalds  */
230d55564cfSLinus Torvalds #define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
231bb898558SAl Viro 
/*
 * Size-dispatched inline user store (backs unsafe_put_user()): expands
 * to a single faulting mov (a movl pair for size 8 on 32-bit) whose
 * extable entry branches to 'label' on fault.  x and ptr are each
 * evaluated exactly once.  Unsupported sizes hit __put_user_bad().
 */
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, __ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, __ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, __ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, __ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, __ptr, size);				\
} while (0)
255bb898558SAl Viro 
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
/*
 * 64-bit load on 32-bit as two 32-bit loads; either may fault and
 * jump straight to 'label' via the asm-goto extable path.
 */
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;		\
} while (0)
#else
/* 64-bit: a single quadword load. */
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

/*
 * Size-dispatched inline user load (backs unsafe_get_user()); on
 * fault, control transfers directly to 'label'.  The byte case goes
 * through a temporary so the "=q" (byte-register) constraint always
 * sees a suitable operand.
 */
#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

/*
 * One faulting mov with an asm-goto output; the extable entry sends a
 * fault straight to 'label'.
 */
#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_goto_output("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)
304865c50e1SNick Desaulniers 
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
/*
 * 64-bit load on 32-bit: two movl's into %edx:%eax ("=&A").  On a
 * fault the fixup (EX_TYPE_EFAULT_REG | EX_FLAG_CLEAR_AX_DX) zeroes
 * %eax/%edx and writes an error code into the 'retval' register.
 */
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
/* 64-bit: a single quadword load. */
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

/*
 * Size-dispatched inline user load; 'retval' ends up 0 on success or
 * an error code written by the extable fixup on fault.
 */
#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

/*
 * One faulting mov through %eax; on fault the fixup zeroes the output
 * (EX_FLAG_CLEAR_AX) and stores an error code into 'err'.
 */
#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
371865c50e1SNick Desaulniers 
#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/*
 * try_cmpxchg on user memory: ZF out of CMPXCHG (CC_OUT(z)) reports
 * success; a fault branches to 'label' via the extable.  Matching
 * try_cmpxchg() semantics, a compare failure writes the current value
 * back through *_pold before returning false.
 */
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_goto_output("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * 8-byte variant for 32-bit: CMPXCHG8B with the expected value in
 * %edx:%eax ("+A") and the new value split across %ebx/%ecx.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_goto_output("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/*
 * Fallback when the compiler cannot tie an output operand ("+a") to an
 * asm goto: collect an error code in a GPR and branch on it afterwards.
 */
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 *
 * __result afterwards: 1 = exchanged, 0 = compare failed (per setz),
 * negative = faulted (the extable fixup wrote an error code over it).
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
469989b5db2SPeter Zijlstra 
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
/* Present 'x' to gcc as a (deliberately over-sized) memory operand. */
#define __m(x) (*(struct __large_struct __user *)(x))
473bb898558SAl Viro 
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 *
 * Single faulting mov"itype"; the extable entry transfers a fault to
 * the caller-supplied 'label'.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm goto("\n"							\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)
4854a789213SLinus Torvalds 
/* NMI-context variant of copy_from_user() — see its out-of-line body. */
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
/* Machine-check (#MC) tolerant copy helpers. */
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif
501ec6347bbSDan Williams 
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/* Alignment mask consulted by the Intel usercopy routines. */
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1
512bb898558SAl Viro 
/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	/*
	 * Only open the user-access window after the range check passed;
	 * the _nospec variant also keeps the CPU from speculating past
	 * that check.  Order is security-relevant — do not reorder.
	 */
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

/* Save/restore the user-access (SMAP) state across exceptional paths. */
#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)
531e74deb11SPeter Zijlstra 
/*
 * Store to user memory inside a user_access_begin()/end() section;
 * jumps to 'label' on fault.
 */
#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
5345b24a7a2SLinus Torvalds 
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * Load from user memory inside a user_access_begin()/end() section.
 * With asm-goto outputs, a fault jumps straight to err_label.
 */
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* Fallback: collect an error code, then branch to err_label explicitly. */
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
5525b24a7a2SLinus Torvalds 
/*
 * Never defined; referencing it for an unsupported operand size turns a
 * bad unsafe_try_cmpxchg_user() invocation into a link-time error.
 */
553989b5db2SPeter Zijlstra extern void __try_cmpxchg_user_wrong_size(void);
554989b5db2SPeter Zijlstra 
555989b5db2SPeter Zijlstra #ifndef CONFIG_X86_32
/*
 * On 64-bit, an 8-byte user cmpxchg is simply the "q"-suffixed form of
 * the generic asm helper.  (A 32-bit variant is presumably provided
 * elsewhere under CONFIG_X86_32 -- not visible in this chunk.)
 */
556989b5db2SPeter Zijlstra #define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
557989b5db2SPeter Zijlstra 	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
558989b5db2SPeter Zijlstra #endif
559989b5db2SPeter Zijlstra 
560989b5db2SPeter Zijlstra /*
561989b5db2SPeter Zijlstra  * Force the pointer to u<size> to match the size expected by the asm helper.
562989b5db2SPeter Zijlstra  * clang/LLVM compiles all cases and only discards the unused paths after
563989b5db2SPeter Zijlstra  * processing errors, which breaks i386 if the pointer is an 8-byte value.
564989b5db2SPeter Zijlstra  */
/*
 * unsafe_try_cmpxchg_user(): cmpxchg on user pointer @_ptr, dispatching
 * on sizeof(*(_ptr)) to the matching operand-size asm helper ("b"/"w"/
 * "l"/cmpxchg64).  Must be called inside a user_access_begin() section;
 * a fault jumps to @_label.  The statement expression yields the bool
 * result of the helper (per try_cmpxchg convention this is presumably
 * true on a successful exchange, with *(_oldp) updated on a compare
 * miss -- confirm against __try_cmpxchg_user_asm, defined elsewhere).
 * Unsupported sizes fail at link time via __try_cmpxchg_user_wrong_size().
 */
565989b5db2SPeter Zijlstra #define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
566989b5db2SPeter Zijlstra 	bool __ret;								\
567989b5db2SPeter Zijlstra 	__chk_user_ptr(_ptr);							\
568989b5db2SPeter Zijlstra 	switch (sizeof(*(_ptr))) {						\
569989b5db2SPeter Zijlstra 	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
570989b5db2SPeter Zijlstra 					       (__force u8 *)(_ptr), (_oldp),	\
571989b5db2SPeter Zijlstra 					       (_nval), _label);		\
572989b5db2SPeter Zijlstra 		break;								\
573989b5db2SPeter Zijlstra 	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
574989b5db2SPeter Zijlstra 					       (__force u16 *)(_ptr), (_oldp),	\
575989b5db2SPeter Zijlstra 					       (_nval), _label);		\
576989b5db2SPeter Zijlstra 		break;								\
577989b5db2SPeter Zijlstra 	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
578989b5db2SPeter Zijlstra 					       (__force u32 *)(_ptr), (_oldp),	\
579989b5db2SPeter Zijlstra 					       (_nval), _label);		\
580989b5db2SPeter Zijlstra 		break;								\
581989b5db2SPeter Zijlstra 	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
582989b5db2SPeter Zijlstra 						 (_nval), _label);		\
583989b5db2SPeter Zijlstra 		break;								\
584989b5db2SPeter Zijlstra 	default: __try_cmpxchg_user_wrong_size();				\
585989b5db2SPeter Zijlstra 	}									\
586989b5db2SPeter Zijlstra 	__ret;						})
587989b5db2SPeter Zijlstra 
588989b5db2SPeter Zijlstra /* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
/*
 * Self-contained wrapper: opens and closes the user-access window itself
 * and inverts the bool result of unsafe_try_cmpxchg_user() into the
 * 0/1 convention above.  On a fault the asm jumps to @_label before
 * __ret is overwritten, so the initial -EFAULT survives.  Note @_label
 * is instantiated as a real C label here, so each expansion within a
 * function needs a unique label name.
 */
589989b5db2SPeter Zijlstra #define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
590989b5db2SPeter Zijlstra 	int __ret = -EFAULT;						\
591989b5db2SPeter Zijlstra 	__uaccess_begin_nospec();					\
592989b5db2SPeter Zijlstra 	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
593989b5db2SPeter Zijlstra _label:									\
594989b5db2SPeter Zijlstra 	__uaccess_end();						\
595989b5db2SPeter Zijlstra 	__ret;								\
596989b5db2SPeter Zijlstra 							})
597989b5db2SPeter Zijlstra 
598c512c691SLinus Torvalds /*
599c512c691SLinus Torvalds  * We want the unsafe accessors to always be inlined and use
600c512c691SLinus Torvalds  * the error labels - thus the macro games.
601c512c691SLinus Torvalds  */
/*
 * unsafe_copy_loop(): store as many whole 'type'-sized chunks as fit,
 * advancing @dst/@src and shrinking @len in place (the arguments are
 * local lvalues of the caller macro, mutated deliberately).  A faulting
 * unsafe_put_user() branches to @label.
 */
602c512c691SLinus Torvalds #define unsafe_copy_loop(dst, src, len, type, label)				\
603c512c691SLinus Torvalds 	while (len >= sizeof(type)) {						\
6043beff76bSLinus Torvalds 		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
605c512c691SLinus Torvalds 		dst += sizeof(type);						\
606c512c691SLinus Torvalds 		src += sizeof(type);						\
607c512c691SLinus Torvalds 		len -= sizeof(type);						\
608c512c691SLinus Torvalds 	}
609c512c691SLinus Torvalds 
/*
 * unsafe_copy_to_user(): copy @_len bytes from kernel @_src to user
 * @_dst inside a user_access_begin() section, jumping to @label on a
 * fault.  Copies in progressively smaller chunks (u64, u32, u16, u8)
 * so the bulk goes in wide stores and the tail in narrower ones.
 */
610c512c691SLinus Torvalds #define unsafe_copy_to_user(_dst,_src,_len,label)			\
611c512c691SLinus Torvalds do {									\
612c512c691SLinus Torvalds 	char __user *__ucu_dst = (_dst);				\
613c512c691SLinus Torvalds 	const char *__ucu_src = (_src);					\
614c512c691SLinus Torvalds 	size_t __ucu_len = (_len);					\
615c512c691SLinus Torvalds 	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
616c512c691SLinus Torvalds 	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
617c512c691SLinus Torvalds 	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
618c512c691SLinus Torvalds 	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
619c512c691SLinus Torvalds } while (0)
620c512c691SLinus Torvalds 
621865c50e1SNick Desaulniers #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * __get_kernel_nofault(): read a 'type'-sized value from kernel address
 * @src into *@dst, branching to @err_label if the access faults.  The
 * (__force ... __user) cast exists only to reuse the __get_user_size()
 * machinery on a kernel pointer; it does not imply any access_ok()
 * style range check here.
 */
622865c50e1SNick Desaulniers #define __get_kernel_nofault(dst, src, type, err_label)			\
623865c50e1SNick Desaulniers 	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
624865c50e1SNick Desaulniers 			sizeof(type), err_label)
625865c50e1SNick Desaulniers #else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * Fallback without asm-goto output operands: the fault is reported via
 * __kr_err (written by the __get_user_size() asm / exception table) and
 * turned into an explicit goto.
 */
626fa94111dSChristoph Hellwig #define __get_kernel_nofault(dst, src, type, err_label)			\
627fa94111dSChristoph Hellwig do {									\
628fa94111dSChristoph Hellwig 	int __kr_err;							\
629fa94111dSChristoph Hellwig 									\
6303beff76bSLinus Torvalds 	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
631fa94111dSChristoph Hellwig 			sizeof(type), __kr_err);			\
632fa94111dSChristoph Hellwig 	if (unlikely(__kr_err))						\
633fa94111dSChristoph Hellwig 		goto err_label;						\
634fa94111dSChristoph Hellwig } while (0)
635865c50e1SNick Desaulniers #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
636fa94111dSChristoph Hellwig 
/*
 * __put_kernel_nofault(): mirror image of the above -- write *@src to
 * kernel address @dst, jumping to @err_label on fault.
 */
637fa94111dSChristoph Hellwig #define __put_kernel_nofault(dst, src, type, err_label)			\
638fa94111dSChristoph Hellwig 	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
639fa94111dSChristoph Hellwig 			sizeof(type), err_label)
640fa94111dSChristoph Hellwig 
6411965aae3SH. Peter Anvin #endif /* _ASM_X86_UACCESS_H */
642bb898558SAl Viro 
643