/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
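
/*
 * Illustrative sketch, not part of this header: the historical pattern
 * for letting the user accessors operate on kernel addresses.  The old
 * limit must be restored on every exit path; do_kernel_read() is a
 * hypothetical helper here:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_kernel_read(file, kbuf, count);
 *	set_fs(old_fs);
 */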

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
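
/*
 * Worked example (illustrative): with a constant size of 8 and
 * limit == TASK_SIZE_MAX, an addr of limit - 4 is rejected because
 * addr > limit - size, and addr + size is never computed, so an addr
 * near the top of the address space cannot wrap past the check.
 */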

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
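
/*
 * Typical use (illustrative; buf and count are hypothetical syscall
 * arguments):
 *
 *	if (!access_ok(buf, count))
 *		return -EFAULT;
 *
 * The subsequent copy may still fault and return -EFAULT, e.g. for an
 * in-range but unmapped address.
 */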

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

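/*
 * For example (illustrative): __inttype(*(u64 __user *)p) evaluates to
 * unsigned long long on 32-bit kernels, where sizeof(u64) > sizeof(0UL),
 * while __inttype(*(int __user *)p) is unsigned long on both 32-bit and
 * 64-bit kernels.
 */
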
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
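
/*
 * Typical use (illustrative; arg is a hypothetical ioctl argument):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */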

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  Clobbers %ebx
 * (%rbx on 64-bit builds).
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
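
/*
 * Typical use (illustrative; status and statusp are hypothetical):
 *
 *	if (put_user(status, (int __user *)statusp))
 *		return -EFAULT;
 */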

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))


/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
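
/*
 * Illustrative sketch of the check-once, access-many pattern the __xxx
 * variants exist for (uarg, a and b are hypothetical):
 *
 *	if (!access_ok(uarg, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg[0]) || __get_user(b, &uarg[1]))
 *		return -EFAULT;
 */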

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

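/*
 * Illustrative sketch, modeled on signal-frame setup (the frame layout
 * is hypothetical):
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *		put_user_ex(regs->sp, &frame->sp);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */
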
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

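/*
 * Illustrative sketch of a futex-style use (uaddr, oldval and newval
 * are hypothetical); on success, curval holds the value that was
 * actually found at uaddr:
 *
 *	u32 curval;
 *	int ret;
 *
 *	ret = user_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
 *	if (ret)
 *		return ret;
 *	if (curval != oldval)
 *		return -EAGAIN;
 */
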
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

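/*
 * Illustrative sketch (uptr, lo and hi are hypothetical): a batched
 * read with the unsafe accessors, bracketed by user_access_begin() and
 * user_access_end() on both the success and failure paths:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uptr[0], Efault);
 *	unsafe_get_user(hi, &uptr[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */
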
#endif /* _ASM_X86_UACCESS_H */