/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 */
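/*
 * Illustrative sketch only (not part of this header's API; "ubuf" is a
 * hypothetical u32 __user pointer, "len" and "i" hypothetical locals):
 * one access_ok() check followed by repeated "__xxx" accesses to the
 * same range:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < len; i++)
 *		if (__put_user(0, ubuf + i))
 *			return -EFAULT;
 */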
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
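/*
 * Illustrative sketch only ("val" and "uptr" are hypothetical, not part
 * of this header): fetching a single int from user space, with the
 * address check done by get_user() itself:
 *
 *	unsigned int val;
 *
 *	if (get_user(val, (unsigned int __user *)uptr))
 *		return -EFAULT;
 *
 * (On failure, val has been zeroed.)
 */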
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  Clobbers %rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
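/*
 * Illustrative sketch only ("status" and "uptr" are hypothetical, not
 * part of this header): writing a single int back to user space:
 *
 *	if (put_user(status, (int __user *)uptr))
 *		return -EFAULT;
 */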
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
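/*
 * Illustrative sketch only ("uarg" is a hypothetical u32 __user pointer
 * that was already range-checked): two __get_user() calls covered by a
 * single earlier access_ok(VERIFY_READ, uarg, 2 * sizeof(u32)) check:
 *
 *	u32 lo, hi;
 *
 *	if (__get_user(lo, uarg) || __get_user(hi, uarg + 1))
 *		return -EFAULT;
 */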
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
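/*
 * Illustrative sketch only ("uptrs" and "out" are hypothetical arrays):
 * one access_ok() check, a user_access_begin/end() section, and
 * goto-based error handling via the err_label argument:
 *
 *	if (!access_ok(VERIFY_READ, uptrs, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_get_user(out[0], &uptrs[0], Efault);
 *	unsafe_get_user(out[1], &uptrs[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */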
#endif /* _ASM_X86_UACCESS_H */