11965aae3SH. Peter Anvin #ifndef _ASM_X86_UACCESS_H 21965aae3SH. Peter Anvin #define _ASM_X86_UACCESS_H 3bb898558SAl Viro /* 4bb898558SAl Viro * User space memory access functions 5bb898558SAl Viro */ 6bb898558SAl Viro #include <linux/compiler.h> 71771c6e1SAndrey Ryabinin #include <linux/kasan-checks.h> 8bb898558SAl Viro #include <linux/string.h> 9bb898558SAl Viro #include <asm/asm.h> 10bb898558SAl Viro #include <asm/page.h> 1163bcff2aSH. Peter Anvin #include <asm/smap.h> 1245caf470SAl Viro #include <asm/extable.h> 13bb898558SAl Viro 14bb898558SAl Viro /* 15bb898558SAl Viro * The fs value determines whether argument validity checking should be 16bb898558SAl Viro * performed or not. If get_fs() == USER_DS, checking is performed, with 17bb898558SAl Viro * get_fs() == KERNEL_DS, checking is bypassed. 18bb898558SAl Viro * 19bb898558SAl Viro * For historical reasons, these macros are grossly misnamed. 20bb898558SAl Viro */ 21bb898558SAl Viro 22bb898558SAl Viro #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 23bb898558SAl Viro 24bb898558SAl Viro #define KERNEL_DS MAKE_MM_SEG(-1UL) 259063c61fSLinus Torvalds #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) 26bb898558SAl Viro 27bb898558SAl Viro #define get_ds() (KERNEL_DS) 2813d4ea09SAndy Lutomirski #define get_fs() (current->thread.addr_limit) 29*5ea0727bSThomas Garnier static inline void set_fs(mm_segment_t fs) 30*5ea0727bSThomas Garnier { 31*5ea0727bSThomas Garnier current->thread.addr_limit = fs; 32*5ea0727bSThomas Garnier /* On user-mode return, check fs is correct */ 33*5ea0727bSThomas Garnier set_thread_flag(TIF_FSCHECK); 34*5ea0727bSThomas Garnier } 35bb898558SAl Viro 36bb898558SAl Viro #define segment_eq(a, b) ((a).seg == (b).seg) 37bb898558SAl Viro 3813d4ea09SAndy Lutomirski #define user_addr_max() (current->thread.addr_limit.seg) 39bb898558SAl Viro #define __addr_ok(addr) \ 40bc6ca7b3SArun Sharma ((unsigned long __force)(addr) < user_addr_max()) 41bb898558SAl Viro 42bb898558SAl Viro /* 43bb898558SAl Viro * Test whether a block of memory is a valid user space address. 44bb898558SAl Viro * Returns 0 if the range is valid, nonzero otherwise. 45bb898558SAl Viro */ 46a740576aSH. Peter Anvin static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) 47c5fe5d80SLinus Torvalds { 48c5fe5d80SLinus Torvalds /* 49c5fe5d80SLinus Torvalds * If we have used "sizeof()" for the size, 50c5fe5d80SLinus Torvalds * we know it won't overflow the limit (but 51c5fe5d80SLinus Torvalds * it might overflow the 'addr', so it's 52c5fe5d80SLinus Torvalds * important to subtract the size from the 53c5fe5d80SLinus Torvalds * limit, not add it to the address). 54c5fe5d80SLinus Torvalds */ 55c5fe5d80SLinus Torvalds if (__builtin_constant_p(size)) 567e0f51cbSAndy Lutomirski return unlikely(addr > limit - size); 57c5fe5d80SLinus Torvalds 58c5fe5d80SLinus Torvalds /* Arbitrary sizes? Be careful about overflow */ 59c5fe5d80SLinus Torvalds addr += size; 607e0f51cbSAndy Lutomirski if (unlikely(addr < size)) 61a740576aSH. Peter Anvin return true; 627e0f51cbSAndy Lutomirski return unlikely(addr > limit); 63c5fe5d80SLinus Torvalds } 64bb898558SAl Viro 65bc6ca7b3SArun Sharma #define __range_not_ok(addr, size, limit) \ 66bb898558SAl Viro ({ \ 67bb898558SAl Viro __chk_user_ptr(addr); \ 68c5fe5d80SLinus Torvalds __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ 69bb898558SAl Viro }) 70bb898558SAl Viro 717c478895SPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 727c478895SPeter Zijlstra # define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task()) 737c478895SPeter Zijlstra #else 747c478895SPeter Zijlstra # define WARN_ON_IN_IRQ() 757c478895SPeter Zijlstra #endif 767c478895SPeter Zijlstra 77bb898558SAl Viro /** 78bb898558SAl Viro * access_ok: - Checks if a user space pointer is valid 79bb898558SAl Viro * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 80bb898558SAl Viro * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 81bb898558SAl Viro * to write to a block, it is always safe to read from it. 82bb898558SAl Viro * @addr: User space pointer to start of block to check 83bb898558SAl Viro * @size: Size of block to check 84bb898558SAl Viro * 85b3c395efSDavid Hildenbrand * Context: User context only. This function may sleep if pagefaults are 86b3c395efSDavid Hildenbrand * enabled. 87bb898558SAl Viro * 88bb898558SAl Viro * Checks if a pointer to a block of memory in user space is valid. 89bb898558SAl Viro * 90bb898558SAl Viro * Returns true (nonzero) if the memory block may be valid, false (zero) 91bb898558SAl Viro * if it is definitely invalid. 92bb898558SAl Viro * 93bb898558SAl Viro * Note that, depending on architecture, this function probably just 94bb898558SAl Viro * checks that the pointer is in the user space range - after calling 95bb898558SAl Viro * this function, memory access functions may still return -EFAULT. 96bb898558SAl Viro */ 97bc6ca7b3SArun Sharma #define access_ok(type, addr, size) \ 987c478895SPeter Zijlstra ({ \ 997c478895SPeter Zijlstra WARN_ON_IN_IRQ(); \ 1007c478895SPeter Zijlstra likely(!__range_not_ok(addr, size, user_addr_max())); \ 1017c478895SPeter Zijlstra }) 102bb898558SAl Viro 103bb898558SAl Viro /* 104bb898558SAl Viro * These are the main single-value transfer routines. They automatically 105bb898558SAl Viro * use the right size if we just have the right pointer type. 106bb898558SAl Viro * 107bb898558SAl Viro * This gets kind of ugly. We want to return _two_ values in "get_user()" 108bb898558SAl Viro * and yet we don't want to do any pointers, because that is too much 109bb898558SAl Viro * of a performance impact. Thus we have a few rather ugly macros here, 110bb898558SAl Viro * and hide all the ugliness from the user. 111bb898558SAl Viro * 112bb898558SAl Viro * The "__xxx" versions of the user access functions are versions that 113bb898558SAl Viro * do not verify the address space, that must have been done previously 114bb898558SAl Viro * with a separate "access_ok()" call (this is used when we do multiple 115bb898558SAl Viro * accesses to the same area of user memory). 116bb898558SAl Viro */ 117bb898558SAl Viro 118bb898558SAl Viro extern int __get_user_1(void); 119bb898558SAl Viro extern int __get_user_2(void); 120bb898558SAl Viro extern int __get_user_4(void); 121bb898558SAl Viro extern int __get_user_8(void); 122bb898558SAl Viro extern int __get_user_bad(void); 123bb898558SAl Viro 12411f1a4b9SLinus Torvalds #define __uaccess_begin() stac() 12511f1a4b9SLinus Torvalds #define __uaccess_end() clac() 12611f1a4b9SLinus Torvalds 1273578baaeSH. Peter Anvin /* 1283578baaeSH. Peter Anvin * This is a type: either unsigned long, if the argument fits into 1293578baaeSH. Peter Anvin * that type, or otherwise unsigned long long. 1303578baaeSH. Peter Anvin */ 1313578baaeSH. Peter Anvin #define __inttype(x) \ 1323578baaeSH. Peter Anvin __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) 133bb898558SAl Viro 134bb898558SAl Viro /** 135bb898558SAl Viro * get_user: - Get a simple variable from user space. 136bb898558SAl Viro * @x: Variable to store result. 137bb898558SAl Viro * @ptr: Source address, in user space. 138bb898558SAl Viro * 139b3c395efSDavid Hildenbrand * Context: User context only. This function may sleep if pagefaults are 140b3c395efSDavid Hildenbrand * enabled. 141bb898558SAl Viro * 142bb898558SAl Viro * This macro copies a single simple variable from user space to kernel 143bb898558SAl Viro * space. It supports simple types like char and int, but not larger 144bb898558SAl Viro * data types like structures or arrays. 145bb898558SAl Viro * 146bb898558SAl Viro * @ptr must have pointer-to-simple-variable type, and the result of 147bb898558SAl Viro * dereferencing @ptr must be assignable to @x without a cast. 148bb898558SAl Viro * 149bb898558SAl Viro * Returns zero on success, or -EFAULT on error. 150bb898558SAl Viro * On error, the variable @x is set to zero. 151ff52c3b0SH. Peter Anvin */ 152ff52c3b0SH. Peter Anvin /* 1533578baaeSH. Peter Anvin * Careful: we have to cast the result to the type of the pointer 1543578baaeSH. Peter Anvin * for sign reasons. 155ff52c3b0SH. Peter Anvin * 156f69fa9a9SH. Peter Anvin * The use of _ASM_DX as the register specifier is a bit of a 157ff52c3b0SH. Peter Anvin * simplification, as gcc only cares about it as the starting point 158ff52c3b0SH. Peter Anvin * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits 159ff52c3b0SH. Peter Anvin * (%ecx being the next register in gcc's x86 register sequence), and 160ff52c3b0SH. Peter Anvin * %rdx on 64 bits. 161f69fa9a9SH. Peter Anvin * 162f69fa9a9SH. Peter Anvin * Clang/LLVM cares about the size of the register, but still wants 163f69fa9a9SH. Peter Anvin * the base register for something that ends up being a pair. 164bb898558SAl Viro */ 165bb898558SAl Viro #define get_user(x, ptr) \ 166bb898558SAl Viro ({ \ 167bb898558SAl Viro int __ret_gu; \ 168bdfc017eSJan-Simon Möller register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ 169f05058c4SChris J Arges register void *__sp asm(_ASM_SP); \ 170bb898558SAl Viro __chk_user_ptr(ptr); \ 171d1a76187SIngo Molnar might_fault(); \ 172f05058c4SChris J Arges asm volatile("call __get_user_%P4" \ 173f05058c4SChris J Arges : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \ 1743578baaeSH. Peter Anvin : "0" (ptr), "i" (sizeof(*(ptr)))); \ 175e182c570SMichael S. Tsirkin (x) = (__force __typeof__(*(ptr))) __val_gu; \ 176a76cf66eSAndy Lutomirski __builtin_expect(__ret_gu, 0); \ 177bb898558SAl Viro }) 178bb898558SAl Viro 179bb898558SAl Viro #define __put_user_x(size, x, ptr, __ret_pu) \ 180bb898558SAl Viro asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ 181bb898558SAl Viro : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 182bb898558SAl Viro 183bb898558SAl Viro 184bb898558SAl Viro 185bb898558SAl Viro #ifdef CONFIG_X86_32 18618114f61SHiroshi Shimamoto #define __put_user_asm_u64(x, addr, err, errret) \ 18711f1a4b9SLinus Torvalds asm volatile("\n" \ 18863bcff2aSH. Peter Anvin "1: movl %%eax,0(%2)\n" \ 189bb898558SAl Viro "2: movl %%edx,4(%2)\n" \ 19011f1a4b9SLinus Torvalds "3:" \ 191bb898558SAl Viro ".section .fixup,\"ax\"\n" \ 192bb898558SAl Viro "4: movl %3,%0\n" \ 193bb898558SAl Viro " jmp 3b\n" \ 194bb898558SAl Viro ".previous\n" \ 195bb898558SAl Viro _ASM_EXTABLE(1b, 4b) \ 196bb898558SAl Viro _ASM_EXTABLE(2b, 4b) \ 197bb898558SAl Viro : "=r" (err) \ 19818114f61SHiroshi Shimamoto : "A" (x), "r" (addr), "i" (errret), "0" (err)) 199bb898558SAl Viro 200fe40c0afSHiroshi Shimamoto #define __put_user_asm_ex_u64(x, addr) \ 20111f1a4b9SLinus Torvalds asm volatile("\n" \ 20263bcff2aSH. Peter Anvin "1: movl %%eax,0(%1)\n" \ 203fe40c0afSHiroshi Shimamoto "2: movl %%edx,4(%1)\n" \ 20411f1a4b9SLinus Torvalds "3:" \ 205535c0c34SH. Peter Anvin _ASM_EXTABLE_EX(1b, 2b) \ 206535c0c34SH. Peter Anvin _ASM_EXTABLE_EX(2b, 3b) \ 207fe40c0afSHiroshi Shimamoto : : "A" (x), "r" (addr)) 208fe40c0afSHiroshi Shimamoto 209bb898558SAl Viro #define __put_user_x8(x, ptr, __ret_pu) \ 210bb898558SAl Viro asm volatile("call __put_user_8" : "=a" (__ret_pu) \ 211bb898558SAl Viro : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 212bb898558SAl Viro #else 21318114f61SHiroshi Shimamoto #define __put_user_asm_u64(x, ptr, retval, errret) \ 214ebe119cdSH. Peter Anvin __put_user_asm(x, ptr, retval, "q", "", "er", errret) 215fe40c0afSHiroshi Shimamoto #define __put_user_asm_ex_u64(x, addr) \ 216ebe119cdSH. Peter Anvin __put_user_asm_ex(x, addr, "q", "", "er") 217bb898558SAl Viro #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 218bb898558SAl Viro #endif 219bb898558SAl Viro 220bb898558SAl Viro extern void __put_user_bad(void); 221bb898558SAl Viro 222bb898558SAl Viro /* 223bb898558SAl Viro * Strange magic calling convention: pointer in %ecx, 224bb898558SAl Viro * value in %eax(:%edx), return value in %eax. clobbers %rbx 225bb898558SAl Viro */ 226bb898558SAl Viro extern void __put_user_1(void); 227bb898558SAl Viro extern void __put_user_2(void); 228bb898558SAl Viro extern void __put_user_4(void); 229bb898558SAl Viro extern void __put_user_8(void); 230bb898558SAl Viro 231bb898558SAl Viro /** 232bb898558SAl Viro * put_user: - Write a simple value into user space. 233bb898558SAl Viro * @x: Value to copy to user space. 234bb898558SAl Viro * @ptr: Destination address, in user space. 235bb898558SAl Viro * 236b3c395efSDavid Hildenbrand * Context: User context only. This function may sleep if pagefaults are 237b3c395efSDavid Hildenbrand * enabled. 238bb898558SAl Viro * 239bb898558SAl Viro * This macro copies a single simple value from kernel space to user 240bb898558SAl Viro * space. It supports simple types like char and int, but not larger 241bb898558SAl Viro * data types like structures or arrays. 242bb898558SAl Viro * 243bb898558SAl Viro * @ptr must have pointer-to-simple-variable type, and @x must be assignable 244bb898558SAl Viro * to the result of dereferencing @ptr. 245bb898558SAl Viro * 246bb898558SAl Viro * Returns zero on success, or -EFAULT on error. 247bb898558SAl Viro */ 248bb898558SAl Viro #define put_user(x, ptr) \ 249bb898558SAl Viro ({ \ 250bb898558SAl Viro int __ret_pu; \ 251bb898558SAl Viro __typeof__(*(ptr)) __pu_val; \ 252bb898558SAl Viro __chk_user_ptr(ptr); \ 253d1a76187SIngo Molnar might_fault(); \ 254bb898558SAl Viro __pu_val = x; \ 255bb898558SAl Viro switch (sizeof(*(ptr))) { \ 256bb898558SAl Viro case 1: \ 257bb898558SAl Viro __put_user_x(1, __pu_val, ptr, __ret_pu); \ 258bb898558SAl Viro break; \ 259bb898558SAl Viro case 2: \ 260bb898558SAl Viro __put_user_x(2, __pu_val, ptr, __ret_pu); \ 261bb898558SAl Viro break; \ 262bb898558SAl Viro case 4: \ 263bb898558SAl Viro __put_user_x(4, __pu_val, ptr, __ret_pu); \ 264bb898558SAl Viro break; \ 265bb898558SAl Viro case 8: \ 266bb898558SAl Viro __put_user_x8(__pu_val, ptr, __ret_pu); \ 267bb898558SAl Viro break; \ 268bb898558SAl Viro default: \ 269bb898558SAl Viro __put_user_x(X, __pu_val, ptr, __ret_pu); \ 270bb898558SAl Viro break; \ 271bb898558SAl Viro } \ 272a76cf66eSAndy Lutomirski __builtin_expect(__ret_pu, 0); \ 273bb898558SAl Viro }) 274bb898558SAl Viro 275bb898558SAl Viro #define __put_user_size(x, ptr, size, retval, errret) \ 276bb898558SAl Viro do { \ 277bb898558SAl Viro retval = 0; \ 278bb898558SAl Viro __chk_user_ptr(ptr); \ 279bb898558SAl Viro switch (size) { \ 280bb898558SAl Viro case 1: \ 281bb898558SAl Viro __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ 282bb898558SAl Viro break; \ 283bb898558SAl Viro case 2: \ 284bb898558SAl Viro __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 285bb898558SAl Viro break; \ 286bb898558SAl Viro case 4: \ 287bb898558SAl Viro __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ 288bb898558SAl Viro break; \ 289bb898558SAl Viro case 8: \ 29018114f61SHiroshi Shimamoto __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ 29118114f61SHiroshi Shimamoto errret); \ 292bb898558SAl Viro break; \ 293bb898558SAl Viro default: \ 294bb898558SAl Viro __put_user_bad(); \ 295bb898558SAl Viro } \ 296bb898558SAl Viro } while (0) 297bb898558SAl Viro 29811f1a4b9SLinus Torvalds /* 29911f1a4b9SLinus Torvalds * This doesn't do __uaccess_begin/end - the exception handling 30011f1a4b9SLinus Torvalds * around it must do that. 30111f1a4b9SLinus Torvalds */ 302fe40c0afSHiroshi Shimamoto #define __put_user_size_ex(x, ptr, size) \ 303fe40c0afSHiroshi Shimamoto do { \ 304fe40c0afSHiroshi Shimamoto __chk_user_ptr(ptr); \ 305fe40c0afSHiroshi Shimamoto switch (size) { \ 306fe40c0afSHiroshi Shimamoto case 1: \ 307fe40c0afSHiroshi Shimamoto __put_user_asm_ex(x, ptr, "b", "b", "iq"); \ 308fe40c0afSHiroshi Shimamoto break; \ 309fe40c0afSHiroshi Shimamoto case 2: \ 310fe40c0afSHiroshi Shimamoto __put_user_asm_ex(x, ptr, "w", "w", "ir"); \ 311fe40c0afSHiroshi Shimamoto break; \ 312fe40c0afSHiroshi Shimamoto case 4: \ 313fe40c0afSHiroshi Shimamoto __put_user_asm_ex(x, ptr, "l", "k", "ir"); \ 314fe40c0afSHiroshi Shimamoto break; \ 315fe40c0afSHiroshi Shimamoto case 8: \ 316fe40c0afSHiroshi Shimamoto __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \ 317fe40c0afSHiroshi Shimamoto break; \ 318fe40c0afSHiroshi Shimamoto default: \ 319fe40c0afSHiroshi Shimamoto __put_user_bad(); \ 320fe40c0afSHiroshi Shimamoto } \ 321fe40c0afSHiroshi Shimamoto } while (0) 322fe40c0afSHiroshi Shimamoto 323bb898558SAl Viro #ifdef CONFIG_X86_32 324b2f68038SBenjamin LaHaise #define __get_user_asm_u64(x, ptr, retval, errret) \ 325b2f68038SBenjamin LaHaise ({ \ 326b2f68038SBenjamin LaHaise __typeof__(ptr) __ptr = (ptr); \ 32733c9e972SLinus Torvalds asm volatile("\n" \ 328b2f68038SBenjamin LaHaise "1: movl %2,%%eax\n" \ 329b2f68038SBenjamin LaHaise "2: movl %3,%%edx\n" \ 33033c9e972SLinus Torvalds "3:\n" \ 331b2f68038SBenjamin LaHaise ".section .fixup,\"ax\"\n" \ 332b2f68038SBenjamin LaHaise "4: mov %4,%0\n" \ 333b2f68038SBenjamin LaHaise " xorl %%eax,%%eax\n" \ 334b2f68038SBenjamin LaHaise " xorl %%edx,%%edx\n" \ 335b2f68038SBenjamin LaHaise " jmp 3b\n" \ 336b2f68038SBenjamin LaHaise ".previous\n" \ 337b2f68038SBenjamin LaHaise _ASM_EXTABLE(1b, 4b) \ 338b2f68038SBenjamin LaHaise _ASM_EXTABLE(2b, 4b) \ 33933c9e972SLinus Torvalds : "=r" (retval), "=&A"(x) \ 340b2f68038SBenjamin LaHaise : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ 341b2f68038SBenjamin LaHaise "i" (errret), "0" (retval)); \ 342b2f68038SBenjamin LaHaise }) 343b2f68038SBenjamin LaHaise 344fe40c0afSHiroshi Shimamoto #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad() 345bb898558SAl Viro #else 346bb898558SAl Viro #define __get_user_asm_u64(x, ptr, retval, errret) \ 347bb898558SAl Viro __get_user_asm(x, ptr, retval, "q", "", "=r", errret) 348fe40c0afSHiroshi Shimamoto #define __get_user_asm_ex_u64(x, ptr) \ 349fe40c0afSHiroshi Shimamoto __get_user_asm_ex(x, ptr, "q", "", "=r") 350bb898558SAl Viro #endif 351bb898558SAl Viro 352bb898558SAl Viro #define __get_user_size(x, ptr, size, retval, errret) \ 353bb898558SAl Viro do { \ 354bb898558SAl Viro retval = 0; \ 355bb898558SAl Viro __chk_user_ptr(ptr); \ 356bb898558SAl Viro switch (size) { \ 357bb898558SAl Viro case 1: \ 358bb898558SAl Viro __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ 359bb898558SAl Viro break; \ 360bb898558SAl Viro case 2: \ 361bb898558SAl Viro __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ 362bb898558SAl Viro break; \ 363bb898558SAl Viro case 4: \ 364bb898558SAl Viro __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \ 365bb898558SAl Viro break; \ 366bb898558SAl Viro case 8: \ 367bb898558SAl Viro __get_user_asm_u64(x, ptr, retval, errret); \ 368bb898558SAl Viro break; \ 369bb898558SAl Viro default: \ 370bb898558SAl Viro (x) = __get_user_bad(); \ 371bb898558SAl Viro } \ 372bb898558SAl Viro } while (0) 373bb898558SAl Viro 374bb898558SAl Viro #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 37511f1a4b9SLinus Torvalds asm volatile("\n" \ 37663bcff2aSH. Peter Anvin "1: mov"itype" %2,%"rtype"1\n" \ 37711f1a4b9SLinus Torvalds "2:\n" \ 378bb898558SAl Viro ".section .fixup,\"ax\"\n" \ 379bb898558SAl Viro "3: mov %3,%0\n" \ 380bb898558SAl Viro " xor"itype" %"rtype"1,%"rtype"1\n" \ 381bb898558SAl Viro " jmp 2b\n" \ 382bb898558SAl Viro ".previous\n" \ 383bb898558SAl Viro _ASM_EXTABLE(1b, 3b) \ 384bb898558SAl Viro : "=r" (err), ltype(x) \ 385bb898558SAl Viro : "m" (__m(addr)), "i" (errret), "0" (err)) 386bb898558SAl Viro 387122b05ddSAl Viro #define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \ 388122b05ddSAl Viro asm volatile("\n" \ 389122b05ddSAl Viro "1: mov"itype" %2,%"rtype"1\n" \ 390122b05ddSAl Viro "2:\n" \ 391122b05ddSAl Viro ".section .fixup,\"ax\"\n" \ 392122b05ddSAl Viro "3: mov %3,%0\n" \ 393122b05ddSAl Viro " jmp 2b\n" \ 394122b05ddSAl Viro ".previous\n" \ 395122b05ddSAl Viro _ASM_EXTABLE(1b, 3b) \ 396122b05ddSAl Viro : "=r" (err), ltype(x) \ 397122b05ddSAl Viro : "m" (__m(addr)), "i" (errret), "0" (err)) 398122b05ddSAl Viro 39911f1a4b9SLinus Torvalds /* 40011f1a4b9SLinus Torvalds * This doesn't do __uaccess_begin/end - the exception handling 40111f1a4b9SLinus Torvalds * around it must do that. 40211f1a4b9SLinus Torvalds */ 403fe40c0afSHiroshi Shimamoto #define __get_user_size_ex(x, ptr, size) \ 404fe40c0afSHiroshi Shimamoto do { \ 405fe40c0afSHiroshi Shimamoto __chk_user_ptr(ptr); \ 406fe40c0afSHiroshi Shimamoto switch (size) { \ 407fe40c0afSHiroshi Shimamoto case 1: \ 408fe40c0afSHiroshi Shimamoto __get_user_asm_ex(x, ptr, "b", "b", "=q"); \ 409fe40c0afSHiroshi Shimamoto break; \ 410fe40c0afSHiroshi Shimamoto case 2: \ 411fe40c0afSHiroshi Shimamoto __get_user_asm_ex(x, ptr, "w", "w", "=r"); \ 412fe40c0afSHiroshi Shimamoto break; \ 413fe40c0afSHiroshi Shimamoto case 4: \ 414fe40c0afSHiroshi Shimamoto __get_user_asm_ex(x, ptr, "l", "k", "=r"); \ 415fe40c0afSHiroshi Shimamoto break; \ 416fe40c0afSHiroshi Shimamoto case 8: \ 417fe40c0afSHiroshi Shimamoto __get_user_asm_ex_u64(x, ptr); \ 418fe40c0afSHiroshi Shimamoto break; \ 419fe40c0afSHiroshi Shimamoto default: \ 420fe40c0afSHiroshi Shimamoto (x) = __get_user_bad(); \ 421fe40c0afSHiroshi Shimamoto } \ 422fe40c0afSHiroshi Shimamoto } while (0) 423fe40c0afSHiroshi Shimamoto 424fe40c0afSHiroshi Shimamoto #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ 4255e88353dSH. Peter Anvin asm volatile("1: mov"itype" %1,%"rtype"0\n" \ 4265e88353dSH. Peter Anvin "2:\n" \ 4271c109fabSAl Viro ".section .fixup,\"ax\"\n" \ 4281c109fabSAl Viro "3:xor"itype" %"rtype"0,%"rtype"0\n" \ 4291c109fabSAl Viro " jmp 2b\n" \ 4301c109fabSAl Viro ".previous\n" \ 4311c109fabSAl Viro _ASM_EXTABLE_EX(1b, 3b) \ 432fe40c0afSHiroshi Shimamoto : ltype(x) : "m" (__m(addr))) 433fe40c0afSHiroshi Shimamoto 434bb898558SAl Viro #define __put_user_nocheck(x, ptr, size) \ 435bb898558SAl Viro ({ \ 43616855f87SHiroshi Shimamoto int __pu_err; \ 43711f1a4b9SLinus Torvalds __uaccess_begin(); \ 438bb898558SAl Viro __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ 43911f1a4b9SLinus Torvalds __uaccess_end(); \ 440a76cf66eSAndy Lutomirski __builtin_expect(__pu_err, 0); \ 441bb898558SAl Viro }) 442bb898558SAl Viro 443bb898558SAl Viro #define __get_user_nocheck(x, ptr, size) \ 444bb898558SAl Viro ({ \ 44516855f87SHiroshi Shimamoto int __gu_err; \ 446b2f68038SBenjamin LaHaise __inttype(*(ptr)) __gu_val; \ 44711f1a4b9SLinus Torvalds __uaccess_begin(); \ 448bb898558SAl Viro __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ 44911f1a4b9SLinus Torvalds __uaccess_end(); \ 450bb898558SAl Viro (x) = (__force __typeof__(*(ptr)))__gu_val; \ 451a76cf66eSAndy Lutomirski __builtin_expect(__gu_err, 0); \ 452bb898558SAl Viro }) 453bb898558SAl Viro 454bb898558SAl Viro /* FIXME: this hack is definitely wrong -AK */ 455bb898558SAl Viro struct __large_struct { unsigned long buf[100]; }; 456bb898558SAl Viro #define __m(x) (*(struct __large_struct __user *)(x)) 457bb898558SAl Viro 458bb898558SAl Viro /* 459bb898558SAl Viro * Tell gcc we read from memory instead of writing: this is because 460bb898558SAl Viro * we do not write to any memory gcc knows about, so there are no 461bb898558SAl Viro * aliasing issues. 462bb898558SAl Viro */ 463bb898558SAl Viro #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 46411f1a4b9SLinus Torvalds asm volatile("\n" \ 46563bcff2aSH. Peter Anvin "1: mov"itype" %"rtype"1,%2\n" \ 46611f1a4b9SLinus Torvalds "2:\n" \ 467bb898558SAl Viro ".section .fixup,\"ax\"\n" \ 468bb898558SAl Viro "3: mov %3,%0\n" \ 469bb898558SAl Viro " jmp 2b\n" \ 470bb898558SAl Viro ".previous\n" \ 471bb898558SAl Viro _ASM_EXTABLE(1b, 3b) \ 472bb898558SAl Viro : "=r"(err) \ 473bb898558SAl Viro : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) 474fe40c0afSHiroshi Shimamoto 475fe40c0afSHiroshi Shimamoto #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ 4765e88353dSH. Peter Anvin asm volatile("1: mov"itype" %"rtype"0,%1\n" \ 4775e88353dSH. Peter Anvin "2:\n" \ 478535c0c34SH. Peter Anvin _ASM_EXTABLE_EX(1b, 2b) \ 479fe40c0afSHiroshi Shimamoto : : ltype(x), "m" (__m(addr))) 480fe40c0afSHiroshi Shimamoto 481fe40c0afSHiroshi Shimamoto /* 482fe40c0afSHiroshi Shimamoto * uaccess_try and catch 483fe40c0afSHiroshi Shimamoto */ 484fe40c0afSHiroshi Shimamoto #define uaccess_try do { \ 485dfa9a942SAndy Lutomirski current->thread.uaccess_err = 0; \ 48611f1a4b9SLinus Torvalds __uaccess_begin(); \ 487fe40c0afSHiroshi Shimamoto barrier(); 488fe40c0afSHiroshi Shimamoto 489fe40c0afSHiroshi Shimamoto #define uaccess_catch(err) \ 49011f1a4b9SLinus Torvalds __uaccess_end(); \ 491dfa9a942SAndy Lutomirski (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \ 492fe40c0afSHiroshi Shimamoto } while (0) 493fe40c0afSHiroshi Shimamoto 494bb898558SAl Viro /** 495bb898558SAl Viro * __get_user: - Get a simple variable from user space, with less checking. 496bb898558SAl Viro * @x: Variable to store result. 497bb898558SAl Viro * @ptr: Source address, in user space. 498bb898558SAl Viro * 499b3c395efSDavid Hildenbrand * Context: User context only. This function may sleep if pagefaults are 500b3c395efSDavid Hildenbrand * enabled. 501bb898558SAl Viro * 502bb898558SAl Viro * This macro copies a single simple variable from user space to kernel 503bb898558SAl Viro * space. It supports simple types like char and int, but not larger 504bb898558SAl Viro * data types like structures or arrays. 505bb898558SAl Viro * 506bb898558SAl Viro * @ptr must have pointer-to-simple-variable type, and the result of 507bb898558SAl Viro * dereferencing @ptr must be assignable to @x without a cast. 508bb898558SAl Viro * 509bb898558SAl Viro * Caller must check the pointer with access_ok() before calling this 510bb898558SAl Viro * function. 511bb898558SAl Viro * 512bb898558SAl Viro * Returns zero on success, or -EFAULT on error. 513bb898558SAl Viro * On error, the variable @x is set to zero. 514bb898558SAl Viro */ 515bb898558SAl Viro 516bb898558SAl Viro #define __get_user(x, ptr) \ 517bb898558SAl Viro __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 518fe40c0afSHiroshi Shimamoto 519bb898558SAl Viro /** 520bb898558SAl Viro * __put_user: - Write a simple value into user space, with less checking. 521bb898558SAl Viro * @x: Value to copy to user space. 522bb898558SAl Viro * @ptr: Destination address, in user space. 523bb898558SAl Viro * 524b3c395efSDavid Hildenbrand * Context: User context only. This function may sleep if pagefaults are 525b3c395efSDavid Hildenbrand * enabled. 526bb898558SAl Viro * 527bb898558SAl Viro * This macro copies a single simple value from kernel space to user 528bb898558SAl Viro * space. It supports simple types like char and int, but not larger 529bb898558SAl Viro * data types like structures or arrays. 530bb898558SAl Viro * 531bb898558SAl Viro * @ptr must have pointer-to-simple-variable type, and @x must be assignable 532bb898558SAl Viro * to the result of dereferencing @ptr. 533bb898558SAl Viro * 534bb898558SAl Viro * Caller must check the pointer with access_ok() before calling this 535bb898558SAl Viro * function. 536bb898558SAl Viro * 537bb898558SAl Viro * Returns zero on success, or -EFAULT on error. 538bb898558SAl Viro */ 539bb898558SAl Viro 540bb898558SAl Viro #define __put_user(x, ptr) \ 541bb898558SAl Viro __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 542bb898558SAl Viro 543bb898558SAl Viro #define __get_user_unaligned __get_user 544bb898558SAl Viro #define __put_user_unaligned __put_user 545bb898558SAl Viro 546bb898558SAl Viro /* 547fe40c0afSHiroshi Shimamoto * {get|put}_user_try and catch 548fe40c0afSHiroshi Shimamoto * 549fe40c0afSHiroshi Shimamoto * get_user_try { 550fe40c0afSHiroshi Shimamoto * get_user_ex(...); 551fe40c0afSHiroshi Shimamoto * } get_user_catch(err) 552fe40c0afSHiroshi Shimamoto */ 553fe40c0afSHiroshi Shimamoto #define get_user_try uaccess_try 554fe40c0afSHiroshi Shimamoto #define get_user_catch(err) uaccess_catch(err) 555fe40c0afSHiroshi Shimamoto 556fe40c0afSHiroshi Shimamoto #define get_user_ex(x, ptr) do { \ 557fe40c0afSHiroshi Shimamoto unsigned long __gue_val; \ 558fe40c0afSHiroshi Shimamoto __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ 559fe40c0afSHiroshi Shimamoto (x) = (__force __typeof__(*(ptr)))__gue_val; \ 560fe40c0afSHiroshi Shimamoto } while (0) 561fe40c0afSHiroshi Shimamoto 562019a1369SHiroshi Shimamoto #define put_user_try uaccess_try 563019a1369SHiroshi Shimamoto #define put_user_catch(err) uaccess_catch(err) 564019a1369SHiroshi Shimamoto 565fe40c0afSHiroshi Shimamoto #define put_user_ex(x, ptr) \ 566fe40c0afSHiroshi Shimamoto __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 567fe40c0afSHiroshi Shimamoto 5681ac2e6caSRobert Richter extern unsigned long 5691ac2e6caSRobert Richter copy_from_user_nmi(void *to, const void __user *from, unsigned long n); 57092ae03f2SLinus Torvalds extern __must_check long 57192ae03f2SLinus Torvalds strncpy_from_user(char *dst, const char __user *src, long count); 5721ac2e6caSRobert Richter 5735723aa99SLinus Torvalds extern __must_check long strlen_user(const char __user *str); 5745723aa99SLinus Torvalds extern __must_check long strnlen_user(const char __user *str, long n); 5755723aa99SLinus Torvalds 576a052858fSH. Peter Anvin unsigned long __must_check clear_user(void __user *mem, unsigned long len); 577a052858fSH. Peter Anvin unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 578a052858fSH. Peter Anvin 579f09174c5SQiaowei Ren extern void __cmpxchg_wrong_size(void) 580f09174c5SQiaowei Ren __compiletime_error("Bad argument size for cmpxchg"); 581f09174c5SQiaowei Ren 582f09174c5SQiaowei Ren #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ 583f09174c5SQiaowei Ren ({ \ 584f09174c5SQiaowei Ren int __ret = 0; \ 585f09174c5SQiaowei Ren __typeof__(ptr) __uval = (uval); \ 586f09174c5SQiaowei Ren __typeof__(*(ptr)) __old = (old); \ 587f09174c5SQiaowei Ren __typeof__(*(ptr)) __new = (new); \ 58811f1a4b9SLinus Torvalds __uaccess_begin(); \ 589f09174c5SQiaowei Ren switch (size) { \ 590f09174c5SQiaowei Ren case 1: \ 591f09174c5SQiaowei Ren { \ 59211f1a4b9SLinus Torvalds asm volatile("\n" \ 593f09174c5SQiaowei Ren "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ 59411f1a4b9SLinus Torvalds "2:\n" \ 595f09174c5SQiaowei Ren "\t.section .fixup, \"ax\"\n" \ 596f09174c5SQiaowei Ren "3:\tmov %3, %0\n" \ 597f09174c5SQiaowei Ren "\tjmp 2b\n" \ 598f09174c5SQiaowei Ren "\t.previous\n" \ 599f09174c5SQiaowei Ren _ASM_EXTABLE(1b, 3b) \ 600f09174c5SQiaowei Ren : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 601f09174c5SQiaowei Ren : "i" (-EFAULT), "q" (__new), "1" (__old) \ 602f09174c5SQiaowei Ren : "memory" \ 603f09174c5SQiaowei Ren ); \ 604f09174c5SQiaowei Ren break; \ 605f09174c5SQiaowei Ren } \ 606f09174c5SQiaowei Ren case 2: \ 607f09174c5SQiaowei Ren { \ 60811f1a4b9SLinus Torvalds asm volatile("\n" \ 609f09174c5SQiaowei Ren "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ 61011f1a4b9SLinus Torvalds "2:\n" \ 611f09174c5SQiaowei Ren "\t.section .fixup, \"ax\"\n" \ 612f09174c5SQiaowei Ren "3:\tmov %3, %0\n" \ 613f09174c5SQiaowei Ren "\tjmp 2b\n" \ 614f09174c5SQiaowei Ren "\t.previous\n" \ 615f09174c5SQiaowei Ren _ASM_EXTABLE(1b, 3b) \ 616f09174c5SQiaowei Ren : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 617f09174c5SQiaowei Ren : "i" (-EFAULT), "r" (__new), "1" (__old) \ 618f09174c5SQiaowei Ren : "memory" \ 619f09174c5SQiaowei Ren ); \ 620f09174c5SQiaowei Ren break; \ 621f09174c5SQiaowei Ren } \ 622f09174c5SQiaowei Ren case 4: \ 623f09174c5SQiaowei Ren { \ 62411f1a4b9SLinus Torvalds asm volatile("\n" \ 625f09174c5SQiaowei Ren "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ 62611f1a4b9SLinus Torvalds "2:\n" \ 627f09174c5SQiaowei Ren "\t.section .fixup, \"ax\"\n" \ 628f09174c5SQiaowei Ren "3:\tmov %3, %0\n" \ 629f09174c5SQiaowei Ren "\tjmp 2b\n" \ 630f09174c5SQiaowei Ren "\t.previous\n" \ 631f09174c5SQiaowei Ren _ASM_EXTABLE(1b, 3b) \ 632f09174c5SQiaowei Ren : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 633f09174c5SQiaowei Ren : "i" (-EFAULT), "r" (__new), "1" (__old) \ 634f09174c5SQiaowei Ren : "memory" \ 635f09174c5SQiaowei Ren ); \ 636f09174c5SQiaowei Ren break; \ 637f09174c5SQiaowei Ren } \ 638f09174c5SQiaowei Ren case 8: \ 639f09174c5SQiaowei Ren { \ 640f09174c5SQiaowei Ren if (!IS_ENABLED(CONFIG_X86_64)) \ 641f09174c5SQiaowei Ren __cmpxchg_wrong_size(); \ 642f09174c5SQiaowei Ren \ 64311f1a4b9SLinus Torvalds asm volatile("\n" \ 644f09174c5SQiaowei Ren "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ 64511f1a4b9SLinus Torvalds "2:\n" \ 646f09174c5SQiaowei Ren "\t.section .fixup, \"ax\"\n" \ 647f09174c5SQiaowei Ren "3:\tmov %3, %0\n" \ 648f09174c5SQiaowei Ren "\tjmp 2b\n" \ 649f09174c5SQiaowei Ren "\t.previous\n" \ 650f09174c5SQiaowei Ren _ASM_EXTABLE(1b, 3b) \ 651f09174c5SQiaowei Ren : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 652f09174c5SQiaowei Ren : "i" (-EFAULT), "r" (__new), "1" (__old) \ 653f09174c5SQiaowei Ren : "memory" \ 654f09174c5SQiaowei Ren ); \ 655f09174c5SQiaowei Ren break; \ 656f09174c5SQiaowei Ren } \ 657f09174c5SQiaowei Ren default: \ 658f09174c5SQiaowei Ren __cmpxchg_wrong_size(); \ 659f09174c5SQiaowei Ren } \ 66011f1a4b9SLinus Torvalds __uaccess_end(); \ 661f09174c5SQiaowei Ren *__uval = __old; \ 662f09174c5SQiaowei Ren __ret; \ 663f09174c5SQiaowei Ren }) 664f09174c5SQiaowei Ren 665f09174c5SQiaowei Ren #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ 666f09174c5SQiaowei Ren ({ \ 667f09174c5SQiaowei Ren access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 668f09174c5SQiaowei Ren __user_atomic_cmpxchg_inatomic((uval), (ptr), \ 669f09174c5SQiaowei Ren (old), (new), sizeof(*(ptr))) : \ 670f09174c5SQiaowei Ren -EFAULT; \ 671f09174c5SQiaowei Ren }) 672f09174c5SQiaowei Ren 673fe40c0afSHiroshi Shimamoto /* 674bb898558SAl Viro * movsl can be slow when source and dest are not both 8-byte aligned 675bb898558SAl Viro */ 676bb898558SAl Viro #ifdef CONFIG_X86_INTEL_USERCOPY 677bb898558SAl Viro extern struct movsl_mask { 678bb898558SAl Viro int mask; 679bb898558SAl Viro } ____cacheline_aligned_in_smp movsl_mask; 680bb898558SAl Viro #endif 681bb898558SAl Viro 682bb898558SAl Viro #define ARCH_HAS_NOCACHE_UACCESS 1 683bb898558SAl Viro 684bb898558SAl Viro #ifdef CONFIG_X86_32 685a1ce3928SDavid Howells # include <asm/uaccess_32.h> 686bb898558SAl Viro #else 687a1ce3928SDavid Howells # include <asm/uaccess_64.h> 688bb898558SAl Viro #endif 689bb898558SAl Viro 69010013ebbSAndi Kleen /* 69110013ebbSAndi Kleen * We rely on the nested NMI work to allow atomic faults from the NMI path; the 69210013ebbSAndi Kleen * nested NMI paths are careful to preserve CR2. 69310013ebbSAndi Kleen * 69410013ebbSAndi Kleen * Caller must use pagefault_enable/disable, or run in interrupt context, 69510013ebbSAndi Kleen * and also do a uaccess_ok() check 69610013ebbSAndi Kleen */ 69710013ebbSAndi Kleen #define __copy_from_user_nmi __copy_from_user_inatomic 69810013ebbSAndi Kleen 699404a4741SLinus Torvalds /* 7005b24a7a2SLinus Torvalds * The "unsafe" user accesses aren't really "unsafe", but the naming 7015b24a7a2SLinus Torvalds * is a big fat warning: you have to not only do the access_ok() 7025b24a7a2SLinus Torvalds * checking before using them, but you have to surround them with the 7035b24a7a2SLinus Torvalds * user_access_begin/end() pair. 7045b24a7a2SLinus Torvalds */ 7055b24a7a2SLinus Torvalds #define user_access_begin() __uaccess_begin() 7065b24a7a2SLinus Torvalds #define user_access_end() __uaccess_end() 7075b24a7a2SLinus Torvalds 7081bd4403dSLinus Torvalds #define unsafe_put_user(x, ptr, err_label) \ 7091bd4403dSLinus Torvalds do { \ 7105b24a7a2SLinus Torvalds int __pu_err; \ 711334a023eSLinus Torvalds __typeof__(*(ptr)) __pu_val = (x); \ 712334a023eSLinus Torvalds __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 7131bd4403dSLinus Torvalds if (unlikely(__pu_err)) goto err_label; \ 7141bd4403dSLinus Torvalds } while (0) 7155b24a7a2SLinus Torvalds 7161bd4403dSLinus Torvalds #define unsafe_get_user(x, ptr, err_label) \ 7171bd4403dSLinus Torvalds do { \ 7185b24a7a2SLinus Torvalds int __gu_err; \ 719334a023eSLinus Torvalds __inttype(*(ptr)) __gu_val; \ 7205b24a7a2SLinus Torvalds __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 7215b24a7a2SLinus Torvalds (x) = (__force __typeof__(*(ptr)))__gu_val; \ 7221bd4403dSLinus Torvalds if (unlikely(__gu_err)) goto err_label; \ 7231bd4403dSLinus Torvalds } while (0) 7245b24a7a2SLinus Torvalds 7251965aae3SH. Peter Anvin #endif /* _ASM_X86_UACCESS_H */ 726bb898558SAl Viro 727