/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
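/*
 * Illustrative sketch (not part of the original header): a typical
 * access_ok() gate before touching a user buffer.  The function name
 * and arguments are hypothetical.  A successful check only says the
 * range is plausible; the accesses that follow it may still fault and
 * return -EFAULT.
 *
 *	static int check_user_buf(const void __user *ubuf, size_t len)
 *	{
 *		if (!access_ok(ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */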
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
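/*
 * Illustrative sketch (not part of the original header): fetching one
 * int from user space, as an ioctl handler might.  The function and
 * argument names are hypothetical; no access_ok() call is needed here
 * because get_user() validates the pointer itself.
 *
 *	static int fetch_timeout(int __user *uptr, int *timeout)
 *	{
 *		return get_user(*timeout, uptr);
 *	}
 */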
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
			  "1:	movl %%eax,0(%1)\n"		\
			  "2:	movl %%edx,4(%1)\n"		\
			  _ASM_EXTABLE_UA(1b, %l2)		\
			  _ASM_EXTABLE_UA(2b, %l2)		\
			  : : "A" (x), "r" (addr)		\
			  : : label)

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		/* __put_user_X() is never defined: link error */ \
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
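/*
 * Illustrative sketch (not part of the original header): returning a
 * single value to user space.  The names are hypothetical; put_user()
 * performs its own address check, so no access_ok() call is needed.
 *
 *	static int report_count(unsigned long __user *uptr, unsigned long count)
 *	{
 *		return put_user(count, uptr);
 *	}
 */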
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (-EFAULT), "0" (retval));			\
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
	__get_user_asm(x, ptr, retval, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype)		\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (-EFAULT), "0" (err))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
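/*
 * Illustrative sketch (not part of the original header): the pattern
 * described near the top of this file - one access_ok() call covering
 * several __get_user()/__put_user() accesses to the same user area.
 * The structure and function names are hypothetical.
 *
 *	struct upair { int lo; int hi; };
 *
 *	static int swap_pair(struct upair __user *p)
 *	{
 *		int lo, hi;
 *
 *		if (!access_ok(p, sizeof(*p)))
 *			return -EFAULT;
 *		if (__get_user(lo, &p->lo) || __get_user(hi, &p->hi))
 *			return -EFAULT;
 *		if (__put_user(hi, &p->lo) || __put_user(lo, &p->hi))
 *			return -EFAULT;
 *		return 0;
 *	}
 */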
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)src, (type __user *)dst, label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

#endif /* _ASM_X86_UACCESS_H */
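/*
 * Illustrative sketch (not part of the original header): the
 * user_access_begin()/user_access_end() bracket around a run of
 * unsafe accessors, with an error label taken on a fault.  Note that
 * user_access_end() must run on the failure path too.  Names are
 * hypothetical.
 *
 *	static int fill_user_array(u32 __user *dst, const u32 *src, size_t n)
 *	{
 *		size_t i;
 *
 *		if (!user_access_begin(dst, n * sizeof(u32)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++)
 *			unsafe_put_user(src[i], &dst[i], efault);
 *		user_access_end();
 *		return 0;
 *	efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 */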