#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* KERNEL_DS: no limit (all ones); USER_DS: top of the user address range. */
#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
#define set_fs(x)	(current->thread.addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
/* True if @addr is strictly below the current task's address limit. */
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

/*
 * Range check on a __user pointer.  __chk_user_ptr() is presumably a
 * static-checker (sparse) annotation with no runtime effect; the real
 * work is done by __chk_range_not_ok() above.
 */
#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Warn (once) if access_ok() is used outside of task context. */
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Out-of-line, size-suffixed fetch stubs (see get_user() below). */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * Open/close a user-access window.  stac()/clac() are the SMAP
 * (<asm/smap.h>) instructions that temporarily permit kernel accesses
 * to user pages.
 */
#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	/* %P4 pastes sizeof(*(ptr)) into the stub name, e.g.		\
	 * __get_user_4; ptr goes in via "0" (%eax), error comes	\
	 * back in %eax and the value in %edx (pinned above).  The	\
	 * "+r" (__sp) tie-in tells gcc the call may touch the stack. */\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/*
 * Call the size-suffixed __put_user_N stub: value in %eax ("0"),
 * pointer in %ecx, error returned in %eax; %ebx is clobbered by the
 * stub (see the calling-convention comment further down).
 */
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
/*
 * 32-bit: a 64-bit store is two movl's ("A" = %edx:%eax pair), each
 * with its own extable entry so a fault on either half lands in the
 * fixup that sets @err to @errret.
 */
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%2)\n"				\
		     "2: movl %%edx,4(%2)\n"				\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: movl %3,%0\n"					\
		     " jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

/* _EX flavor: no error code; faults are handled by the uaccess_try state. */
#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%1)\n"				\
		     "2: movl %%edx,4(%1)\n"				\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
/* 64-bit: a plain quadword store does the job. */
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		/* emits "call __put_user_X": undefined symbol, so a	\
		 * bad size is caught at link time */			\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})

/*
 * Inline store of @x to user @ptr, dispatched on @size.
 * @retval is set to 0 on success or @errret on fault.
 * Caller provides the __uaccess_begin()/__uaccess_end() window.
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
/*
 * 32-bit 64-bit load: two movl's into %edx:%eax ("=A"), with the
 * STAC/CLAC window inlined here (unlike the other asm helpers).  On
 * fault, @retval gets @errret and the value is zeroed.
 */
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile(ASM_STAC "\n"					\
		     "1: movl %2,%%eax\n"				\
		     "2: movl %3,%%edx\n"				\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %4,%0\n"					\
		     " xorl %%eax,%%eax\n"				\
		     " xorl %%edx,%%edx\n"				\
		     " jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

/* No 64-bit _EX variant on 32-bit: force a link-time error. */
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

/*
 * Inline load from user @ptr into @x, dispatched on @size.
 * @retval is set to 0 on success or @errret on fault (in which case
 * @x is zeroed by the fixup).  Caller provides the uaccess window.
 */
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

/*
 * Single user-space load with extable fixup: on fault, jump to 3:,
 * set @err to @errret and zero the destination register.
 * @itype/@rtype/@ltype select the mov suffix, register-size modifier
 * and output constraint for the given access size.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " xor"itype" %"rtype"1,%"rtype"1\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

/*
 * _EX flavor of the single load: no error output; the fixup only
 * zeroes the destination, and the fault is reported through the
 * uaccess_err state checked by uaccess_catch().
 */
#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

/* __put_user_size() wrapped in its own STAC/CLAC window. */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

/* __get_user_size() wrapped in its own STAC/CLAC window. */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
/*
 * View a user address as a (large) memory operand so gcc treats the
 * asm's "m" constraint as covering the whole accessed object.
 */
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * _EX flavor of the single store: no error output or fixup code;
 * a fault resumes at 2: and is reported via the uaccess_err state.
 */
#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 *
 * Opens a uaccess window and clears the per-thread fault flag; any
 * *_ex access that faults inside the region sets
 * current->thread.uaccess_err, which uaccess_catch() converts to
 * -EFAULT after closing the window.
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/* x86 has no alignment restrictions for these, so the plain forms work. */
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

/*
 * Locked cmpxchg on a user address, dispatched on @size.  On success
 * *@uval receives the previous value; returns 0, or -EFAULT if the
 * user access faulted.  Caller must have validated @ptr (see the
 * access_ok()-checked wrapper below).
 */
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		/* cmpxchgq needs 64-bit; reject at compile time on 32-bit */ \
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

/* Runtime warning for the non-constant-size overflow case below. */
static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

/*
 * copy_from_user() - copy @n bytes from user @from to kernel @to.
 * Returns the number of bytes that could NOT be copied (0 on success).
 * Performs a compile-time destination-size check: a provably-too-small
 * constant @n is a build error, a non-constant overflow warns at
 * runtime and skips the copy (returning @n).
 */
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	kasan_check_write(to, n);

	/* sz < 0 means the object size is unknown at compile time */
	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		n = _copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

/*
 * copy_to_user() - copy @n bytes from kernel @from to user @to.
 * Returns the number of bytes that could NOT be copied (0 on success).
 * Same compile-time/runtime source-size checking as copy_from_user().
 */
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	kasan_check_read(from, n);

	might_fault();

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = _copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do a uaccess_ok() check
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);		\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

/*
 * NOTE(review): __gu_val here is a plain unsigned long rather than
 * __inttype(*(ptr)) as in __get_user_nocheck(); a sizeof(*(ptr)) == 8
 * access on a 32-bit kernel would not fit.  Verify callers only use
 * word-sized-or-smaller types, or align this with __get_user_nocheck().
 */
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	unsigned long __gu_val;							\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

#endif /* _ASM_X86_UACCESS_H */