/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
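
/*
 * Editorial usage sketch (not part of the original header; "ubuf" and
 * "len" are hypothetical caller-supplied names).  A caller typically
 * validates the whole range once and then relies on the accessors'
 * own fault handling for the individual loads and stores:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */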

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
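
/*
 * Editorial usage sketch (not part of the original header; "uaddr" is a
 * hypothetical pointer received from user space).  On success "val"
 * holds the user-supplied value; on -EFAULT it has been zeroed by the
 * fixup path:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 */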

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
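
/*
 * Editorial usage sketch (not part of the original header; "status" and
 * "uaddr" are hypothetical).  The value is copied to user space and the
 * macro returns 0 or -EFAULT:
 *
 *	if (put_user(status, (int __user *)uaddr))
 *		return -EFAULT;
 */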

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val;					\
	__pu_val = x;							\
	__uaccess_begin();						\
	__put_user_size(__pu_val, (ptr), (size), __pu_label);		\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x,addr,itype,rtype,ltype,__puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
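
/*
 * Editorial usage sketch (not part of the original header; "uarg" is a
 * hypothetical "u32 __user *" pointing at a pair of values).  The "__"
 * variants skip the range check, so access_ok() must already cover every
 * byte touched:
 *
 *	u32 a, b;
 *
 *	if (!access_ok(uarg, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(a, uarg) || __get_user(b, uarg + 1))
 *		return -EFAULT;
 */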

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
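
/*
 * Editorial usage sketch (not part of the original header; "uaddr",
 * "expected" and "desired" are hypothetical).  This mirrors how
 * futex-style callers use the helper: it returns -EFAULT if the range
 * check or the user access fails, and otherwise stores the value found
 * at *uaddr in "cur" so the caller can tell whether the exchange won:
 *
 *	u32 cur;
 *	int ret;
 *
 *	pagefault_disable();
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, desired);
 *	pagefault_enable();
 *	if (ret)
 *		return ret;
 *	if (cur != expected)
 *		return -EAGAIN;
 */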

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
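
/*
 * Editorial usage sketch (not part of the original header; "uptr" and
 * "val" are hypothetical).  In this version user_access_begin() performs
 * the access_ok() check and opens the STAC window; every exit path,
 * including the fault label, must go through user_access_end():
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */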

#endif /* _ASM_X86_UACCESS_H */