#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS 	MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)						\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
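
/*
 * Illustrative sketch (not part of this header, names hypothetical): the
 * typical pattern is to validate a user pointer once with access_ok() and
 * then use the cheaper unchecked __get_user()/__put_user() variants:
 *
 *	static int example_read_flag(const int __user *uptr, int *flag)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user(*flag, uptr);
 *	}
 */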
/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
	int insn, fixup, handler;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
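
/*
 * For illustration (hypothetical compile-time checks, not part of this
 * header): __inttype(int) is unsigned long on all configurations, while
 * __inttype(u64) is unsigned long long on 32-bit kernels, where a u64
 * does not fit in unsigned long.  That is, the following would hold:
 *
 *	BUILD_BUG_ON(sizeof(__inttype(int)) != sizeof(unsigned long));
 *	BUILD_BUG_ON(sizeof(__inttype(long long)) < sizeof(long long));
 */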
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)					\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  Clobbers %ebx
 * (matching the "ebx" clobber in __put_user_x above).
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
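
/*
 * Illustrative sketch of get_user() in a syscall-style helper (function
 * name and semantics hypothetical, not part of this header):
 *
 *	static long example_fetch_timeout(const int __user *uptr)
 *	{
 *		int timeout;
 *
 *		if (get_user(timeout, uptr))
 *			return -EFAULT;
 *		return timeout;
 *	}
 */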
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
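
/*
 * To make the fixup flow above concrete: if the mov at label 1 faults,
 * the exception table entry redirects execution to label 3 in .fixup,
 * which loads the error code, zeroes the destination register, and jumps
 * back to label 2 (the instruction after the access).  A hand-expanded
 * 1-byte load (illustrative only; val/err/addr are hypothetical) would
 * look like:
 *
 *	asm volatile("\n"
 *		     "1:	movb %2,%b1\n"
 *		     "2:\n"
 *		     ".section .fixup,\"ax\"\n"
 *		     "3:	mov %3,%0\n"
 *		     "	xorb %b1,%b1\n"
 *		     "	jmp 2b\n"
 *		     ".previous\n"
 *		     _ASM_EXTABLE(1b, 3b)
 *		     : "=r" (err), "=q" (val)
 *		     : "m" (__m(addr)), "i" (-EFAULT), "0" (err));
 */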
/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
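
/*
 * Illustrative sketch (hypothetical struct and function names, not part
 * of this header): after a single access_ok() check, several fields can
 * be read with the cheaper unchecked variant:
 *
 *	struct example_req { int a; int b; };
 *
 *	static int example_read_req(const struct example_req __user *u,
 *				    struct example_req *k)
 *	{
 *		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
 *			return -EFAULT;
 *		if (__get_user(k->a, &u->a) || __get_user(k->b, &u->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */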
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
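
/*
 * Illustrative use of the {get|put}_user_try/catch machinery above, in
 * the style of the signal frame setup code (regs/frame are hypothetical;
 * the caller must have validated @frame with access_ok() first):
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *		put_user_ex(regs->sp, &frame->sp);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */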
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif
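
/*
 * Illustrative sketch of user_atomic_cmpxchg_inatomic() above, in the
 * style of the futex code (uaddr/old/new are hypothetical).  Pagefaults
 * must be disabled, so -EFAULT here typically means "fault the page in
 * and retry":
 *
 *	u32 cur;
 *	int ret;
 *
 *	pagefault_disable();
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, old, new);
 *	pagefault_enable();
 *	if (!ret && cur != old)
 *		;	|* someone else changed the value; retry *|
 */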
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * While we would like to have the compiler do the checking for us
	 * even in the non-constant size case, any false positives there are
	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
	 * without - the [hopefully] dangerous looking nature of the warning
	 * would make people go look at the respective call sites over and
	 * over again just to find that there's no problem).
	 *
	 * And there are cases where it's just not realistic for the compiler
	 * to prove the count to be in range. For example when multiple call
	 * sites of a helper function - perhaps in different source files -
	 * all doing proper range checking, yet the helper function not doing
	 * so again.
	 *
	 * Therefore limit the compile time checking to the constant size
	 * case, and do only runtime checking for non-constant sizes.
	 */

	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr)						\
({									\
	int __pu_err;							\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	__builtin_expect(__pu_err, 0);					\
})

#define unsafe_get_user(x, ptr)						\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

#endif /* _ASM_X86_UACCESS_H */
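
/*
 * Illustrative use of the unsafe accessors above (function and variable
 * names hypothetical, not part of this header); note the mandatory
 * access_ok() check and the user_access_begin/end() pair:
 *
 *	static int example_put_pair(u32 __user *uptr, u32 a, u32 b)
 *	{
 *		int err = -EFAULT;
 *
 *		if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		user_access_begin();
 *		if (unsafe_put_user(a, uptr))
 *			goto out;
 *		if (unsafe_put_user(b, uptr + 1))
 *			goto out;
 *		err = 0;
 *	out:
 *		user_access_end();
 *		return err;
 *	}
 */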