#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)						\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (addr < size)
		return true;
	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
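
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): a typical pattern is to validate a range once with
 * access_ok() and then use the unchecked "__" helpers defined further
 * down.  The names 'uptr' and 'val' are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */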

/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
	int insn, fixup;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P3"				\
		     : "=a" (__ret_gu), "=r" (__val_gu)			\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__ret_gu;							\
})
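
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): get_user() both fetches the value and reports faults via its
 * return value, so a caller typically looks like the following; 'arg'
 * is a hypothetical "int __user *".
 *
 *	int val;
 *
 *	if (get_user(val, arg))
 *		return -EFAULT;
 */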

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile(ASM_STAC "\n"					\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile(ASM_STAC "\n"					\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3: " ASM_CLAC "\n"				\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)					\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  Clobbers %ebx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})
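
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): writing a single value back through a hypothetical
 * "int __user *res" pointer.
 *
 *	if (put_user(val, res))
 *		return -EFAULT;
 */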

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
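
/*
 * Illustrative expansion (editor's sketch, not part of the original
 * documentation): for a 4-byte load, __get_user_size() selects
 * __get_user_asm(x, ptr, retval, "l", "k", "=r", errret), which emits
 * roughly:
 *
 *	1:	movl (user addr),%reg	# may fault; 1b is in the extable
 *	2:	...			# CLAC, continue normally
 *	3:	mov $errret,%err	# out-of-line fixup: set the error,
 *		xorl %reg,%reg		# zero the destination register,
 *		jmp 2b			# and resume after the access
 *
 * "itype" is the instruction size suffix, "rtype" the register-name
 * modifier, and "ltype" the output constraint for the destination.
 */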

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	int __pu_err;							\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;							\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	stac();								\
	barrier();

#define uaccess_catch(err)						\
	clac();								\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
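
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): the "__" variants let one access_ok() check cover several
 * accesses to the same user buffer.  'u' is a hypothetical pointer to a
 * two-field user structure.
 *
 *	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
 *		return -EFAULT;
 *	if (__put_user(a, &u->first) || __put_user(b, &u->second))
 *		return -EFAULT;
 */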

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
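
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): compare-and-exchange through a hypothetical
 * "u32 __user *uaddr", replacing 'expected' with 'desired'.  A zero
 * return means the access succeeded; the caller then compares 'cur'
 * against 'expected' to see whether the exchange actually happened.
 * -EFAULT means the user address could not be accessed.
 *
 *	u32 cur;
 *	int err = user_atomic_cmpxchg_inatomic(&cur, uaddr,
 *					       expected, desired);
 */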

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * While we would like to have the compiler do the checking for us
	 * even in the non-constant size case, any false positives there are
	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
	 * without - the [hopefully] dangerous looking nature of the warning
	 * would make people go look at the respective call sites over and
	 * over again just to find that there's no problem).
	 *
	 * And there are cases where it's just not realistic for the compiler
	 * to prove the count to be in range.  For example, multiple call
	 * sites of a helper function - perhaps in different source files -
	 * may all do proper range checking, while the helper function itself
	 * does not repeat the check.
	 *
	 * Therefore limit the compile-time checking to the constant size
	 * case, and do only runtime checking for non-constant sizes.
	 */

	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}
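
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): copying a fixed-size structure from user space.  Because
 * sizeof(req) is a compile-time constant that matches the destination
 * object size, the overflow helpers above stay silent; a constant size
 * larger than the destination would trigger the compile-time
 * warning/error instead.  'ureq' is a hypothetical
 * "struct foo_req __user *".
 *
 *	struct foo_req req;
 *
 *	if (copy_from_user(&req, ureq, sizeof(req)))
 *		return -EFAULT;
 */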

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

#endif /* _ASM_X86_UACCESS_H */