/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)


/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)

#define access_ok(type, addr, size)					\
	likely(__access_ok((unsigned long)(addr), (size), __access_mask))
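
/*
 * Example usage (illustrative sketch; the function and variable names
 * below are hypothetical, not part of this header): validate a user
 * range once with access_ok(), then use the unchecked copy routine
 * defined further down.
 *
 *	static long example_read_out(void __user *ubuf, const void *kbuf,
 *				     unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		if (__copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */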

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)							\
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)							\
	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr)						\
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
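
/*
 * Example usage (illustrative sketch; the function name is hypothetical):
 * fetch an int from user space, update it, and store it back.  Both
 * accessors return 0 on success and -EFAULT on a faulting access.
 *
 *	static int example_bump(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val++;
 *		if (put_user(val, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */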

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})

#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
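
/*
 * How the fixup above works (illustrative note): each label "1:"/"2:"
 * marks a load that may fault, and the __ex_table section pairs the
 * address of that instruction with the address of the fixup code.  On an
 * address error or unhandled TLB fault, the exception handler looks the
 * faulting PC up via fixup_exception() (declared at the end of this
 * file), which redirects execution into the .fixup snippet; that snippet
 * loads -EFAULT into the error register, zeroes the destination and jumps
 * back past the access.  In effect, for a 4-byte value, __get_user(x, uptr)
 * behaves like this hypothetical C:
 *
 *	err = 0;
 *	if (load_word_faulted(&tmp, uptr))	// hypothetical helper
 *		err = -EFAULT;
 *	else
 *		x = tmp;
 */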

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)					\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
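
/*
 * Example usage (illustrative sketch; the struct and function names are
 * hypothetical): storing through a pointer that may not be naturally
 * aligned, e.g. a member of a packed structure in user memory.
 *
 *	struct example_pkt {
 *		unsigned char	tag;
 *		unsigned int	value;	// at offset 1 when packed
 *	} __attribute__((packed));
 *
 *	static int example_set_value(struct example_pkt __user *p,
 *				     unsigned int v)
 *	{
 *		return put_user_unaligned(v, &p->value);
 *	}
 */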

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr)					\
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr)					\
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr)					\
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
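
/*
 * Example usage (illustrative sketch; names are hypothetical): once a
 * block has been validated with access_ok(), the unchecked unaligned
 * accessor can be used for the individual field.
 *
 *	static int example_read_field(unsigned int __user *uptr,
 *				      unsigned int *out)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user_unaligned(*out, uptr);
 *	}
 */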

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;	\
		default: __put_user_unaligned_unknown(); break;		\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range
 * of jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
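
/*
 * Note on the wrapper above (illustrative): __copy_user takes its
 * arguments in $4-$6 (the MIPS a0-a2 argument registers), which is why
 * the operands are pinned to those registers, and it leaves the number
 * of bytes it could not copy in $6; __cu_len_r is therefore declared as
 * both read and written ("+r") and returned as the result of the macro.
 */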

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
						    __cu_len);		\
	__cu_len;							\
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})
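
/*
 * Example usage (illustrative sketch; the struct and function names are
 * hypothetical): returning a kernel structure to user space, as a read()
 * or ioctl() handler might.
 *
 *	static long example_get_info(struct example_info __user *uarg,
 *				     const struct example_info *info)
 *	{
 *		if (copy_to_user(uarg, info, sizeof(*info)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */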

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	__cu_len;							\
})
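
/*
 * Example usage (illustrative sketch; the struct and function names are
 * hypothetical): pulling a request structure in from user space, as a
 * write() or ioctl() handler might.
 *
 *	static long example_set_config(struct example_cfg *cfg,
 *				       const struct example_cfg __user *uarg)
 *	{
 *		if (copy_from_user(cfg, uarg, sizeof(*cfg)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */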

#define __copy_in_user(to, from, n)	__copy_from_user(to, from, n)

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len)))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
		((unsigned long)(__cl_addr)), __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
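
/*
 * Example usage (illustrative sketch; names are hypothetical): zeroing
 * the tail of a user buffer after filling only part of it.
 *
 *	static long example_fill(char __user *ubuf, unsigned long len,
 *				 const char *kdata, unsigned long kdata_len)
 *	{
 *		unsigned long used = min(len, kdata_len);
 *
 *		if (copy_to_user(ubuf, kdata, used))
 *			return -EFAULT;
 *		if (clear_user(ubuf + used, len - used))
 *			return -EFAULT;
 *		return used;
 *	}
 */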

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space, examining at
 * most @n bytes.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
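
/*
 * Example usage (illustrative sketch; names are hypothetical): copying a
 * pathname-style string from user space into a fixed kernel buffer.
 *
 *	static long example_get_name(char *kname, long kname_size,
 *				     const char __user *uname)
 *	{
 *		long len = strncpy_from_user(kname, uname, kname_size);
 *
 *		if (len < 0)
 *			return len;		// -EFAULT
 *		if (len == kname_size)
 *			return -ENAMETOOLONG;	// no NUL within the buffer
 *		return len;
 *	}
 */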

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */