1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 2007 Maciej W. Rozycki 9 * Copyright (C) 2014, Imagination Technologies Ltd. 10 */ 11 #ifndef _ASM_UACCESS_H 12 #define _ASM_UACCESS_H 13 14 #include <linux/kernel.h> 15 #include <linux/errno.h> 16 #include <linux/thread_info.h> 17 #include <asm/asm-eva.h> 18 19 /* 20 * The fs value determines whether argument validity checking should be 21 * performed or not. If get_fs() == USER_DS, checking is performed, with 22 * get_fs() == KERNEL_DS, checking is bypassed. 23 * 24 * For historical reasons, these macros are grossly misnamed. 25 */ 26 #ifdef CONFIG_32BIT 27 28 #ifdef CONFIG_KVM_GUEST 29 #define __UA_LIMIT 0x40000000UL 30 #else 31 #define __UA_LIMIT 0x80000000UL 32 #endif 33 34 #define __UA_ADDR ".word" 35 #define __UA_LA "la" 36 #define __UA_ADDU "addu" 37 #define __UA_t0 "$8" 38 #define __UA_t1 "$9" 39 40 #endif /* CONFIG_32BIT */ 41 42 #ifdef CONFIG_64BIT 43 44 extern u64 __ua_limit; 45 46 #define __UA_LIMIT __ua_limit 47 48 #define __UA_ADDR ".dword" 49 #define __UA_LA "dla" 50 #define __UA_ADDU "daddu" 51 #define __UA_t0 "$12" 52 #define __UA_t1 "$13" 53 54 #endif /* CONFIG_64BIT */ 55 56 /* 57 * USER_DS is a bitmask that has the bits set that may not be set in a valid 58 * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but 59 * the arithmetic we're doing only works if the limit is a power of two, so 60 * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid 61 * address in this range it's the process's problem, not ours :-) 62 */ 63 64 #ifdef CONFIG_KVM_GUEST 65 #define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) 66 #define USER_DS ((mm_segment_t) { 0xC0000000UL }) 67 #else 68 #define KERNEL_DS ((mm_segment_t) { 0UL }) 69 #define USER_DS ((mm_segment_t) { __UA_LIMIT }) 70 #endif 71 72 #define VERIFY_READ 0 73 #define VERIFY_WRITE 1 74 75 #define get_ds() (KERNEL_DS) 76 #define get_fs() (current_thread_info()->addr_limit) 77 #define set_fs(x) (current_thread_info()->addr_limit = (x)) 78 79 #define segment_eq(a, b) ((a).seg == (b).seg) 80 81 /* 82 * eva_kernel_access() - determine whether kernel memory access on an EVA system 83 * 84 * Determines whether memory accesses should be performed to kernel memory 85 * on a system using Extended Virtual Addressing (EVA). 86 * 87 * Return: true if a kernel memory access on an EVA system, else false. 88 */ 89 static inline bool eva_kernel_access(void) 90 { 91 if (!config_enabled(CONFIG_EVA)) 92 return false; 93 94 return segment_eq(get_fs(), get_ds()); 95 } 96 97 /* 98 * Is a address valid? This does a straighforward calculation rather 99 * than tests. 100 * 101 * Address valid if: 102 * - "addr" doesn't have any high-bits set 103 * - AND "size" doesn't have any high-bits set 104 * - AND "addr+size" doesn't have any high-bits set 105 * - OR we are in kernel mode. 106 * 107 * __ua_size() is a trick to avoid runtime checking of positive constant 108 * sizes; for those we already know at compile time that the size is ok. 109 */ 110 #define __ua_size(size) \ 111 ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size)) 112 113 /* 114 * access_ok: - Checks if a user space pointer is valid 115 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 116 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 117 * to write to a block, it is always safe to read from it. 118 * @addr: User space pointer to start of block to check 119 * @size: Size of block to check 120 * 121 * Context: User context only. This function may sleep. 122 * 123 * Checks if a pointer to a block of memory in user space is valid. 124 * 125 * Returns true (nonzero) if the memory block may be valid, false (zero) 126 * if it is definitely invalid. 127 * 128 * Note that, depending on architecture, this function probably just 129 * checks that the pointer is in the user space range - after calling 130 * this function, memory access functions may still return -EFAULT. 131 */ 132 133 #define __access_mask get_fs().seg 134 135 #define __access_ok(addr, size, mask) \ 136 ({ \ 137 unsigned long __addr = (unsigned long) (addr); \ 138 unsigned long __size = size; \ 139 unsigned long __mask = mask; \ 140 unsigned long __ok; \ 141 \ 142 __chk_user_ptr(addr); \ 143 __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ 144 __ua_size(__size))); \ 145 __ok == 0; \ 146 }) 147 148 #define access_ok(type, addr, size) \ 149 likely(__access_ok((addr), (size), __access_mask)) 150 151 /* 152 * put_user: - Write a simple value into user space. 153 * @x: Value to copy to user space. 154 * @ptr: Destination address, in user space. 155 * 156 * Context: User context only. This function may sleep. 157 * 158 * This macro copies a single simple value from kernel space to user 159 * space. It supports simple types like char and int, but not larger 160 * data types like structures or arrays. 161 * 162 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 163 * to the result of dereferencing @ptr. 164 * 165 * Returns zero on success, or -EFAULT on error. 166 */ 167 #define put_user(x,ptr) \ 168 __put_user_check((x), (ptr), sizeof(*(ptr))) 169 170 /* 171 * get_user: - Get a simple variable from user space. 172 * @x: Variable to store result. 173 * @ptr: Source address, in user space. 174 * 175 * Context: User context only. This function may sleep. 176 * 177 * This macro copies a single simple variable from user space to kernel 178 * space. It supports simple types like char and int, but not larger 179 * data types like structures or arrays. 180 * 181 * @ptr must have pointer-to-simple-variable type, and the result of 182 * dereferencing @ptr must be assignable to @x without a cast. 183 * 184 * Returns zero on success, or -EFAULT on error. 185 * On error, the variable @x is set to zero. 186 */ 187 #define get_user(x,ptr) \ 188 __get_user_check((x), (ptr), sizeof(*(ptr))) 189 190 /* 191 * __put_user: - Write a simple value into user space, with less checking. 192 * @x: Value to copy to user space. 193 * @ptr: Destination address, in user space. 194 * 195 * Context: User context only. This function may sleep. 196 * 197 * This macro copies a single simple value from kernel space to user 198 * space. It supports simple types like char and int, but not larger 199 * data types like structures or arrays. 200 * 201 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 202 * to the result of dereferencing @ptr. 203 * 204 * Caller must check the pointer with access_ok() before calling this 205 * function. 206 * 207 * Returns zero on success, or -EFAULT on error. 208 */ 209 #define __put_user(x,ptr) \ 210 __put_user_nocheck((x), (ptr), sizeof(*(ptr))) 211 212 /* 213 * __get_user: - Get a simple variable from user space, with less checking. 214 * @x: Variable to store result. 215 * @ptr: Source address, in user space. 216 * 217 * Context: User context only. This function may sleep. 218 * 219 * This macro copies a single simple variable from user space to kernel 220 * space. It supports simple types like char and int, but not larger 221 * data types like structures or arrays. 222 * 223 * @ptr must have pointer-to-simple-variable type, and the result of 224 * dereferencing @ptr must be assignable to @x without a cast. 225 * 226 * Caller must check the pointer with access_ok() before calling this 227 * function. 228 * 229 * Returns zero on success, or -EFAULT on error. 230 * On error, the variable @x is set to zero. 231 */ 232 #define __get_user(x,ptr) \ 233 __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 234 235 struct __large_struct { unsigned long buf[100]; }; 236 #define __m(x) (*(struct __large_struct __user *)(x)) 237 238 /* 239 * Yuck. We need two variants, one for 64bit operation and one 240 * for 32 bit mode and old iron. 241 */ 242 #ifndef CONFIG_EVA 243 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) 244 #else 245 /* 246 * Kernel specific functions for EVA. We need to use normal load instructions 247 * to read data from kernel when operating in EVA mode. We use these macros to 248 * avoid redefining __get_user_asm for EVA. 249 */ 250 #undef _loadd 251 #undef _loadw 252 #undef _loadh 253 #undef _loadb 254 #ifdef CONFIG_32BIT 255 #define _loadd _loadw 256 #else 257 #define _loadd(reg, addr) "ld " reg ", " addr 258 #endif 259 #define _loadw(reg, addr) "lw " reg ", " addr 260 #define _loadh(reg, addr) "lh " reg ", " addr 261 #define _loadb(reg, addr) "lb " reg ", " addr 262 263 #define __get_kernel_common(val, size, ptr) \ 264 do { \ 265 switch (size) { \ 266 case 1: __get_data_asm(val, _loadb, ptr); break; \ 267 case 2: __get_data_asm(val, _loadh, ptr); break; \ 268 case 4: __get_data_asm(val, _loadw, ptr); break; \ 269 case 8: __GET_DW(val, _loadd, ptr); break; \ 270 default: __get_user_unknown(); break; \ 271 } \ 272 } while (0) 273 #endif 274 275 #ifdef CONFIG_32BIT 276 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr) 277 #endif 278 #ifdef CONFIG_64BIT 279 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr) 280 #endif 281 282 extern void __get_user_unknown(void); 283 284 #define __get_user_common(val, size, ptr) \ 285 do { \ 286 switch (size) { \ 287 case 1: __get_data_asm(val, user_lb, ptr); break; \ 288 case 2: __get_data_asm(val, user_lh, ptr); break; \ 289 case 4: __get_data_asm(val, user_lw, ptr); break; \ 290 case 8: __GET_DW(val, user_ld, ptr); break; \ 291 default: __get_user_unknown(); break; \ 292 } \ 293 } while (0) 294 295 #define __get_user_nocheck(x, ptr, size) \ 296 ({ \ 297 int __gu_err; \ 298 \ 299 if (eva_kernel_access()) { \ 300 __get_kernel_common((x), size, ptr); \ 301 } else { \ 302 __chk_user_ptr(ptr); \ 303 __get_user_common((x), size, ptr); \ 304 } \ 305 __gu_err; \ 306 }) 307 308 #define __get_user_check(x, ptr, size) \ 309 ({ \ 310 int __gu_err = -EFAULT; \ 311 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 312 \ 313 might_fault(); \ 314 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \ 315 if (eva_kernel_access()) \ 316 __get_kernel_common((x), size, __gu_ptr); \ 317 else \ 318 __get_user_common((x), size, __gu_ptr); \ 319 } else \ 320 (x) = 0; \ 321 \ 322 __gu_err; \ 323 }) 324 325 #define __get_data_asm(val, insn, addr) \ 326 { \ 327 long __gu_tmp; \ 328 \ 329 __asm__ __volatile__( \ 330 "1: "insn("%1", "%3")" \n" \ 331 "2: \n" \ 332 " .insn \n" \ 333 " .section .fixup,\"ax\" \n" \ 334 "3: li %0, %4 \n" \ 335 " move %1, $0 \n" \ 336 " j 2b \n" \ 337 " .previous \n" \ 338 " .section __ex_table,\"a\" \n" \ 339 " "__UA_ADDR "\t1b, 3b \n" \ 340 " .previous \n" \ 341 : "=r" (__gu_err), "=r" (__gu_tmp) \ 342 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ 343 \ 344 (val) = (__typeof__(*(addr))) __gu_tmp; \ 345 } 346 347 /* 348 * Get a long long 64 using 32 bit registers. 349 */ 350 #define __get_data_asm_ll32(val, insn, addr) \ 351 { \ 352 union { \ 353 unsigned long long l; \ 354 __typeof__(*(addr)) t; \ 355 } __gu_tmp; \ 356 \ 357 __asm__ __volatile__( \ 358 "1: " insn("%1", "(%3)")" \n" \ 359 "2: " insn("%D1", "4(%3)")" \n" \ 360 "3: \n" \ 361 " .insn \n" \ 362 " .section .fixup,\"ax\" \n" \ 363 "4: li %0, %4 \n" \ 364 " move %1, $0 \n" \ 365 " move %D1, $0 \n" \ 366 " j 3b \n" \ 367 " .previous \n" \ 368 " .section __ex_table,\"a\" \n" \ 369 " " __UA_ADDR " 1b, 4b \n" \ 370 " " __UA_ADDR " 2b, 4b \n" \ 371 " .previous \n" \ 372 : "=r" (__gu_err), "=&r" (__gu_tmp.l) \ 373 : "0" (0), "r" (addr), "i" (-EFAULT)); \ 374 \ 375 (val) = __gu_tmp.t; \ 376 } 377 378 #ifndef CONFIG_EVA 379 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size) 380 #else 381 /* 382 * Kernel specific functions for EVA. We need to use normal load instructions 383 * to read data from kernel when operating in EVA mode. We use these macros to 384 * avoid redefining __get_data_asm for EVA. 385 */ 386 #undef _stored 387 #undef _storew 388 #undef _storeh 389 #undef _storeb 390 #ifdef CONFIG_32BIT 391 #define _stored _storew 392 #else 393 #define _stored(reg, addr) "ld " reg ", " addr 394 #endif 395 396 #define _storew(reg, addr) "sw " reg ", " addr 397 #define _storeh(reg, addr) "sh " reg ", " addr 398 #define _storeb(reg, addr) "sb " reg ", " addr 399 400 #define __put_kernel_common(ptr, size) \ 401 do { \ 402 switch (size) { \ 403 case 1: __put_data_asm(_storeb, ptr); break; \ 404 case 2: __put_data_asm(_storeh, ptr); break; \ 405 case 4: __put_data_asm(_storew, ptr); break; \ 406 case 8: __PUT_DW(_stored, ptr); break; \ 407 default: __put_user_unknown(); break; \ 408 } \ 409 } while(0) 410 #endif 411 412 /* 413 * Yuck. We need two variants, one for 64bit operation and one 414 * for 32 bit mode and old iron. 415 */ 416 #ifdef CONFIG_32BIT 417 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr) 418 #endif 419 #ifdef CONFIG_64BIT 420 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr) 421 #endif 422 423 #define __put_user_common(ptr, size) \ 424 do { \ 425 switch (size) { \ 426 case 1: __put_data_asm(user_sb, ptr); break; \ 427 case 2: __put_data_asm(user_sh, ptr); break; \ 428 case 4: __put_data_asm(user_sw, ptr); break; \ 429 case 8: __PUT_DW(user_sd, ptr); break; \ 430 default: __put_user_unknown(); break; \ 431 } \ 432 } while (0) 433 434 #define __put_user_nocheck(x, ptr, size) \ 435 ({ \ 436 __typeof__(*(ptr)) __pu_val; \ 437 int __pu_err = 0; \ 438 \ 439 __pu_val = (x); \ 440 if (eva_kernel_access()) { \ 441 __put_kernel_common(ptr, size); \ 442 } else { \ 443 __chk_user_ptr(ptr); \ 444 __put_user_common(ptr, size); \ 445 } \ 446 __pu_err; \ 447 }) 448 449 #define __put_user_check(x, ptr, size) \ 450 ({ \ 451 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 452 __typeof__(*(ptr)) __pu_val = (x); \ 453 int __pu_err = -EFAULT; \ 454 \ 455 might_fault(); \ 456 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 457 if (eva_kernel_access()) \ 458 __put_kernel_common(__pu_addr, size); \ 459 else \ 460 __put_user_common(__pu_addr, size); \ 461 } \ 462 \ 463 __pu_err; \ 464 }) 465 466 #define __put_data_asm(insn, ptr) \ 467 { \ 468 __asm__ __volatile__( \ 469 "1: "insn("%z2", "%3")" # __put_data_asm \n" \ 470 "2: \n" \ 471 " .insn \n" \ 472 " .section .fixup,\"ax\" \n" \ 473 "3: li %0, %4 \n" \ 474 " j 2b \n" \ 475 " .previous \n" \ 476 " .section __ex_table,\"a\" \n" \ 477 " " __UA_ADDR " 1b, 3b \n" \ 478 " .previous \n" \ 479 : "=r" (__pu_err) \ 480 : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ 481 "i" (-EFAULT)); \ 482 } 483 484 #define __put_data_asm_ll32(insn, ptr) \ 485 { \ 486 __asm__ __volatile__( \ 487 "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \ 488 "2: "insn("%D2", "4(%3)")" \n" \ 489 "3: \n" \ 490 " .insn \n" \ 491 " .section .fixup,\"ax\" \n" \ 492 "4: li %0, %4 \n" \ 493 " j 3b \n" \ 494 " .previous \n" \ 495 " .section __ex_table,\"a\" \n" \ 496 " " __UA_ADDR " 1b, 4b \n" \ 497 " " __UA_ADDR " 2b, 4b \n" \ 498 " .previous" \ 499 : "=r" (__pu_err) \ 500 : "0" (0), "r" (__pu_val), "r" (ptr), \ 501 "i" (-EFAULT)); \ 502 } 503 504 extern void __put_user_unknown(void); 505 506 /* 507 * ul{b,h,w} are macros and there are no equivalent macros for EVA. 508 * EVA unaligned access is handled in the ADE exception handler. 509 */ 510 #ifndef CONFIG_EVA 511 /* 512 * put_user_unaligned: - Write a simple value into user space. 513 * @x: Value to copy to user space. 514 * @ptr: Destination address, in user space. 515 * 516 * Context: User context only. This function may sleep. 517 * 518 * This macro copies a single simple value from kernel space to user 519 * space. It supports simple types like char and int, but not larger 520 * data types like structures or arrays. 521 * 522 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 523 * to the result of dereferencing @ptr. 524 * 525 * Returns zero on success, or -EFAULT on error. 526 */ 527 #define put_user_unaligned(x,ptr) \ 528 __put_user_unaligned_check((x),(ptr),sizeof(*(ptr))) 529 530 /* 531 * get_user_unaligned: - Get a simple variable from user space. 532 * @x: Variable to store result. 533 * @ptr: Source address, in user space. 534 * 535 * Context: User context only. This function may sleep. 536 * 537 * This macro copies a single simple variable from user space to kernel 538 * space. It supports simple types like char and int, but not larger 539 * data types like structures or arrays. 540 * 541 * @ptr must have pointer-to-simple-variable type, and the result of 542 * dereferencing @ptr must be assignable to @x without a cast. 543 * 544 * Returns zero on success, or -EFAULT on error. 545 * On error, the variable @x is set to zero. 546 */ 547 #define get_user_unaligned(x,ptr) \ 548 __get_user_unaligned_check((x),(ptr),sizeof(*(ptr))) 549 550 /* 551 * __put_user_unaligned: - Write a simple value into user space, with less checking. 552 * @x: Value to copy to user space. 553 * @ptr: Destination address, in user space. 554 * 555 * Context: User context only. This function may sleep. 556 * 557 * This macro copies a single simple value from kernel space to user 558 * space. It supports simple types like char and int, but not larger 559 * data types like structures or arrays. 560 * 561 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 562 * to the result of dereferencing @ptr. 563 * 564 * Caller must check the pointer with access_ok() before calling this 565 * function. 566 * 567 * Returns zero on success, or -EFAULT on error. 568 */ 569 #define __put_user_unaligned(x,ptr) \ 570 __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr))) 571 572 /* 573 * __get_user_unaligned: - Get a simple variable from user space, with less checking. 574 * @x: Variable to store result. 575 * @ptr: Source address, in user space. 576 * 577 * Context: User context only. This function may sleep. 578 * 579 * This macro copies a single simple variable from user space to kernel 580 * space. It supports simple types like char and int, but not larger 581 * data types like structures or arrays. 582 * 583 * @ptr must have pointer-to-simple-variable type, and the result of 584 * dereferencing @ptr must be assignable to @x without a cast. 585 * 586 * Caller must check the pointer with access_ok() before calling this 587 * function. 588 * 589 * Returns zero on success, or -EFAULT on error. 590 * On error, the variable @x is set to zero. 591 */ 592 #define __get_user_unaligned(x,ptr) \ 593 __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr))) 594 595 /* 596 * Yuck. We need two variants, one for 64bit operation and one 597 * for 32 bit mode and old iron. 598 */ 599 #ifdef CONFIG_32BIT 600 #define __GET_USER_UNALIGNED_DW(val, ptr) \ 601 __get_user_unaligned_asm_ll32(val, ptr) 602 #endif 603 #ifdef CONFIG_64BIT 604 #define __GET_USER_UNALIGNED_DW(val, ptr) \ 605 __get_user_unaligned_asm(val, "uld", ptr) 606 #endif 607 608 extern void __get_user_unaligned_unknown(void); 609 610 #define __get_user_unaligned_common(val, size, ptr) \ 611 do { \ 612 switch (size) { \ 613 case 1: __get_data_asm(val, "lb", ptr); break; \ 614 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ 615 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ 616 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ 617 default: __get_user_unaligned_unknown(); break; \ 618 } \ 619 } while (0) 620 621 #define __get_user_unaligned_nocheck(x,ptr,size) \ 622 ({ \ 623 int __gu_err; \ 624 \ 625 __get_user_unaligned_common((x), size, ptr); \ 626 __gu_err; \ 627 }) 628 629 #define __get_user_unaligned_check(x,ptr,size) \ 630 ({ \ 631 int __gu_err = -EFAULT; \ 632 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 633 \ 634 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ 635 __get_user_unaligned_common((x), size, __gu_ptr); \ 636 \ 637 __gu_err; \ 638 }) 639 640 #define __get_data_unaligned_asm(val, insn, addr) \ 641 { \ 642 long __gu_tmp; \ 643 \ 644 __asm__ __volatile__( \ 645 "1: " insn " %1, %3 \n" \ 646 "2: \n" \ 647 " .insn \n" \ 648 " .section .fixup,\"ax\" \n" \ 649 "3: li %0, %4 \n" \ 650 " move %1, $0 \n" \ 651 " j 2b \n" \ 652 " .previous \n" \ 653 " .section __ex_table,\"a\" \n" \ 654 " "__UA_ADDR "\t1b, 3b \n" \ 655 " "__UA_ADDR "\t1b + 4, 3b \n" \ 656 " .previous \n" \ 657 : "=r" (__gu_err), "=r" (__gu_tmp) \ 658 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ 659 \ 660 (val) = (__typeof__(*(addr))) __gu_tmp; \ 661 } 662 663 /* 664 * Get a long long 64 using 32 bit registers. 665 */ 666 #define __get_user_unaligned_asm_ll32(val, addr) \ 667 { \ 668 unsigned long long __gu_tmp; \ 669 \ 670 __asm__ __volatile__( \ 671 "1: ulw %1, (%3) \n" \ 672 "2: ulw %D1, 4(%3) \n" \ 673 " move %0, $0 \n" \ 674 "3: \n" \ 675 " .insn \n" \ 676 " .section .fixup,\"ax\" \n" \ 677 "4: li %0, %4 \n" \ 678 " move %1, $0 \n" \ 679 " move %D1, $0 \n" \ 680 " j 3b \n" \ 681 " .previous \n" \ 682 " .section __ex_table,\"a\" \n" \ 683 " " __UA_ADDR " 1b, 4b \n" \ 684 " " __UA_ADDR " 1b + 4, 4b \n" \ 685 " " __UA_ADDR " 2b, 4b \n" \ 686 " " __UA_ADDR " 2b + 4, 4b \n" \ 687 " .previous \n" \ 688 : "=r" (__gu_err), "=&r" (__gu_tmp) \ 689 : "0" (0), "r" (addr), "i" (-EFAULT)); \ 690 (val) = (__typeof__(*(addr))) __gu_tmp; \ 691 } 692 693 /* 694 * Yuck. We need two variants, one for 64bit operation and one 695 * for 32 bit mode and old iron. 696 */ 697 #ifdef CONFIG_32BIT 698 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr) 699 #endif 700 #ifdef CONFIG_64BIT 701 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr) 702 #endif 703 704 #define __put_user_unaligned_common(ptr, size) \ 705 do { \ 706 switch (size) { \ 707 case 1: __put_data_asm("sb", ptr); break; \ 708 case 2: __put_user_unaligned_asm("ush", ptr); break; \ 709 case 4: __put_user_unaligned_asm("usw", ptr); break; \ 710 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \ 711 default: __put_user_unaligned_unknown(); break; \ 712 } while (0) 713 714 #define __put_user_unaligned_nocheck(x,ptr,size) \ 715 ({ \ 716 __typeof__(*(ptr)) __pu_val; \ 717 int __pu_err = 0; \ 718 \ 719 __pu_val = (x); \ 720 __put_user_unaligned_common(ptr, size); \ 721 __pu_err; \ 722 }) 723 724 #define __put_user_unaligned_check(x,ptr,size) \ 725 ({ \ 726 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 727 __typeof__(*(ptr)) __pu_val = (x); \ 728 int __pu_err = -EFAULT; \ 729 \ 730 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \ 731 __put_user_unaligned_common(__pu_addr, size); \ 732 \ 733 __pu_err; \ 734 }) 735 736 #define __put_user_unaligned_asm(insn, ptr) \ 737 { \ 738 __asm__ __volatile__( \ 739 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ 740 "2: \n" \ 741 " .insn \n" \ 742 " .section .fixup,\"ax\" \n" \ 743 "3: li %0, %4 \n" \ 744 " j 2b \n" \ 745 " .previous \n" \ 746 " .section __ex_table,\"a\" \n" \ 747 " " __UA_ADDR " 1b, 3b \n" \ 748 " .previous \n" \ 749 : "=r" (__pu_err) \ 750 : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ 751 "i" (-EFAULT)); \ 752 } 753 754 #define __put_user_unaligned_asm_ll32(ptr) \ 755 { \ 756 __asm__ __volatile__( \ 757 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ 758 "2: sw %D2, 4(%3) \n" \ 759 "3: \n" \ 760 " .insn \n" \ 761 " .section .fixup,\"ax\" \n" \ 762 "4: li %0, %4 \n" \ 763 " j 3b \n" \ 764 " .previous \n" \ 765 " .section __ex_table,\"a\" \n" \ 766 " " __UA_ADDR " 1b, 4b \n" \ 767 " " __UA_ADDR " 1b + 4, 4b \n" \ 768 " " __UA_ADDR " 2b, 4b \n" \ 769 " " __UA_ADDR " 2b + 4, 4b \n" \ 770 " .previous" \ 771 : "=r" (__pu_err) \ 772 : "0" (0), "r" (__pu_val), "r" (ptr), \ 773 "i" (-EFAULT)); \ 774 } 775 776 extern void __put_user_unaligned_unknown(void); 777 #endif 778 779 /* 780 * We're generating jump to subroutines which will be outside the range of 781 * jump instructions 782 */ 783 #ifdef MODULE 784 #define __MODULE_JAL(destination) \ 785 ".set\tnoat\n\t" \ 786 __UA_LA "\t$1, " #destination "\n\t" \ 787 "jalr\t$1\n\t" \ 788 ".set\tat\n\t" 789 #else 790 #define __MODULE_JAL(destination) \ 791 "jal\t" #destination "\n\t" 792 #endif 793 794 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ 795 defined(CONFIG_CPU_HAS_PREFETCH)) 796 #define DADDI_SCRATCH "$3" 797 #else 798 #define DADDI_SCRATCH "$0" 799 #endif 800 801 extern size_t __copy_user(void *__to, const void *__from, size_t __n); 802 803 #ifndef CONFIG_EVA 804 #define __invoke_copy_to_user(to, from, n) \ 805 ({ \ 806 register void __user *__cu_to_r __asm__("$4"); \ 807 register const void *__cu_from_r __asm__("$5"); \ 808 register long __cu_len_r __asm__("$6"); \ 809 \ 810 __cu_to_r = (to); \ 811 __cu_from_r = (from); \ 812 __cu_len_r = (n); \ 813 __asm__ __volatile__( \ 814 __MODULE_JAL(__copy_user) \ 815 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 816 : \ 817 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 818 DADDI_SCRATCH, "memory"); \ 819 __cu_len_r; \ 820 }) 821 822 #define __invoke_copy_to_kernel(to, from, n) \ 823 __invoke_copy_to_user(to, from, n) 824 825 #endif 826 827 /* 828 * __copy_to_user: - Copy a block of data into user space, with less checking. 829 * @to: Destination address, in user space. 830 * @from: Source address, in kernel space. 831 * @n: Number of bytes to copy. 832 * 833 * Context: User context only. This function may sleep. 834 * 835 * Copy data from kernel space to user space. Caller must check 836 * the specified block with access_ok() before calling this function. 837 * 838 * Returns number of bytes that could not be copied. 839 * On success, this will be zero. 840 */ 841 #define __copy_to_user(to, from, n) \ 842 ({ \ 843 void __user *__cu_to; \ 844 const void *__cu_from; \ 845 long __cu_len; \ 846 \ 847 __cu_to = (to); \ 848 __cu_from = (from); \ 849 __cu_len = (n); \ 850 might_fault(); \ 851 if (eva_kernel_access()) \ 852 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ 853 __cu_len); \ 854 else \ 855 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 856 __cu_len); \ 857 __cu_len; \ 858 }) 859 860 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); 861 862 #define __copy_to_user_inatomic(to, from, n) \ 863 ({ \ 864 void __user *__cu_to; \ 865 const void *__cu_from; \ 866 long __cu_len; \ 867 \ 868 __cu_to = (to); \ 869 __cu_from = (from); \ 870 __cu_len = (n); \ 871 if (eva_kernel_access()) \ 872 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ 873 __cu_len); \ 874 else \ 875 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 876 __cu_len); \ 877 __cu_len; \ 878 }) 879 880 #define __copy_from_user_inatomic(to, from, n) \ 881 ({ \ 882 void *__cu_to; \ 883 const void __user *__cu_from; \ 884 long __cu_len; \ 885 \ 886 __cu_to = (to); \ 887 __cu_from = (from); \ 888 __cu_len = (n); \ 889 if (eva_kernel_access()) \ 890 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \ 891 __cu_from,\ 892 __cu_len);\ 893 else \ 894 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \ 895 __cu_from, \ 896 __cu_len); \ 897 __cu_len; \ 898 }) 899 900 /* 901 * copy_to_user: - Copy a block of data into user space. 902 * @to: Destination address, in user space. 903 * @from: Source address, in kernel space. 904 * @n: Number of bytes to copy. 905 * 906 * Context: User context only. This function may sleep. 907 * 908 * Copy data from kernel space to user space. 909 * 910 * Returns number of bytes that could not be copied. 911 * On success, this will be zero. 912 */ 913 #define copy_to_user(to, from, n) \ 914 ({ \ 915 void __user *__cu_to; \ 916 const void *__cu_from; \ 917 long __cu_len; \ 918 \ 919 __cu_to = (to); \ 920 __cu_from = (from); \ 921 __cu_len = (n); \ 922 if (eva_kernel_access()) { \ 923 __cu_len = __invoke_copy_to_kernel(__cu_to, \ 924 __cu_from, \ 925 __cu_len); \ 926 } else { \ 927 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ 928 might_fault(); \ 929 __cu_len = __invoke_copy_to_user(__cu_to, \ 930 __cu_from, \ 931 __cu_len); \ 932 } \ 933 } \ 934 __cu_len; \ 935 }) 936 937 #ifndef CONFIG_EVA 938 939 #define __invoke_copy_from_user(to, from, n) \ 940 ({ \ 941 register void *__cu_to_r __asm__("$4"); \ 942 register const void __user *__cu_from_r __asm__("$5"); \ 943 register long __cu_len_r __asm__("$6"); \ 944 \ 945 __cu_to_r = (to); \ 946 __cu_from_r = (from); \ 947 __cu_len_r = (n); \ 948 __asm__ __volatile__( \ 949 ".set\tnoreorder\n\t" \ 950 __MODULE_JAL(__copy_user) \ 951 ".set\tnoat\n\t" \ 952 __UA_ADDU "\t$1, %1, %2\n\t" \ 953 ".set\tat\n\t" \ 954 ".set\treorder" \ 955 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 956 : \ 957 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 958 DADDI_SCRATCH, "memory"); \ 959 __cu_len_r; \ 960 }) 961 962 #define __invoke_copy_from_kernel(to, from, n) \ 963 __invoke_copy_from_user(to, from, n) 964 965 /* For userland <-> userland operations */ 966 #define ___invoke_copy_in_user(to, from, n) \ 967 __invoke_copy_from_user(to, from, n) 968 969 /* For kernel <-> kernel operations */ 970 #define ___invoke_copy_in_kernel(to, from, n) \ 971 __invoke_copy_from_user(to, from, n) 972 973 #define __invoke_copy_from_user_inatomic(to, from, n) \ 974 ({ \ 975 register void *__cu_to_r __asm__("$4"); \ 976 register const void __user *__cu_from_r __asm__("$5"); \ 977 register long __cu_len_r __asm__("$6"); \ 978 \ 979 __cu_to_r = (to); \ 980 __cu_from_r = (from); \ 981 __cu_len_r = (n); \ 982 __asm__ __volatile__( \ 983 ".set\tnoreorder\n\t" \ 984 __MODULE_JAL(__copy_user_inatomic) \ 985 ".set\tnoat\n\t" \ 986 __UA_ADDU "\t$1, %1, %2\n\t" \ 987 ".set\tat\n\t" \ 988 ".set\treorder" \ 989 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 990 : \ 991 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 992 DADDI_SCRATCH, "memory"); \ 993 __cu_len_r; \ 994 }) 995 996 #define __invoke_copy_from_kernel_inatomic(to, from, n) \ 997 __invoke_copy_from_user_inatomic(to, from, n) \ 998 999 #else 1000 1001 /* EVA specific functions */ 1002 1003 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, 1004 size_t __n); 1005 extern size_t __copy_from_user_eva(void *__to, const void *__from, 1006 size_t __n); 1007 extern size_t __copy_to_user_eva(void *__to, const void *__from, 1008 size_t __n); 1009 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); 1010 1011 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ 1012 ({ \ 1013 register void *__cu_to_r __asm__("$4"); \ 1014 register const void __user *__cu_from_r __asm__("$5"); \ 1015 register long __cu_len_r __asm__("$6"); \ 1016 \ 1017 __cu_to_r = (to); \ 1018 __cu_from_r = (from); \ 1019 __cu_len_r = (n); \ 1020 __asm__ __volatile__( \ 1021 ".set\tnoreorder\n\t" \ 1022 __MODULE_JAL(func_ptr) \ 1023 ".set\tnoat\n\t" \ 1024 __UA_ADDU "\t$1, %1, %2\n\t" \ 1025 ".set\tat\n\t" \ 1026 ".set\treorder" \ 1027 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 1028 : \ 1029 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 1030 DADDI_SCRATCH, "memory"); \ 1031 __cu_len_r; \ 1032 }) 1033 1034 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ 1035 ({ \ 1036 register void *__cu_to_r __asm__("$4"); \ 1037 register const void __user *__cu_from_r __asm__("$5"); \ 1038 register long __cu_len_r __asm__("$6"); \ 1039 \ 1040 __cu_to_r = (to); \ 1041 __cu_from_r = (from); \ 1042 __cu_len_r = (n); \ 1043 __asm__ __volatile__( \ 1044 __MODULE_JAL(func_ptr) \ 1045 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 1046 : \ 1047 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 1048 DADDI_SCRATCH, "memory"); \ 1049 __cu_len_r; \ 1050 }) 1051 1052 /* 1053 * Source or destination address is in userland. We need to go through 1054 * the TLB 1055 */ 1056 #define __invoke_copy_from_user(to, from, n) \ 1057 __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) 1058 1059 #define __invoke_copy_from_user_inatomic(to, from, n) \ 1060 __invoke_copy_from_user_eva_generic(to, from, n, \ 1061 __copy_user_inatomic_eva) 1062 1063 #define __invoke_copy_to_user(to, from, n) \ 1064 __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) 1065 1066 #define ___invoke_copy_in_user(to, from, n) \ 1067 __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) 1068 1069 /* 1070 * Source or destination address in the kernel. We are not going through 1071 * the TLB 1072 */ 1073 #define __invoke_copy_from_kernel(to, from, n) \ 1074 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) 1075 1076 #define __invoke_copy_from_kernel_inatomic(to, from, n) \ 1077 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic) 1078 1079 #define __invoke_copy_to_kernel(to, from, n) \ 1080 __invoke_copy_to_user_eva_generic(to, from, n, __copy_user) 1081 1082 #define ___invoke_copy_in_kernel(to, from, n) \ 1083 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) 1084 1085 #endif /* CONFIG_EVA */ 1086 1087 /* 1088 * __copy_from_user: - Copy a block of data from user space, with less checking. 1089 * @to: Destination address, in kernel space. 1090 * @from: Source address, in user space. 1091 * @n: Number of bytes to copy. 1092 * 1093 * Context: User context only. This function may sleep. 1094 * 1095 * Copy data from user space to kernel space. Caller must check 1096 * the specified block with access_ok() before calling this function. 1097 * 1098 * Returns number of bytes that could not be copied. 1099 * On success, this will be zero. 1100 * 1101 * If some data could not be copied, this function will pad the copied 1102 * data to the requested size using zero bytes. 1103 */ 1104 #define __copy_from_user(to, from, n) \ 1105 ({ \ 1106 void *__cu_to; \ 1107 const void __user *__cu_from; \ 1108 long __cu_len; \ 1109 \ 1110 __cu_to = (to); \ 1111 __cu_from = (from); \ 1112 __cu_len = (n); \ 1113 might_fault(); \ 1114 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1115 __cu_len); \ 1116 __cu_len; \ 1117 }) 1118 1119 /* 1120 * copy_from_user: - Copy a block of data from user space. 1121 * @to: Destination address, in kernel space. 1122 * @from: Source address, in user space. 1123 * @n: Number of bytes to copy. 1124 * 1125 * Context: User context only. This function may sleep. 1126 * 1127 * Copy data from user space to kernel space. 1128 * 1129 * Returns number of bytes that could not be copied. 1130 * On success, this will be zero. 1131 * 1132 * If some data could not be copied, this function will pad the copied 1133 * data to the requested size using zero bytes. 1134 */ 1135 #define copy_from_user(to, from, n) \ 1136 ({ \ 1137 void *__cu_to; \ 1138 const void __user *__cu_from; \ 1139 long __cu_len; \ 1140 \ 1141 __cu_to = (to); \ 1142 __cu_from = (from); \ 1143 __cu_len = (n); \ 1144 if (eva_kernel_access()) { \ 1145 __cu_len = __invoke_copy_from_kernel(__cu_to, \ 1146 __cu_from, \ 1147 __cu_len); \ 1148 } else { \ 1149 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ 1150 might_fault(); \ 1151 __cu_len = __invoke_copy_from_user(__cu_to, \ 1152 __cu_from, \ 1153 __cu_len); \ 1154 } \ 1155 } \ 1156 __cu_len; \ 1157 }) 1158 1159 #define __copy_in_user(to, from, n) \ 1160 ({ \ 1161 void __user *__cu_to; \ 1162 const void __user *__cu_from; \ 1163 long __cu_len; \ 1164 \ 1165 __cu_to = (to); \ 1166 __cu_from = (from); \ 1167 __cu_len = (n); \ 1168 if (eva_kernel_access()) { \ 1169 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \ 1170 __cu_len); \ 1171 } else { \ 1172 might_fault(); \ 1173 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \ 1174 __cu_len); \ 1175 } \ 1176 __cu_len; \ 1177 }) 1178 1179 #define copy_in_user(to, from, n) \ 1180 ({ \ 1181 void __user *__cu_to; \ 1182 const void __user *__cu_from; \ 1183 long __cu_len; \ 1184 \ 1185 __cu_to = (to); \ 1186 __cu_from = (from); \ 1187 __cu_len = (n); \ 1188 if (eva_kernel_access()) { \ 1189 __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \ 1190 __cu_len); \ 1191 } else { \ 1192 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\ 1193 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\ 1194 might_fault(); \ 1195 __cu_len = ___invoke_copy_in_user(__cu_to, \ 1196 __cu_from, \ 1197 __cu_len); \ 1198 } \ 1199 } \ 1200 __cu_len; \ 1201 }) 1202 1203 /* 1204 * __clear_user: - Zero a block of memory in user space, with less checking. 1205 * @to: Destination address, in user space. 1206 * @n: Number of bytes to zero. 1207 * 1208 * Zero a block of memory in user space. Caller must check 1209 * the specified block with access_ok() before calling this function. 1210 * 1211 * Returns number of bytes that could not be cleared. 1212 * On success, this will be zero. 1213 */ 1214 static inline __kernel_size_t 1215 __clear_user(void __user *addr, __kernel_size_t size) 1216 { 1217 __kernel_size_t res; 1218 1219 might_fault(); 1220 __asm__ __volatile__( 1221 "move\t$4, %1\n\t" 1222 "move\t$5, $0\n\t" 1223 "move\t$6, %2\n\t" 1224 __MODULE_JAL(__bzero) 1225 "move\t%0, $6" 1226 : "=r" (res) 1227 : "r" (addr), "r" (size) 1228 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1229 1230 return res; 1231 } 1232 1233 #define clear_user(addr,n) \ 1234 ({ \ 1235 void __user * __cl_addr = (addr); \ 1236 unsigned long __cl_size = (n); \ 1237 if (__cl_size && access_ok(VERIFY_WRITE, \ 1238 __cl_addr, __cl_size)) \ 1239 __cl_size = __clear_user(__cl_addr, __cl_size); \ 1240 __cl_size; \ 1241 }) 1242 1243 /* 1244 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. 1245 * @dst: Destination address, in kernel space. This buffer must be at 1246 * least @count bytes long. 1247 * @src: Source address, in user space. 1248 * @count: Maximum number of bytes to copy, including the trailing NUL. 1249 * 1250 * Copies a NUL-terminated string from userspace to kernel space. 1251 * Caller must check the specified block with access_ok() before calling 1252 * this function. 1253 * 1254 * On success, returns the length of the string (not including the trailing 1255 * NUL). 1256 * 1257 * If access to userspace fails, returns -EFAULT (some data may have been 1258 * copied). 1259 * 1260 * If @count is smaller than the length of the string, copies @count bytes 1261 * and returns @count. 1262 */ 1263 static inline long 1264 __strncpy_from_user(char *__to, const char __user *__from, long __len) 1265 { 1266 long res; 1267 1268 if (eva_kernel_access()) { 1269 __asm__ __volatile__( 1270 "move\t$4, %1\n\t" 1271 "move\t$5, %2\n\t" 1272 "move\t$6, %3\n\t" 1273 __MODULE_JAL(__strncpy_from_kernel_nocheck_asm) 1274 "move\t%0, $2" 1275 : "=r" (res) 1276 : "r" (__to), "r" (__from), "r" (__len) 1277 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1278 } else { 1279 might_fault(); 1280 __asm__ __volatile__( 1281 "move\t$4, %1\n\t" 1282 "move\t$5, %2\n\t" 1283 "move\t$6, %3\n\t" 1284 __MODULE_JAL(__strncpy_from_user_nocheck_asm) 1285 "move\t%0, $2" 1286 : "=r" (res) 1287 : "r" (__to), "r" (__from), "r" (__len) 1288 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1289 } 1290 1291 return res; 1292 } 1293 1294 /* 1295 * strncpy_from_user: - Copy a NUL terminated string from userspace. 1296 * @dst: Destination address, in kernel space. This buffer must be at 1297 * least @count bytes long. 1298 * @src: Source address, in user space. 1299 * @count: Maximum number of bytes to copy, including the trailing NUL. 1300 * 1301 * Copies a NUL-terminated string from userspace to kernel space. 1302 * 1303 * On success, returns the length of the string (not including the trailing 1304 * NUL). 1305 * 1306 * If access to userspace fails, returns -EFAULT (some data may have been 1307 * copied). 1308 * 1309 * If @count is smaller than the length of the string, copies @count bytes 1310 * and returns @count. 1311 */ 1312 static inline long 1313 strncpy_from_user(char *__to, const char __user *__from, long __len) 1314 { 1315 long res; 1316 1317 if (eva_kernel_access()) { 1318 __asm__ __volatile__( 1319 "move\t$4, %1\n\t" 1320 "move\t$5, %2\n\t" 1321 "move\t$6, %3\n\t" 1322 __MODULE_JAL(__strncpy_from_kernel_asm) 1323 "move\t%0, $2" 1324 : "=r" (res) 1325 : "r" (__to), "r" (__from), "r" (__len) 1326 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1327 } else { 1328 might_fault(); 1329 __asm__ __volatile__( 1330 "move\t$4, %1\n\t" 1331 "move\t$5, %2\n\t" 1332 "move\t$6, %3\n\t" 1333 __MODULE_JAL(__strncpy_from_user_asm) 1334 "move\t%0, $2" 1335 : "=r" (res) 1336 : "r" (__to), "r" (__from), "r" (__len) 1337 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1338 } 1339 1340 return res; 1341 } 1342 1343 /* 1344 * strlen_user: - Get the size of a string in user space. 1345 * @str: The string to measure. 1346 * 1347 * Context: User context only. This function may sleep. 1348 * 1349 * Get the size of a NUL-terminated string in user space. 1350 * 1351 * Returns the size of the string INCLUDING the terminating NUL. 1352 * On exception, returns 0. 1353 * 1354 * If there is a limit on the length of a valid string, you may wish to 1355 * consider using strnlen_user() instead. 1356 */ 1357 static inline long strlen_user(const char __user *s) 1358 { 1359 long res; 1360 1361 if (eva_kernel_access()) { 1362 __asm__ __volatile__( 1363 "move\t$4, %1\n\t" 1364 __MODULE_JAL(__strlen_kernel_asm) 1365 "move\t%0, $2" 1366 : "=r" (res) 1367 : "r" (s) 1368 : "$2", "$4", __UA_t0, "$31"); 1369 } else { 1370 might_fault(); 1371 __asm__ __volatile__( 1372 "move\t$4, %1\n\t" 1373 __MODULE_JAL(__strlen_kernel_asm) 1374 "move\t%0, $2" 1375 : "=r" (res) 1376 : "r" (s) 1377 : "$2", "$4", __UA_t0, "$31"); 1378 } 1379 1380 return res; 1381 } 1382 1383 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */ 1384 static inline long __strnlen_user(const char __user *s, long n) 1385 { 1386 long res; 1387 1388 if (eva_kernel_access()) { 1389 __asm__ __volatile__( 1390 "move\t$4, %1\n\t" 1391 "move\t$5, %2\n\t" 1392 __MODULE_JAL(__strnlen_kernel_nocheck_asm) 1393 "move\t%0, $2" 1394 : "=r" (res) 1395 : "r" (s), "r" (n) 1396 : "$2", "$4", "$5", __UA_t0, "$31"); 1397 } else { 1398 might_fault(); 1399 __asm__ __volatile__( 1400 "move\t$4, %1\n\t" 1401 "move\t$5, %2\n\t" 1402 __MODULE_JAL(__strnlen_user_nocheck_asm) 1403 "move\t%0, $2" 1404 : "=r" (res) 1405 : "r" (s), "r" (n) 1406 : "$2", "$4", "$5", __UA_t0, "$31"); 1407 } 1408 1409 return res; 1410 } 1411 1412 /* 1413 * strnlen_user: - Get the size of a string in user space. 1414 * @str: The string to measure. 1415 * 1416 * Context: User context only. This function may sleep. 1417 * 1418 * Get the size of a NUL-terminated string in user space. 1419 * 1420 * Returns the size of the string INCLUDING the terminating NUL. 1421 * On exception, returns 0. 1422 * If the string is too long, returns a value greater than @n. 1423 */ 1424 static inline long strnlen_user(const char __user *s, long n) 1425 { 1426 long res; 1427 1428 might_fault(); 1429 if (eva_kernel_access()) { 1430 __asm__ __volatile__( 1431 "move\t$4, %1\n\t" 1432 "move\t$5, %2\n\t" 1433 __MODULE_JAL(__strnlen_kernel_asm) 1434 "move\t%0, $2" 1435 : "=r" (res) 1436 : "r" (s), "r" (n) 1437 : "$2", "$4", "$5", __UA_t0, "$31"); 1438 } else { 1439 __asm__ __volatile__( 1440 "move\t$4, %1\n\t" 1441 "move\t$5, %2\n\t" 1442 __MODULE_JAL(__strnlen_user_asm) 1443 "move\t%0, $2" 1444 : "=r" (res) 1445 : "r" (s), "r" (n) 1446 : "$2", "$4", "$5", __UA_t0, "$31"); 1447 } 1448 1449 return res; 1450 } 1451 1452 struct exception_table_entry 1453 { 1454 unsigned long insn; 1455 unsigned long nextinsn; 1456 }; 1457 1458 extern int fixup_exception(struct pt_regs *regs); 1459 1460 #endif /* _ASM_UACCESS_H */ 1461