/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL
#define TASK_SIZE_MAX	KSEG0

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define TASK_SIZE_MAX	XKSSEG

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

#include <asm-generic/access_ok.h>

/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
									\
	might_fault();							\
	access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT;	\
})

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
									\
	might_fault();							\
	access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) :		\
				       ((x) = 0, -EFAULT);		\
})
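/*
 * Illustrative usage sketch (not part of this header): a typical caller,
 * e.g. a syscall handler, passes a pointer received from user space and
 * checks the return value. The names below are hypothetical:
 *
 *	int __user *uarg;		// pointer handed in from user space
 *	int val;
 *
 *	if (get_user(val, uarg))	// on fault: val is zeroed, -EFAULT
 *		return -EFAULT;
 *	if (put_user(val + 1, uarg))	// write the updated value back
 *		return -EFAULT;
 */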
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(__pu_ptr);					\
	switch (sizeof(*__pu_ptr)) {					\
	case 1:								\
		__put_data_asm(user_sb, __pu_ptr);			\
		break;							\
	case 2:								\
		__put_data_asm(user_sh, __pu_ptr);			\
		break;							\
	case 4:								\
		__put_data_asm(user_sw, __pu_ptr);			\
		break;							\
	case 8:								\
		__PUT_DW(user_sd, __pu_ptr);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__pu_err;							\
})

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	int __gu_err = 0;						\
									\
	__chk_user_ptr(__gu_ptr);					\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_data_asm((x), user_lb, __gu_ptr);			\
		break;							\
	case 2:								\
		__get_data_asm((x), user_lh, __gu_ptr);			\
		break;							\
	case 4:								\
		__get_data_asm((x), user_lw, __gu_ptr);			\
		break;							\
	case 8:								\
		__GET_DW((x), user_ld, __gu_ptr);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__gu_err;							\
})
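/*
 * Illustrative usage sketch (not part of this header): the unchecked
 * __get_user()/__put_user() forms skip the access_ok() test, so the caller
 * must validate the whole range first. Hypothetical caller:
 *
 *	int __user *uarg;
 *	int val;
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(val, uarg) || __put_user(val + 1, uarg))
 *		return -EFAULT;
 */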
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gu_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_data_asm(*(type *)(dst), kernel_lb,		\
			       (__force type *)(src));			\
		break;							\
	case 2:								\
		__get_data_asm(*(type *)(dst), kernel_lh,		\
			       (__force type *)(src));			\
		break;							\
	case 4:								\
		__get_data_asm(*(type *)(dst), kernel_lw,		\
			       (__force type *)(src));			\
		break;							\
	case 8:								\
		__GET_DW(*(type *)(dst), kernel_ld,			\
			 (__force type *)(src));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__gu_err))						\
		goto err_label;						\
} while (0)

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pu_val;							\
	int __pu_err = 0;						\
									\
	__pu_val = *(__force type *)(src);				\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_data_asm(kernel_sb, (type *)(dst));		\
		break;							\
	case 2:								\
		__put_data_asm(kernel_sh, (type *)(dst));		\
		break;							\
	case 4:								\
		__put_data_asm(kernel_sw, (type *)(dst));		\
		break;							\
	case 8:								\
		__PUT_DW(kernel_sd, (type *)(dst));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__pu_err))						\
		goto err_label;						\
} while (0)
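/*
 * Illustrative usage sketch (not part of this header): the *_kernel_nofault()
 * helpers back the generic copy_{from,to}_kernel_nofault() paths and jump to
 * the supplied label when the access faults. Hypothetical caller:
 *
 *	unsigned long val;
 *
 *	__get_kernel_nofault(&val, kaddr, unsigned long, efault);
 *	return val;
 * efault:
 *	return 0;
 */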
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	register void *__cu_to_r __asm__("$4");
	register const void __user *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = to;
	__cu_from_r = from;
	__cu_len_r = n;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		__MODULE_JAL(__raw_copy_from_user)
		".set\tnoat\n\t"
		__UA_ADDU "\t$1, %1, %2\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	register void __user *__cu_to_r __asm__("$4");
	register const void *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = (to);
	__cu_from_r = (from);
	__cu_len_r = (n);

	__asm__ __volatile__(
		__MODULE_JAL(__raw_copy_to_user)
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: bzero_clobbers);

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
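/*
 * Illustrative usage sketch (not part of this header): clear_user() returns
 * the number of bytes that could NOT be zeroed, so callers usually treat any
 * non-zero result as a fault. Hypothetical caller (ubuf/len are made up):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */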
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space. This buffer must be at
 * least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (!access_ok(__from, __len))
		return -EFAULT;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n: Maximum number of bytes to check.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	if (!access_ok(s, 1))
		return 0;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

#endif /* _ASM_UACCESS_H */