/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subhss	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
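
/*
 * Illustrative sketch only; the caller, 'uptr', 'len' and 'buf' below are
 * hypothetical.  After an access_ok()-style check, a user pointer can
 * additionally be masked so that a mispredicted bounds check cannot forward
 * an out-of-range address under speculation; the masked pointer becomes
 * NULL when uptr + len is above addr_limit:
 *
 *	void __user *masked = uaccess_mask_range_ptr(uptr, len);
 *
 *	if (!masked || copy_from_user(buf, masked, len))
 *		return -EFAULT;
 */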

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1		"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2		"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2		"lr", "cc"
#endif
#define __GUP_CLOBBER_4		"lr", "cc"
#define __GUP_CLOBBER_32t_8	"lr", "cc"
#define __GUP_CLOBBER_8		"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32-bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Store the result into the least significant word of a 64-bit target
 * variable; this differs only in the big-endian case, where the LSW of
 * the 64-bit __r2 pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
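
/*
 * Illustrative example; 'uptr' is a hypothetical u32 __user pointer.  The
 * checking accessor validates the address against addr_limit, zeroes the
 * destination on a fault, and returns 0 or -EFAULT:
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */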

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())
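
/*
 * Illustrative pattern; the caller and 'uarg' are hypothetical.  A whole
 * user range can be validated once with access_ok(), after which the
 * non-verifying ("__xxx") accessors below may be used on addresses inside
 * that range:
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(flags, &uarg->flags))
 *		return -EFAULT;
 */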

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif


#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
	({								\
		int __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
		__pu_err;						\
	})
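
/*
 * Illustrative example; 'uhdr' is a hypothetical user pointer to a
 * structure with a 'status' member.  put_user() picks the right
 * __put_user_N() helper from the pointer type and returns -EFAULT if
 * the store faults:
 *
 *	if (put_user(status, &uhdr->status))
 *		return -EFAULT;
 */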

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"		) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}
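
/*
 * Illustrative example; 'uarg' and struct foo are hypothetical.  The
 * generic copy_from_user()/copy_to_user() helpers in <linux/uaccess.h>
 * are built on the raw_* routines here and return the number of bytes
 * that could not be copied (0 on success):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */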

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */