/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <linux/sizes.h>

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#endif

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */

/*
 * Store @x at user address @ptr without an access_ok() check.
 * Evaluates to 0 on success, -EFAULT if the store faulted.
 *
 * The KUAP user-write window is opened before the store and closed on
 * both the success path and the fault path (__pu_failed), so no exit
 * leaves user access enabled.  The do/break dance exists because a
 * local __label__ needs an enclosing block.
 */
#define __put_user(x, ptr)						\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);		\
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));		\
									\
	might_fault();							\
	do {								\
		__label__ __pu_failed;					\
									\
		allow_user_access(__pu_addr, KUAP_WRITE);		\
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
		prevent_user_access(KUAP_WRITE);			\
		__pu_err = 0;						\
		break;							\
									\
__pu_failed:								\
		prevent_user_access(KUAP_WRITE);			\
		__pu_err = -EFAULT;					\
	} while (0);							\
									\
	__pu_err;							\
})

/*
 * Checked variant: validates @ptr with access_ok() first and returns
 * -EFAULT without touching memory if the range is not a valid user range.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_pu_addr = (ptr);			\
									\
	access_ok(_pu_addr, sizeof(*(ptr))) ?				\
		  __put_user(x, _pu_addr) : -EFAULT;			\
})

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
/*
 * Prefixed-kernel variant: force a plain 0(reg) addressing mode ("b"
 * constraint) so the compiler cannot emit a prefixed-form offset that
 * the EX_TABLE fixup machinery could not describe.  A fault at label 1
 * branches to @label via the exception table.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm goto(						\
		"1:	" op " %0,0(%1)	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "b" (addr)				\
		:						\
		: label)
#else
/* Normal variant: let the compiler pick the addressing mode ("m<>"). */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm goto(						\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m<>" (*addr)			\
		:						\
		: label)
#endif

/*
 * 8-byte store.  On 64-bit this is a single std (DS-form, hence the
 * dedicated constraint when not building prefixed); on 32-bit it is a
 * pair of stw instructions covering both halves of the register pair
 * (%L0/%L1 name the second register/word), each with its own fixup.
 */
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label)			\
	asm goto ("1:	std%U1%X1 %0,%1	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), DS_FORM_CONSTRAINT (*addr)		\
		:						\
		: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm goto(						\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

/*
 * Size dispatch for user stores.  @size must be a compile-time 1/2/4/8;
 * anything else is a build error.  Faults branch to @label.
 */
#define __put_user_size_goto(x, ptr, size, label)			\
do {									\
	__typeof__(*(ptr)) __user *__pus_addr = (ptr);			\
									\
	switch (size) {							\
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break;	\
	default: BUILD_BUG();						\
	}								\
} while (0)

/*
 * This does an atomic load of one 128-bit (16-byte) VMX register from a
 * 128 byte aligned user address @uaddr and stores it to kernel address
 * @kaddr (lvx/stvx through vector register 0).  A fault on the user
 * load sets @err to -EFAULT via the fixup entry; @err is unchanged on
 * success, so callers typically pre-initialise it to 0.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/*
 * Compiler supports output operands on asm goto: the loaded value can be
 * produced directly by the asm and a fault jumps straight to @label.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_goto_output(					\
		"1:	"op" %0,0(%1)	# get_user\n"		\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "b" (addr)					\
		:						\
		: label)
#else
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_goto_output(					\
		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "m<>" (*addr)					\
		:						\
		: label)
#endif

/* 8-byte load counterpart of __put_user_asm2_goto. */
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label)			\
	__get_user_asm_goto(x, addr, label, "ld")
#else
/* ld is DS-form: offset must be a multiple of 4, hence the constraint. */
#define __get_user_asm2_goto(x, addr, label)			\
	asm_goto_output(					\
		"1:	ld%U1%X1 %0, %1	# get_user\n"		\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: DS_FORM_CONSTRAINT (*addr)			\
		:						\
		: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
/*
 * 32-bit: load the 64-bit value as two lwz.  "=&r" (early clobber) keeps
 * the destination pair from overlapping the address operand between the
 * two loads.
 */
#define __get_user_asm2_goto(x, addr, label)			\
	asm_goto_output(					\
		"1:	lwz%X1 %0, %1\n"			\
		"2:	lwz%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		: "=&r" (x)					\
		: "m" (*addr)					\
		:						\
		: label)
#endif /* __powerpc64__ */

/* Size dispatch for user loads; faults branch to @label. */
#define __get_user_size_goto(x, ptr, size, label)				\
do {										\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;	\
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break;	\
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break;	\
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break;	\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)

/*
 * Error-code flavour built on the goto flavour: @retval becomes 0 on
 * success, -EFAULT on fault (with @x zeroed so no stale data leaks).
 */
#define __get_user_size_allowed(x, ptr, size, retval)			\
do {									\
	__label__ __gus_failed;						\
									\
	__get_user_size_goto(x, ptr, size, __gus_failed);		\
	retval = 0;							\
	break;								\
__gus_failed:								\
	x = 0;								\
	retval = -EFAULT;						\
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * Older compilers: no asm-goto output operands, so faults are handled
 * with a .fixup stub that writes -EFAULT into %0 and zeroes the value.
 */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
/* Two lwz for the 64-bit value; fixup zeroes both halves of the pair. */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"		\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)			\
do {									\
	retval = 0;							\
	BUILD_BUG_ON(size > sizeof(x));					\
	switch (size) {							\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break;	\
	default: x = 0; BUILD_BUG();					\
	}								\
} while (0)

/* Goto flavour built on the error-code flavour for old compilers. */
#define __get_user_size_goto(x, ptr, size, label)		\
do {								\
	long __gus_retval;					\
								\
	__get_user_size_allowed(x, ptr, size, __gus_retval);	\
	if (__gus_retval)					\
		goto label;					\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Load a simple value from user address @ptr into @x without an
 * access_ok() check.  Evaluates to 0 on success, -EFAULT on fault (in
 * which case @x is zeroed by the size helpers).  barrier_nospec()
 * precedes the access to limit speculative reads with an unvalidated
 * user pointer; the KUAP read window is opened/closed around the load.
 */
#define __get_user(x, ptr)					\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));	\
								\
	might_fault();					\
	barrier_nospec();					\
	allow_user_access(NULL, KUAP_READ);			\
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	prevent_user_access(KUAP_READ);				\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

/*
 * Checked variant: on an invalid range, zeroes @x and returns -EFAULT
 * without touching user memory.
 */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_gu_addr = (ptr);			\
									\
	access_ok(_gu_addr, sizeof(*(ptr))) ?				\
		  __get_user(x, _gu_addr) :				\
		  ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);	\
})

/* more complex routines */

/* Arch copy primitive (assembly); returns the number of bytes NOT copied. */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
/*
 * Copy between two user buffers.  Needs both KUAP directions open and a
 * speculation barrier for the unvalidated source pointer.
 * Returns the number of bytes not copied (0 on full success).
 */
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_user_access(to, KUAP_READ_WRITE);
	ret = __copy_tofrom_user(to, from, n);
	prevent_user_access(KUAP_READ_WRITE);
	return ret;
}
#endif /* __powerpc64__ */

/*
 * Copy @n bytes from user @from to kernel @to, no access_ok() check.
 * Returns the number of bytes not copied.  The kernel destination is
 * force-cast to __user because __copy_tofrom_user takes two user
 * pointers; only the read direction is opened in KUAP.
 */
static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_user_access(NULL, KUAP_READ);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_user_access(KUAP_READ);
	return ret;
}

/*
 * Copy @n bytes from kernel @from to user @to, no access_ok() check.
 * Returns the number of bytes not copied.
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_user_access(to, KUAP_WRITE);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_user_access(KUAP_WRITE);
	return ret;
}

/* Arch zeroing primitive; returns the number of bytes NOT cleared. */
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

/*
 * Zero @size bytes of user memory, no access_ok() check.
 * Returns the number of bytes not cleared.
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	might_fault();
	allow_user_access(addr, KUAP_WRITE);
	ret = __arch_clear_user(addr, size);
	prevent_user_access(KUAP_WRITE);
	return ret;
}

/*
 * Checked variant of __clear_user(): an invalid range clears nothing
 * and reports the whole @size as uncleared.
 */
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
/* Machine-check-safe memcpy; returns bytes not copied. */
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

/*
 * Machine-check-safe copy to user.  Returns the number of bytes not
 * copied; a failed check_copy_size()/access_ok() copies nothing and
 * returns @n unchanged.
 */
static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true)) {
		if (access_ok(to, n)) {
			allow_user_access(to, KUAP_WRITE);
			n = copy_mc_generic((void __force *)to, from, n);
			prevent_user_access(KUAP_WRITE);
		}
	}

	return n;
}
#endif

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);

/*
 * Open a user access window for the user_access_begin()/user_access_end()
 * family.  Returns false (window NOT opened) if the range fails
 * access_ok(); otherwise opens KUAP in direction @dir and returns true.
 * A speculation barrier is only needed when reads are allowed.
 */
static __must_check __always_inline bool
__user_access_begin(const void __user *ptr, size_t len, unsigned long dir)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	if (dir & KUAP_READ)
		barrier_nospec();
	allow_user_access((void __user *)ptr, dir);
	return true;
}

#define user_access_begin(p, l)		__user_access_begin(p, l, KUAP_READ_WRITE)
#define user_read_access_begin(p, l)	__user_access_begin(p, l, KUAP_READ)
#define user_write_access_begin(p, l)	__user_access_begin(p, l, KUAP_WRITE)

#define user_access_end()		prevent_user_access(KUAP_READ_WRITE)
#define user_read_access_end()		prevent_user_access(KUAP_READ)
#define user_write_access_end()		prevent_user_access(KUAP_WRITE)

#define user_access_save		prevent_user_access_return
#define user_access_restore		restore_user_access

/*
 * Masking the user address is an alternative to a
 * conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */

/*
 * 64-bit mask: propagate the pointer's sign bit across the word, then
 * clear it from the mask (& LONG_MAX).  A user pointer (top bit clear)
 * passes through unchanged; a kernel pointer (top bit set) collapses to
 * just its top bit, which is never a valid user address, so the access
 * faults instead of reaching kernel memory.
 */
static inline void __user *mask_user_address_simple(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long mask = (unsigned long)(((long)addr >> (BITS_PER_LONG - 1)) & LONG_MAX);

	return (void __user *)(addr & ~mask);
}

/*
 * isel-based variant: unsigned compare of @ptr against TASK_SIZE, then
 * select TASK_SIZE when ptr is greater, the original pointer otherwise
 * — branchless, so no speculation window.
 * NOTE(review): presumably for cores with the isel instruction (e500);
 * see the CONFIG_E500 dispatch in mask_user_address() below.
 */
static inline void __user *mask_user_address_isel(const void __user *ptr)
{
	unsigned long addr;

	asm("cmplw %1, %2; iselgt %0, %2, %1" : "=r"(addr) : "r"(ptr), "r"(TASK_SIZE) : "cr0");

	return (void __user *)addr;
}

/* TASK_SIZE is a multiple of 128K for shifting by 17 to the right */
/*
 * 32-bit branchless variant: compare at 128K granularity.  If
 * addr >= TASK_SIZE the subtraction goes negative and the arithmetic
 * shift by 31 produces an all-ones mask, replacing the address with
 * TASK_SIZE; otherwise the mask is 0 and the address is unchanged.
 */
static inline void __user *mask_user_address_32(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long mask = (unsigned long)((long)((TASK_SIZE >> 17) - 1 - (addr >> 17)) >> 31);

	addr = (addr & ~mask) | (TASK_SIZE & mask);

	return (void __user *)addr;
}

/*
 * Fallback with a conditional branch: clamp out-of-range pointers to
 * TASK_SIZE.  Only used when no branchless variant applies (see
 * mask_user_address()).
 */
static inline void __user *mask_user_address_fallback(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (void __user *)(likely(addr < TASK_SIZE) ? addr : TASK_SIZE);
}

/*
 * Pick the masking strategy for this configuration.  All the IS_ENABLED()
 * tests fold at compile time, so exactly one helper survives:
 *  - PPC64: sign-bit trick.
 *  - E500: isel instruction.
 *  - 32-bit where user space fits below 2G and kernel text starts at or
 *    above 2G: the sign-bit trick is valid there too.
 *  - otherwise the 128K-granular arithmetic mask, or the branching
 *    fallback when no speculation barrier is configured anyway.
 */
static inline void __user *mask_user_address(const void __user *ptr)
{
#ifdef MODULES_VADDR
	const unsigned long border = MODULES_VADDR;
#else
	const unsigned long border = PAGE_OFFSET;
#endif

	if (IS_ENABLED(CONFIG_PPC64))
		return mask_user_address_simple(ptr);
	if (IS_ENABLED(CONFIG_E500))
		return mask_user_address_isel(ptr);
	if (TASK_SIZE <= UL(SZ_2G) && border >= UL(SZ_2G))
		return mask_user_address_simple(ptr);
	if (IS_ENABLED(CONFIG_PPC_BARRIER_NOSPEC))
		return mask_user_address_32(ptr);
	return mask_user_address_fallback(ptr);
}

/*
 * Masked counterpart of __user_access_begin(): instead of validating and
 * fencing, sanitise the pointer so an out-of-range access faults, then
 * open KUAP in direction @dir.  Returns the (possibly clamped) pointer
 * that the caller must use for all subsequent accesses.
 */
static __always_inline void __user *
__masked_user_access_begin(const void __user *p, unsigned long dir)
{
	void __user *ptr = mask_user_address(p);

	might_fault();
	allow_user_access(ptr, dir);

	return ptr;
}

#define masked_user_access_begin(p)		__masked_user_access_begin(p, KUAP_READ_WRITE)
#define masked_user_read_access_begin(p)	__masked_user_access_begin(p, KUAP_READ)
#define masked_user_write_access_begin(p)	__masked_user_access_begin(p, KUAP_WRITE)

/*
 * unsafe_* accessors: for use inside a user_access_begin()/end() (or
 * masked) window only — no access_ok(), no KUAP handling.  A fault
 * jumps to label @e.
 */
#define arch_unsafe_get_user(x, p, e) do {				\
	__long_type(*(p)) __gu_val;					\
	__typeof__(*(p)) __user *__gu_addr = (p);			\
									\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e);	\
	(x) = (__typeof__(*(p)))__gu_val;				\
} while (0)

#define arch_unsafe_put_user(x, p, e) \
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)

/*
 * Bulk copy from user inside an open access window: 8-byte chunks, then
 * the 4/2/1-byte tails selected by the low bits of @l.  A fault anywhere
 * jumps to @e with the copy partially done.
 */
#define unsafe_copy_from_user(d, s, l, e)					\
do {										\
	u8 *_dst = (u8 *)(d);							\
	const u8 __user *_src = (const u8 __user *)(s);				\
	size_t _len = (l);							\
	int _i;									\
										\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
	if (_len & 4) {								\
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
		_i += 4;							\
	}									\
	if (_len & 2) {								\
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
		_i += 2;							\
	}									\
	if (_len & 1)								\
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
} while (0)

/* Mirror of unsafe_copy_from_user() for the store direction. */
#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) {							\
		unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2;						\
	}								\
	if (_len & 1)							\
		unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

/*
 * Kernel-to-kernel nofault accessors: reuse the user-access asm (the
 * force cast to __user is only to satisfy the helpers' typing — same
 * address space on powerpc).  KUAP is not touched: these addresses are
 * kernel memory.
 */
#define arch_get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size_goto(*((type *)(dst)),					\
		(__force type __user *)(src), sizeof(type), err_label)

#define arch_put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),					\
		(__force type __user *)(dst), sizeof(type), err_label)

#endif	/* _ARCH_POWERPC_UACCESS_H */