/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/pgtable.h>
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
#include <asm/asce.h>
#include <linux/instrumented.h>

void debug_user_asce(int exit);

/*
 * With KMSAN enabled the uaccess helpers must stay out of line and must
 * not be instrumented for memory sanitizing: KMSAN cannot look inside the
 * inline assemblies and would otherwise report false positives. Without
 * KMSAN the helpers are always inlined.
 */
#ifdef CONFIG_KMSAN
#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
#define uaccess_kmsan_or_inline __always_inline
#endif

/* The generic uaccess code selects the inline copy variants defined below. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/*
 * raw_copy_from_user - unchecked copy from user space
 * @to: kernel destination buffer
 * @from: user space source address
 * @size: number of bytes to copy
 *
 * Copies with the MVCOS instruction in chunks of up to 4096 bytes per
 * execution. The 0x81 specification value is loaded into the low halfword
 * of %r0, which applies to the second (source, i.e. user space) operand;
 * NOTE(review): per the z/Architecture Principles of Operation this
 * selects the address space/key handling for that operand - confirm there.
 * Faults are handled by the EX_TABLE_UA_MVCOS_FROM fixups, which appear
 * to adjust @size to the number of bytes left uncopied - confirm against
 * asm/asm-extable.h.
 *
 * Returns the number of bytes that could not be copied; 0 on success.
 */
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		/* Remember the size at the start of this chunk. */
		osize = size;
		asm_inline volatile(
			" lhi %%r0,%[spec]\n"
			"0: mvcos %[to],%[from],%[size]\n"
			"1: nopr %%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_FROM(0b, 0b)
			EX_TABLE_UA_MVCOS_FROM(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char __user *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		/*
		 * A compile-time-constant size of at most 4096 bytes is
		 * handled by a single MVCOS execution; skip the condition
		 * code check in that case.
		 */
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		/* cc != 0: more data remains - advance by one 4K chunk. */
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}

/*
 * raw_copy_to_user - unchecked copy to user space
 * @to: user space destination address
 * @from: kernel source buffer
 * @size: number of bytes to copy
 *
 * Counterpart of raw_copy_from_user(). Here LLILH places the 0x81
 * specification in the high halfword of %r0, which applies to the first
 * (destination, i.e. user space) operand instead of the source.
 *
 * Returns the number of bytes that could not be copied; 0 on success.
 */
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		asm_inline volatile(
			" llilh %%r0,%[spec]\n"
			"0: mvcos %[to],%[from],%[size]\n"
			"1: nopr %%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

/*
 * copy_from_user_key - copy from user space using an explicit access key
 *
 * Validates the copy size (hardened usercopy check) and delegates to the
 * out-of-line _copy_from_user_key(). If the size check fails, @n is
 * returned unchanged, i.e. nothing was copied.
 */
static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

/*
 * copy_to_user_key - copy to user space using an explicit access key
 *
 * Same structure as copy_from_user_key(), with the size check performed
 * on the kernel source buffer.
 */
static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}

/*
 * Referenced only for unsupported access sizes; presumably left undefined
 * so that a bad __put_user() fails at link time - no definition visible
 * in this file.
 */
int __noreturn __put_user_bad(void);

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/*
 * Define __put_user_<type>_noinstr(): store a single value of the given
 * width to user space with MVCOS. This variant uses asm goto with an
 * output operand (hence the config dependency) and branches straight to
 * the Efault label via the exception table entries on a fault.
 */
#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	asm goto(							\
		" llilh %%r0,%[spec]\n"					\
		"0: mvcos %[to],%[from],%[size]\n"			\
		"1: nopr %%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "+Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * Fallback without asm goto output support: %[rc] is set to 0 on the
 * non-faulting path, and the EX_TABLE_UA_FAULT fixup writes the error
 * code into it when one of the marked instructions faults.
 */
#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		" llilh %%r0,%[spec]\n"					\
		"0: mvcos %[to],%[from],%[size]\n"			\
		"1: lhi %[rc],0\n"					\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "+Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_PUT_USER_NOINSTR(char);
DEFINE_PUT_USER_NOINSTR(short);
DEFINE_PUT_USER_NOINSTR(int);
DEFINE_PUT_USER_NOINSTR(long);

/*
 * Instrumented wrappers around the _noinstr variants: report the store
 * to the sanitizers via instrument_put_user() (linux/instrumented.h).
 */
#define DEFINE_PUT_USER(type)						\
static __always_inline int						\
__put_user_##type(unsigned type __user *to, unsigned type *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __put_user_##type##_noinstr(to, from, size);		\
	instrument_put_user(*from, to, size);				\
	return rc;							\
}

DEFINE_PUT_USER(char);
DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long);

/*
 * __put_user - store @x to user space, without access_ok() check
 *
 * Dispatches on the width of *@ptr to one of the typed helpers above;
 * any other size is rejected via __put_user_bad(). Evaluates to 0 on
 * success or a negative error code on fault.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __prc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__prc = __put_user_char((unsigned char __user *)(ptr),	\
					(unsigned char *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	case 2:								\
		__prc = __put_user_short((unsigned short __user *)(ptr),\
					 (unsigned short *)&__x,	\
					 sizeof(*(ptr)));		\
		break;							\
	case 4:								\
		__prc = __put_user_int((unsigned int __user *)(ptr),	\
				       (unsigned int *)&__x,		\
				       sizeof(*(ptr)));			\
		break;							\
	case 8:								\
		__prc = __put_user_long((unsigned long __user *)(ptr),	\
					(unsigned long *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	default:							\
		__prc = __put_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__prc, 0);					\
})

/* put_user - like __put_user(), but annotated as a sleeping/faulting path. */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

/* Link-time error trap for unsupported __get_user() sizes; see
 * __put_user_bad() above. */
int __noreturn __get_user_bad(void);

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/*
 * Define __get_user_<type>_noinstr(): fetch a single value of the given
 * width from user space with MVCOS. The 0x81 specification in the low
 * halfword of %r0 applies to the source (user space) operand. On a fault
 * the exception table branches to Efault and *@to is zeroed.
 */
#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	asm goto(							\
		" lhi %%r0,%[spec]\n"					\
		"0: mvcos %[to],%[from],%[size]\n"			\
		"1: nopr %%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "=Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	*to = 0;							\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * Fallback without asm goto output support: %[rc] is set to 0 on the
 * non-faulting path and written by the EX_TABLE_UA_FAULT fixup on a
 * fault; *@to is zeroed in the failure case here as well.
 */
#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		" lhi %%r0,%[spec]\n"					\
		"0: mvcos %[to],%[from],%[size]\n"			\
		"1: lhi %[rc],0\n"					\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "=Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	if (likely(!rc))						\
		return 0;						\
	*to = 0;							\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_GET_USER_NOINSTR(char);
DEFINE_GET_USER_NOINSTR(short);
DEFINE_GET_USER_NOINSTR(int);
DEFINE_GET_USER_NOINSTR(long);

/*
 * Instrumented wrappers around the _noinstr variants: report the fetched
 * value to the sanitizers via instrument_get_user().
 */
#define DEFINE_GET_USER(type)						\
static __always_inline int						\
__get_user_##type(unsigned type *to, const unsigned type __user *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __get_user_##type##_noinstr(to, from, size);		\
	instrument_get_user(*to);					\
	return rc;							\
}

DEFINE_GET_USER(char);
DEFINE_GET_USER(short);
DEFINE_GET_USER(int);
DEFINE_GET_USER(long);

/*
 * __get_user - fetch *@ptr from user space into @x, without access_ok()
 *
 * Dispatches on the width of *@ptr to a typed helper; the fetched bytes
 * are then reinterpreted as the target type through the __force cast so
 * the assignment to @x keeps the original type/signedness. Unsupported
 * sizes are rejected via __get_user_bad(). Evaluates to 0 on success or
 * a negative error code on fault; on fault the helpers zero the
 * temporary, so @x is assigned zero bytes.
 */
#define __get_user(x, ptr)						\
({									\
	const __user void *____guptr = (ptr);				\
	int __grc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		const unsigned char __user *__guptr = ____guptr;	\
		unsigned char __x;					\
									\
		__grc = __get_user_char(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		const unsigned short __user *__guptr = ____guptr;	\
		unsigned short __x;					\
									\
		__grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		const unsigned int __user *__guptr = ____guptr;		\
		unsigned int __x;					\
									\
		__grc = __get_user_int(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		const unsigned long __user *__guptr = ____guptr;	\
		unsigned long __x;					\
									\
		__grc = __get_user_long(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__grc = __get_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__grc, 0);					\
})

/* get_user - like __get_user(), but annotated as a sleeping/faulting path. */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

/*
 * __clear_user - zero a block of user space memory, without access_ok()
 *
 * Same MVCOS chunk loop as raw_copy_to_user(), but the source operand is
 * always empty_zero_page - which is why only @to is advanced between
 * iterations.
 *
 * Returns the number of bytes that could not be cleared; 0 on success.
 */
static uaccess_kmsan_or_inline __must_check unsigned long
__clear_user(void __user *to, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		asm_inline volatile(
			" llilh %%r0,%[spec]\n"
			"0: mvcos %[to],%[from],%[size]\n"
			"1: nopr %%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page)
			: CC_CLOBBER_LIST("memory", "0"));
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (CC_TRANSFORM(cc) == 0)
			return osize - size;
		size -= 4096;
		to += 4096;
	}
}

/* clear_user - __clear_user() plus the might-sleep/fault annotation. */
static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

void *__s390_kernel_write(void *dst, const void *src, size_t size);

/*
 * s390_kernel_write - write to kernel memory
 *
 * In the decompressor a plain memcpy() is used; NOTE(review): presumably
 * because kernel write protection is not yet active there - confirm. The
 * kernel proper goes through the out-of-line __s390_kernel_write().
 */
static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	if (__is_defined(__DECOMPRESSOR))
		return memcpy(dst, src, size);
	return __s390_kernel_write(dst, src, size);
}

/* Link-time error trap for unsupported __mvc_kernel_nofault() sizes. */
void __noreturn __mvc_kernel_nofault_bad(void);

#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)

/*
 * Fault-tolerant kernel-to-kernel move of 1, 2, 4, or 8 bytes using MVC;
 * backs __get_kernel_nofault()/__put_kernel_nofault(). This variant
 * branches directly to @err_label via asm goto on a fault; the %O/%R
 * operand modifiers require the compiler support checked above.
 */
#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm goto(						\
			"0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n"	\
			"1: nopr %%r7\n"				\
			EX_TABLE(0b, %l[err_label])			\
			EX_TABLE(1b, %l[err_label])			\
			: [_dst] "=Q" (*(type *)dst)			\
			: [_src] "Q" (*(type *)(src)),			\
			  [_len] "I" (sizeof(type))			\
			:						\
			: err_label);					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

/*
 * Fallback variant: the EX_TABLE_UA_FAULT fixup stores the error code in
 * __rc, which is checked after the asm and turned into a goto to
 * @err_label.
 */
#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *(__dst) = (type *)(dst);					\
	int __rc;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm_inline volatile(					\
			"0: mvc 0(%[_len],%[_dst]),%[_src]\n"		\
			"1: lhi %[_rc],0\n"				\
			"2:\n"						\
			EX_TABLE_UA_FAULT(0b, 2b, %[_rc])		\
			EX_TABLE_UA_FAULT(1b, 2b, %[_rc])		\
			: [_rc] "=d" (__rc),				\
			  "=m" (*__dst)					\
			: [_src] "Q" (*(type *)(src)),			\
			  [_dst] "a" (__dst),				\
			  [_len] "I" (sizeof(type)));			\
		if (__rc)						\
			goto err_label;					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

#define __get_kernel_nofault __mvc_kernel_nofault
#define __put_kernel_nofault __mvc_kernel_nofault

/* Presumably left undefined so an unsupported size fails at link time. */
void __cmpxchg_user_key_called_with_bad_pointer(void);

/* Out-of-line, per-width implementations of cmpxchg_user_key(). */
int __cmpxchg_user_key1(unsigned long address, unsigned char *uval,
			unsigned char old, unsigned char new, unsigned long key);
int __cmpxchg_user_key2(unsigned long address, unsigned short *uval,
			unsigned short old, unsigned short new, unsigned long key);
int __cmpxchg_user_key4(unsigned long address, unsigned int *uval,
			unsigned int old, unsigned int new, unsigned long key);
int __cmpxchg_user_key8(unsigned long address, unsigned long *uval,
			unsigned long old, unsigned long new, unsigned long key);
int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval,
			 __uint128_t old, __uint128_t new, unsigned long key);

/* Size dispatcher for cmpxchg_user_key(); see the kernel-doc below. */
static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval,
					     __uint128_t old, __uint128_t new,
					     unsigned long key, int size)
{
	switch (size) {
	case 1: return __cmpxchg_user_key1(address, uval, old, new, key);
	case 2: return __cmpxchg_user_key2(address, uval, old, new, key);
	case 4: return __cmpxchg_user_key4(address, uval, old, new, key);
	case 8: return __cmpxchg_user_key8(address, uval, old, new, key);
	case 16: return __cmpxchg_user_key16(address, uval, old, new, key);
	default: __cmpxchg_user_key_called_with_bad_pointer();
	}
	return 0;
}

/**
 * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
 * @ptr: User space address of value to compare to @old and exchange with
 *	 @new. Must be aligned to sizeof(*@ptr).
 * @uval: Address where the old value of *@ptr is written to.
 * @old: Old value. Compared to the content pointed to by @ptr in order to
 *	 determine if the exchange occurs. The old value read from *@ptr is
 *	 written to *@uval.
 * @new: New value to place at *@ptr.
 * @key: Access key to use for checking storage key protection.
 *
 * Perform a cmpxchg on a user space target, honoring storage key protection.
 * @key alone determines how key checking is performed, neither
 * storage-protection-override nor fetch-protection-override apply.
 * The caller must compare *@uval and @old to determine if values have been
 * exchanged. In case of an exception *@uval is set to zero.
 *
 * Return: 0: cmpxchg executed
 *	   -EFAULT: an exception happened when trying to access *@ptr
 *	   -EAGAIN: maxed out number of retries (byte and short only)
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	_cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			  (old), (new), (key), sizeof(*(__ptr)));	\
})

#endif /* __S390_UACCESS_H */