/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
#include <linux/instrumented.h>

void debug_user_asce(int exit);

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short	as : 2;
			unsigned short	   : 4;
			unsigned short	 k : 1;
			unsigned short	 a : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short	as : 2;
			unsigned short	   : 4;
			unsigned short	 k : 1;
			unsigned short	 a : 1;
		} oac2;
	};
};
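/*
 * The layout above mirrors the operand-access-control halfwords which MVCOS
 * expects in general register 0: oac1 occupies the upper halfword and
 * controls accesses to the first (destination) operand, oac2 the lower
 * halfword for the second (source) operand. "key" selects the storage access
 * key, "as" the address space, and the k/a bits declare that the key and
 * address-space fields are to be used (see the MVCOS description in the
 * Principles of Operation).
 */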

static __always_inline __must_check unsigned long
raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
{
	unsigned long rem;
	union oac spec = {
		.oac2.key = key,
		.oac2.as = PSW_BITS_AS_SECONDARY,
		.oac2.k = 1,
		.oac2.a = 1,
	};

	asm_inline volatile(
		"	lr	%%r0,%[spec]\n"
		"0:	mvcos	0(%[to]),0(%[from]),%[size]\n"
		"1:	jz	5f\n"
		"	algr	%[size],%[val]\n"
		"	slgr	%[from],%[val]\n"
		"	slgr	%[to],%[val]\n"
		"	j	0b\n"
		"2:	la	%[rem],4095(%[from])\n"	/* rem = from + 4095 */
		"	nr	%[rem],%[val]\n"	/* rem = (from + 4095) & -4096 */
		"	slgr	%[rem],%[from]\n"
		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
		"	jnh	6f\n"
		"3:	mvcos	0(%[to]),0(%[from]),%[rem]\n"
		"4:	slgr	%[size],%[rem]\n"
		"	j	6f\n"
		"5:	lghi	%[size],0\n"
		"6:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		EX_TABLE(3b, 6b)
		EX_TABLE(4b, 6b)
		: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
		: [val] "a" (-4096UL), [spec] "d" (spec.val)
		: "cc", "memory", "0");
	return size;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return raw_copy_from_user_key(to, from, n, 0);
}

static __always_inline __must_check unsigned long
raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
{
	unsigned long rem;
	union oac spec = {
		.oac1.key = key,
		.oac1.as = PSW_BITS_AS_SECONDARY,
		.oac1.k = 1,
		.oac1.a = 1,
	};

	asm_inline volatile(
		"	lr	%%r0,%[spec]\n"
		"0:	mvcos	0(%[to]),0(%[from]),%[size]\n"
		"1:	jz	5f\n"
		"	algr	%[size],%[val]\n"
		"	slgr	%[to],%[val]\n"
		"	slgr	%[from],%[val]\n"
		"	j	0b\n"
		"2:	la	%[rem],4095(%[to])\n"	/* rem = to + 4095 */
		"	nr	%[rem],%[val]\n"	/* rem = (to + 4095) & -4096 */
		"	slgr	%[rem],%[to]\n"
		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
		"	jnh	6f\n"
		"3:	mvcos	0(%[to]),0(%[from]),%[rem]\n"
		"4:	slgr	%[size],%[rem]\n"
		"	j	6f\n"
		"5:	lghi	%[size],0\n"
		"6:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		EX_TABLE(3b, 6b)
		EX_TABLE(4b, 6b)
		: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
		: [val] "a" (-4096UL), [spec] "d" (spec.val)
		: "cc", "memory", "0");
	return size;
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return raw_copy_to_user_key(to, from, n, 0);
}
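
/*
 * The raw_copy_*() variants above are the architecture hooks consumed by the
 * generic copy_from_user()/copy_to_user() implementation in <linux/uaccess.h>.
 * Like the generic helpers they return the number of bytes that could NOT be
 * copied, i.e. 0 on complete success.
 */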

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
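
/*
 * Illustrative sketch, not part of this header: the _key variants behave like
 * copy_from_user()/copy_to_user(), but perform the user access with the given
 * 4-bit storage access key. A hypothetical caller that must honor such a key
 * (both "uptr" and "access_key" are assumed to be provided by the caller)
 * could look like this:
 *
 *	char buf[64];
 *	unsigned long missed;
 *
 *	missed = copy_from_user_key(buf, uptr, sizeof(buf), access_key);
 *	if (missed)
 *		return -EFAULT;
 */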

int __noreturn __put_user_bad(void);

#ifdef CONFIG_KMSAN
#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
#define uaccess_kmsan_or_inline __always_inline
#endif
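
/*
 * With KMSAN enabled the helpers below are kept out of line and are not
 * memory-sanitized themselves, so that KMSAN does not instrument the inline
 * assembly and the helpers are not inlined into instrumented callers; without
 * KMSAN they are simply forced inline.
 */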

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "+Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm volatile(							\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "+Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_PUT_USER_NOINSTR(char);
DEFINE_PUT_USER_NOINSTR(short);
DEFINE_PUT_USER_NOINSTR(int);
DEFINE_PUT_USER_NOINSTR(long);

#define DEFINE_PUT_USER(type)						\
static __always_inline int						\
__put_user_##type(unsigned type __user *to, unsigned type *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __put_user_##type##_noinstr(to, from, size);		\
	instrument_put_user(*from, to, size);				\
	return rc;							\
}
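
/*
 * The instrumented wrappers only add the instrument_put_user() hook from
 * <linux/instrumented.h> around the uninstrumented MVCOS helpers, so that
 * sanitizers such as KMSAN are informed about the data copied to user space.
 */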

DEFINE_PUT_USER(char);
DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long);

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __prc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__prc = __put_user_char((unsigned char __user *)(ptr),	\
					(unsigned char *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	case 2:								\
		__prc = __put_user_short((unsigned short __user *)(ptr),\
					 (unsigned short *)&__x,	\
					 sizeof(*(ptr)));		\
		break;							\
	case 4:								\
		__prc = __put_user_int((unsigned int __user *)(ptr),	\
				       (unsigned int *)&__x,		\
				       sizeof(*(ptr)));			\
		break;							\
	case 8:								\
		__prc = __put_user_long((unsigned long __user *)(ptr),	\
					(unsigned long *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	default:							\
		__prc = __put_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__prc, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

int __noreturn __get_user_bad(void);

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "=Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	*to = 0;							\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm volatile(							\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "=Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	if (likely(!rc))						\
		return 0;						\
	*to = 0;							\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_GET_USER_NOINSTR(char);
DEFINE_GET_USER_NOINSTR(short);
DEFINE_GET_USER_NOINSTR(int);
DEFINE_GET_USER_NOINSTR(long);

#define DEFINE_GET_USER(type)						\
static __always_inline int						\
__get_user_##type(unsigned type *to, const unsigned type __user *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __get_user_##type##_noinstr(to, from, size);		\
	instrument_get_user(*to);					\
	return rc;							\
}

DEFINE_GET_USER(char);
DEFINE_GET_USER(short);
DEFINE_GET_USER(int);
DEFINE_GET_USER(long);

#define __get_user(x, ptr)						\
({									\
	const __user void *____guptr = (ptr);				\
	int __grc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		const unsigned char __user *__guptr = ____guptr;	\
		unsigned char __x;					\
									\
		__grc = __get_user_char(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		const unsigned short __user *__guptr = ____guptr;	\
		unsigned short __x;					\
									\
		__grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		const unsigned int __user *__guptr = ____guptr;		\
		unsigned int __x;					\
									\
		__grc = __get_user_int(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		const unsigned long __user *__guptr = ____guptr;	\
		unsigned long __x;					\
									\
		__grc = __get_user_long(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__grc = __get_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__grc, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
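
/*
 * Illustrative sketch, not part of this header: both macros evaluate to 0 on
 * success and -EFAULT if the user access faults, e.g. (with "uptr" assumed to
 * be a user pointer supplied by the caller):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */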

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
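
/*
 * Illustrative sketch, not part of this header: clear_user() returns the
 * number of bytes that could not be zeroed, so a typical caller treats any
 * non-zero result as a fault:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */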

void *__s390_kernel_write(void *dst, const void *src, size_t size);

static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	if (__is_defined(__DECOMPRESSOR))
		return memcpy(dst, src, size);
	return __s390_kernel_write(dst, src, size);
}

void __noreturn __mvc_kernel_nofault_bad(void);

#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)

#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm goto(						\
			"0:	mvc	%O[_dst](%[_len],%R[_dst]),%[_src]\n" \
			"1:	nopr	%%r7\n"				\
			EX_TABLE(0b, %l[err_label])			\
			EX_TABLE(1b, %l[err_label])			\
			: [_dst] "=Q" (*(type *)dst)			\
			: [_src] "Q" (*(type *)(src)),			\
			  [_len] "I" (sizeof(type))			\
			:						\
			: err_label);					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)
#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *(__dst) = (type *)(dst);					\
	int __rc;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm_inline volatile(					\
			"0:	mvc	0(%[_len],%[_dst]),%[_src]\n"	\
			"1:	lhi	%[_rc],0\n"			\
			"2:\n"						\
			EX_TABLE_UA_FAULT(0b, 2b, %[_rc])		\
			EX_TABLE_UA_FAULT(1b, 2b, %[_rc])		\
			: [_rc] "=d" (__rc),				\
			  "=m" (*__dst)					\
			: [_src] "Q" (*(type *)(src)),			\
			  [_dst] "a" (__dst),				\
			  [_len] "I" (sizeof(type)));			\
		if (__rc)						\
			goto err_label;					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

#define __get_kernel_nofault __mvc_kernel_nofault
#define __put_kernel_nofault __mvc_kernel_nofault
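
/*
 * Illustrative sketch, not part of this header: these definitions back the
 * generic get_kernel_nofault()/put_kernel_nofault() helpers from
 * <linux/uaccess.h>, which branch to an internal error label and return
 * -EFAULT if the kernel access faults:
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, (unsigned long *)addr))
 *		return -EFAULT;
 */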

void __cmpxchg_user_key_called_with_bad_pointer(void);

#define CMPXCHG_USER_KEY_MAX_LOOPS 128
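
/*
 * The 1- and 2-byte cases below are emulated with a word-sized compare-and-
 * swap on the containing aligned word; under heavy contention on the other
 * bytes of that word the compare-and-swap could retry indefinitely, so the
 * retry loop is bounded by CMPXCHG_USER_KEY_MAX_LOOPS and -EAGAIN is returned
 * once the budget is exhausted.
 */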

static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
					      __uint128_t old, __uint128_t new,
					      unsigned long key, int size)
{
	int rc = 0;

	switch (size) {
	case 1: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		_old = ((unsigned int)old & 0xff) << shift;
		_new = ((unsigned int)new & 0xff) << shift;
		mask = ~(0xff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned char *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 2: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		_old = ((unsigned int)old & 0xffff) << shift;
		_new = ((unsigned int)new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned short *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 4: {
		unsigned int prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cs	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" ((unsigned int)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned int *)uval = prev;
		return rc;
	}
	case 8: {
		unsigned long prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	csg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" ((unsigned long)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned long *)uval = prev;
		return rc;
	}
	case 16: {
		__uint128_t prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cdsg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(__int128_t *)address)
			: [new] "d" (new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(__uint128_t *)uval = prev;
		return rc;
	}
	}
	__cmpxchg_user_key_called_with_bad_pointer();
	return rc;
}

/**
 * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
 * @ptr: User space address of value to compare to @old and exchange with
 *	 @new. Must be aligned to sizeof(*@ptr).
 * @uval: Address where the old value of *@ptr is written to.
 * @old: Old value. Compared to the content pointed to by @ptr in order to
 *	 determine if the exchange occurs. The old value read from *@ptr is
 *	 written to *@uval.
 * @new: New value to place at *@ptr.
 * @key: Access key to use for checking storage key protection.
 *
 * Perform a cmpxchg on a user space target, honoring storage key protection.
 * @key alone determines how key checking is performed, neither
 * storage-protection-override nor fetch-protection-override apply.
 * The caller must compare *@uval and @old to determine if values have been
 * exchanged. In case of an exception *@uval is set to zero.
 *
 * Return:     0: cmpxchg executed
 *	 -EFAULT: an exception happened when trying to access *@ptr
 *	 -EAGAIN: maxed out number of retries (byte and short only)
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
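
/*
 * Illustrative sketch, not part of this header: a caller typically retries
 * until either the exchange happened or the compare failed. For example, to
 * atomically set a flag bit in a user space byte with access key "access_key"
 * (both "uptr" and "access_key" are assumed to come from the caller):
 *
 *	u8 expected, cur;
 *	int rc;
 *
 *	if (get_user(expected, uptr))
 *		return -EFAULT;
 *	for (;;) {
 *		rc = cmpxchg_user_key(uptr, &cur, expected, expected | 0x01,
 *				      access_key);
 *		if (rc == -EFAULT)
 *			return rc;
 *		if (!rc && cur == expected)
 *			break;
 *		expected = cur;
 *	}
 */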

#endif /* __S390_UACCESS_H */