1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012 Regents of the University of California
4 *
5 * This file was copied from include/asm-generic/uaccess.h
6 */
7
8 #ifndef _ASM_RISCV_UACCESS_H
9 #define _ASM_RISCV_UACCESS_H
10
11 #include <asm/asm-extable.h>
12 #include <asm/cpufeature.h>
13 #include <asm/pgtable.h> /* for TASK_SIZE */
14
#ifdef CONFIG_RISCV_ISA_SUPM
/*
 * Strip the pointer-masking (Supm) tag bits from @addr for @mm.
 *
 * When Supm is active, the top mm->context.pmlen bits of a user pointer
 * may carry a tag and must be discarded before the address is
 * range-checked or dereferenced.
 */
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
		u8 pmlen = mm->context.pmlen;

		/* Virtual addresses are sign-extended; physical addresses are zero-extended. */
		if (IS_ENABLED(CONFIG_MMU))
			return (long)(addr << pmlen) >> pmlen;
		else
			return (addr << pmlen) >> pmlen;
	}

	return addr;
}

/* Untag a user pointer using the current task's mm. */
#define untagged_addr(addr) ({						\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \
})

/*
 * Untag a user pointer belonging to another task's @mm; the caller must
 * hold mmap_lock (asserted below).
 */
#define untagged_addr_remote(mm, addr) ({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	mmap_assert_locked(mm);						\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

/* Range-check a user pointer after stripping any pointer-masking tag. */
#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
#else
#define untagged_addr(addr) (addr)
#endif
46
47 /*
48 * User space memory access functions
49 */
50 #ifdef CONFIG_MMU
51 #include <linux/errno.h>
52 #include <linux/compiler.h>
53 #include <linux/thread_info.h>
54 #include <asm/byteorder.h>
55 #include <asm/extable.h>
56 #include <asm/asm.h>
57 #include <asm-generic/access_ok.h>
58
/*
 * Set/clear the SUM (permit Supervisor access to User Memory) bit in
 * the sstatus CSR around user-space accesses.  The "memory" clobber
 * keeps the compiler from moving memory accesses across the window.
 */
#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
63
64 /*
65 * This is the smallest unsigned integer type that can fit a value
66 * (up to 'long long')
67 */
68 #define __inttype(x) __typeof__( \
69 __typefits(x, char, \
70 __typefits(x, short, \
71 __typefits(x, int, \
72 __typefits(x, long, 0ULL)))))
73
74 #define __typefits(x, type, not) \
75 __builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
76
77 /*
78 * The exception table consists of pairs of addresses: the first is the
79 * address of an instruction that is allowed to fault, and the second is
80 * the address at which the program should continue. No registers are
81 * modified, so it is entirely up to the continuation code to figure out
82 * what to do.
83 *
84 * All the routines below use bits of fixup code that are out of line
85 * with the main instruction path. This means when everything is well,
86 * we don't even have to jump over them. Further, they do not intrude
87 * on our cache or tlb entries.
88 */
89
/* Word order when a 64-bit access is split into two 32-bit halves (little-endian). */
#define __LSW	0
#define __MSW	1
92
93 /*
94 * The "__xxx" versions of the user access functions do not verify the address
95 * space - it must have been done previously with a separate "access_ok()"
96 * call.
97 */
98
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * Use a temporary variable for the output of the asm goto to avoid
 * triggering an LLVM assertion due to sign extending the output when
 * it is used in later function calls:
 * https://github.com/llvm/llvm-project/issues/143795
 */
#define __get_user_asm(insn, x, ptr, label)			\
do {								\
	u64 __tmp;						\
	asm_goto_output(					\
		"1:\n"						\
		"	" insn " %0, %1\n"			\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0)		\
		: "=&r" (__tmp)					\
		: "m" (*(ptr)) : : label);			\
	(x) = (__typeof__(x))(unsigned long)__tmp;		\
} while (0)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
/*
 * Fallback for compilers without asm-goto-with-outputs: on a fault the
 * exception fixup sets the error flag (%0) and zeroes the destination
 * register (%1); we then branch to @label by hand.
 */
#define __get_user_asm(insn, x, ptr, label)			\
do {								\
	long __gua_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %2\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
		: "+r" (__gua_err), "=&r" (x)			\
		: "m" (*(ptr)));				\
	if (__gua_err)						\
		goto label;					\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
132
#ifdef CONFIG_64BIT
/* RV64 loads 64 bits with a single ld. */
#define __get_user_8(x, ptr, label) \
	__get_user_asm("ld", x, ptr, label)
#else /* !CONFIG_64BIT */

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * RV32: assemble the 64-bit value from two 32-bit loads.  Either load
 * may fault; both extable entries branch to @label.
 */
#define __get_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	asm_goto_output(					\
		"1:\n"						\
		"	lw %0, %2\n"				\
		"2:\n"						\
		"	lw %1, %3\n"				\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l4, %0)		\
		_ASM_EXTABLE_UACCESS_ERR(2b, %l4, %0)		\
		: "=&r" (__lo), "=r" (__hi)			\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])	\
		: : label);					\
	/* (x) - (x) yields x's promoted arithmetic type for the cast. */ \
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
/*
 * Same as above without asm-goto outputs: the fixup sets the error flag
 * (%0) and zeroes __lo (%1); __hi is cleared by hand before branching
 * so no partial value leaks through.
 */
#define __get_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	long __gu8_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %3\n"				\
		"2:\n"						\
		"	lw %2, %4\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
		: "+r" (__gu8_err), "=&r" (__lo), "=r" (__hi)	\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
	if (__gu8_err) {					\
		__hi = 0;					\
		goto label;					\
	}							\
	/* (x) - (x) yields x's promoted arithmetic type for the cast. */ \
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#endif /* CONFIG_64BIT */
182
/*
 * Out-of-line copy helpers that assume the SUM window is already open;
 * used by the misaligned get/put fallbacks and unsafe_copy_*_user().
 * A non-zero return means the access faulted.
 */
unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
	const void __user *from, unsigned long n);
187
/*
 * Size-dispatched unchecked load; branches to @label on fault.  When
 * the CPU cannot do efficient unaligned accesses and the pointer is
 * misaligned, fall back to the byte-wise copy helper (the caller has
 * already opened the SUM window).
 */
#define __get_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
213
/*
 * Like __get_user_nocheck() but reports failure through @err instead of
 * a caller-supplied label; @x is zeroed on fault.  The "break" leaves
 * the do/while on success, skipping the failure path.
 */
#define __get_user_error(x, ptr, err)				\
do {								\
	__label__ __gu_failed;					\
								\
	__get_user_nocheck(x, ptr, __gu_failed);		\
	err = 0;						\
	break;							\
__gu_failed:							\
	x = (__typeof__(x))0;					\
	err = -EFAULT;						\
} while (0)
225
226 /**
227 * __get_user: - Get a simple variable from user space, with less checking.
228 * @x: Variable to store result.
229 * @ptr: Source address, in user space.
230 *
231 * Context: User context only. This function may sleep.
232 *
233 * This macro copies a single simple variable from user space to kernel
234 * space. It supports simple types like char and int, but not larger
235 * data types like structures or arrays.
236 *
237 * @ptr must have pointer-to-simple-variable type, and the result of
238 * dereferencing @ptr must be assignable to @x without a cast.
239 *
240 * Caller must check the pointer with access_ok() before calling this
241 * function.
242 *
243 * Returns zero on success, or -EFAULT on error.
244 * On error, the variable @x is set to zero.
245 */
#define __get_user(x, ptr)					\
({								\
	/* Strip any pointer-masking tag before the access. */	\
	const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	long __gu_err = 0;					\
	__typeof__(x) __gu_val;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	/* Open the SUM window only around the access itself. */ \
	__enable_user_access();					\
	__get_user_error(__gu_val, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	(x) = __gu_val;						\
								\
	__gu_err;						\
})
262
263 /**
264 * get_user: - Get a simple variable from user space.
265 * @x: Variable to store result.
266 * @ptr: Source address, in user space.
267 *
268 * Context: User context only. This function may sleep.
269 *
270 * This macro copies a single simple variable from user space to kernel
271 * space. It supports simple types like char and int, but not larger
272 * data types like structures or arrays.
273 *
274 * @ptr must have pointer-to-simple-variable type, and the result of
275 * dereferencing @ptr must be assignable to @x without a cast.
276 *
277 * Returns zero on success, or -EFAULT on error.
278 * On error, the variable @x is set to zero.
279 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	/* access_ok() also strips any pointer-masking tag. */	\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = (__force __typeof__(x))0, -EFAULT);	\
})
288
/*
 * Store @x to user memory; branches to @label on fault.  @x is copied
 * to a local so it is evaluated exactly once, and the "rJ" constraint
 * plus the %z modifier let a constant zero be emitted as register x0.
 */
#define __put_user_asm(insn, x, ptr, label)			\
do {								\
	__typeof__(*(ptr)) __x = x;				\
	asm goto(						\
		"1:\n"						\
		"	" insn " %z0, %1\n"			\
		_ASM_EXTABLE(1b, %l2)				\
		: : "rJ" (__x), "m"(*(ptr)) : : label);		\
} while (0)
298
#ifdef CONFIG_64BIT
/* RV64 stores 64 bits with a single sd. */
#define __put_user_8(x, ptr, label) \
	__put_user_asm("sd", x, ptr, label)
#else /* !CONFIG_64BIT */
/*
 * RV32: store the 64-bit value as two 32-bit stores; either store may
 * fault and branches to @label via its own extable entry.
 */
#define __put_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	/* (x)-(x) gives x's promoted arithmetic type before widening. */ \
	u64 __x = (__typeof__((x)-(x)))(x);			\
	asm goto(						\
		"1:\n"						\
		"	sw %z0, %2\n"				\
		"2:\n"						\
		"	sw %z1, %3\n"				\
		_ASM_EXTABLE(1b, %l4)				\
		_ASM_EXTABLE(2b, %l4)				\
		: : "rJ" (__x), "rJ" (__x >> 32),		\
		    "m" (__ptr[__LSW]),				\
		    "m" (__ptr[__MSW]) : : label);		\
} while (0)
#endif /* CONFIG_64BIT */
319
/*
 * Size-dispatched unchecked store; branches to @label on fault.
 * Misaligned pointers on CPUs without efficient unaligned access go
 * through the byte-wise copy helper (SUM already enabled by caller).
 */
#define __put_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		__typeof__(*(__gu_ptr)) ___val = (x);		\
		if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
346
/*
 * Like __put_user_nocheck() but reports failure through @err instead
 * of a caller-supplied label; @err is left untouched on success.
 */
#define __put_user_error(x, ptr, err)				\
do {								\
	__label__ err_label;					\
	__put_user_nocheck(x, ptr, err_label);			\
	break;							\
err_label:							\
	(err) = -EFAULT;					\
} while (0)
355
356 /**
357 * __put_user: - Write a simple value into user space, with less checking.
358 * @x: Value to copy to user space.
359 * @ptr: Destination address, in user space.
360 *
361 * Context: User context only. This function may sleep.
362 *
363 * This macro copies a single simple value from kernel space to user
364 * space. It supports simple types like char and int, but not larger
365 * data types like structures or arrays.
366 *
367 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
368 * to the result of dereferencing @ptr. The value of @x is copied to avoid
369 * re-ordering where @x is evaluated inside the block that enables user-space
370 * access (thus bypassing user space protection if @x is a function).
371 *
372 * Caller must check the pointer with access_ok() before calling this
373 * function.
374 *
375 * Returns zero on success, or -EFAULT on error.
376 */
#define __put_user(x, ptr)					\
({								\
	/* Strip any pointer-masking tag before the access. */	\
	__typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	/* Evaluate @x before the SUM window opens (see kernel-doc above). */ \
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_error(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
391
392 /**
393 * put_user: - Write a simple value into user space.
394 * @x: Value to copy to user space.
395 * @ptr: Destination address, in user space.
396 *
397 * Context: User context only. This function may sleep.
398 *
399 * This macro copies a single simple value from kernel space to user
400 * space. It supports simple types like char and int, but not larger
401 * data types like structures or arrays.
402 *
403 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
404 * to the result of dereferencing @ptr.
405 *
406 * Returns zero on success, or -EFAULT on error.
407 */
#define put_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	/* access_ok() also strips any pointer-masking tag. */	\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
416
417
/* Out-of-line copy loops; return the number of bytes left uncopied. */
unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);
422
/*
 * Unchecked copy from user space; the caller has done access_ok().
 * Returns the number of bytes that could not be copied (0 on success).
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, untagged_addr(from), n);
}
428
/*
 * Unchecked copy to user space; the caller has done access_ok().
 * Returns the number of bytes that could not be copied (0 on success).
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(untagged_addr(to), from, n);
}
434
/* User-space string helpers, implemented out of line. */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strnlen_user(const char __user *str, long n);

/* Zero @n bytes of user memory; returns the number of bytes not cleared. */
extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
441
/*
 * Zero @n bytes of user memory at @to.  Returns the number of bytes
 * that could not be cleared: 0 on success, @n if the range fails
 * access_ok().
 */
static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(untagged_addr(to), n) : n;
}
449
/*
 * Probe kernel addresses through the same fixup-protected accessors.
 * The __user casts only satisfy sparse; no SUM toggling happens here
 * since the *_nocheck macros do not touch sstatus themselves.
 */
#define arch_get_kernel_nofault(dst, src, type, err_label) \
	__get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)

#define arch_put_kernel_nofault(dst, src, type, err_label) \
	__put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
455
user_access_begin(const void __user * ptr,size_t len)456 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
457 {
458 if (unlikely(!access_ok(ptr, len)))
459 return 0;
460 __enable_user_access();
461 return 1;
462 }
463 #define user_access_begin user_access_begin
464 #define user_access_end __disable_user_access
465
user_access_save(void)466 static inline unsigned long user_access_save(void) { return 0UL; }
user_access_restore(unsigned long enabled)467 static inline void user_access_restore(unsigned long enabled) { }
468
469 /*
470 * We want the unsafe accessors to always be inlined and use
471 * the error labels - thus the macro games.
472 */
473 #define arch_unsafe_put_user(x, ptr, label) \
474 __put_user_nocheck(x, (ptr), label)
475
476 #define arch_unsafe_get_user(x, ptr, label) do { \
477 __inttype(*(ptr)) __gu_val; \
478 __get_user_nocheck(__gu_val, (ptr), label); \
479 (x) = (__force __typeof__(*(ptr)))__gu_val; \
480 } while (0)
481
/*
 * Bulk copies for use inside a user_access_begin() block (SUM window
 * already open); branch to @label on fault.  Wrapped in do { } while (0)
 * so each macro expands to exactly one statement: the previous bare
 * "if (...) goto label;" form would capture a following "else" when the
 * macro was used in an unbraced if/else body.
 */
#define unsafe_copy_to_user(_dst, _src, _len, label)		\
do {								\
	if (__asm_copy_to_user_sum_enabled(_dst, _src, _len))	\
		goto label;					\
} while (0)

#define unsafe_copy_from_user(_dst, _src, _len, label)		\
do {								\
	if (__asm_copy_from_user_sum_enabled(_dst, _src, _len))	\
		goto label;					\
} while (0)
489
490 #else /* CONFIG_MMU */
491 #include <asm-generic/uaccess.h>
492 #endif /* CONFIG_MMU */
493 #endif /* _ASM_RISCV_UACCESS_H */
494