/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/pgtable.h>		/* for TASK_SIZE */

#ifdef CONFIG_RISCV_ISA_SUPM
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
		u8 pmlen = mm->context.pmlen;

		/* Virtual addresses are sign-extended; physical addresses are zero-extended. */
		if (IS_ENABLED(CONFIG_MMU))
			return (long)(addr << pmlen) >> pmlen;
		else
			return (addr << pmlen) >> pmlen;
	}

	return addr;
}
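
/*
 * Worked example (illustrative only): with pmlen == 7 on rv64, the top
 * seven bits of the address hold the tag.  A tagged user pointer such
 * as 0xfe00000000400000 (tag 0x7f) is shifted left by 7 to discard the
 * tag, then arithmetically shifted right by 7 to sign-extend from what
 * was bit 56, yielding 0x0000000000400000.
 */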

#define untagged_addr(addr) ({							\
	unsigned long __addr = (__force unsigned long)(addr);			\
	(__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr);	\
})

#define untagged_addr_remote(mm, addr) ({					\
	unsigned long __addr = (__force unsigned long)(addr);			\
	mmap_assert_locked(mm);							\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);		\
})

#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
#else
#define untagged_addr(addr) (addr)
#endif

/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>
#include <asm-generic/access_ok.h>

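/*
 * Open/close a window in which the kernel may touch user memory: the
 * SUM (permit Supervisor User Memory access) bit in sstatus is set on
 * entry and cleared again on exit.
 */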
#define __enable_user_access()							\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()							\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x, char,			\
	  __typefits(x, short,			\
	    __typefits(x, int,			\
	      __typefits(x, long, 0ULL)))))

#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
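
/*
 * For example (illustrative only), __inttype() of a 2-byte value
 * evaluates to unsigned short, and of an 8-byte value to unsigned long
 * on rv64 (unsigned long long on rv32).
 */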

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
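
/*
 * For instance, __get_user_asm() below attaches an exception table
 * entry to the load at local label "1:" so that a faulting access
 * branches to the caller-supplied error label with the destination
 * register zeroed.
 */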

#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_user_asm(insn, x, ptr, label)			\
	asm_goto_output(					\
		"1:\n"						\
		"	" insn " %0, %1\n"			\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0)		\
		: "=&r" (x)					\
		: "m" (*(ptr)) : : label)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define __get_user_asm(insn, x, ptr, label)			\
do {								\
	long __gua_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %2\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
		: "+r" (__gua_err), "=&r" (x)			\
		: "m" (*(ptr)));				\
	if (__gua_err)						\
		goto label;					\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
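
/*
 * Both variants above behave identically; the second exists only for
 * toolchains without support for output operands in "asm goto"
 * (CONFIG_CC_HAS_ASM_GOTO_OUTPUT unset), so it must funnel the failure
 * through an error flag before jumping to the label.
 */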

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, label) \
	__get_user_asm("ld", x, ptr, label)
#else /* !CONFIG_64BIT */

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	asm_goto_output(					\
		"1:\n"						\
		"	lw %0, %2\n"				\
		"2:\n"						\
		"	lw %1, %3\n"				\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l4, %0)		\
		_ASM_EXTABLE_UACCESS_ERR(2b, %l4, %0)		\
		: "=&r" (__lo), "=r" (__hi)			\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])	\
		: : label);					\
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define __get_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	long __gu8_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %3\n"				\
		"2:\n"						\
		"	lw %2, %4\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
		: "+r" (__gu8_err), "=&r" (__lo), "=r" (__hi)	\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
	if (__gu8_err) {					\
		__hi = 0;					\
		goto label;					\
	}							\
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#endif /* CONFIG_64BIT */

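/*
 * These helpers assume the caller has already enabled user access, for
 * example from within a user_access_begin()/user_access_end() window;
 * they do not toggle SUM themselves.
 */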
unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
	const void __user *from, unsigned long n);

#define __get_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__label__ __gu_failed;						\
									\
	__get_user_nocheck(x, ptr, __gu_failed);			\
		err = 0;						\
		break;							\
__gu_failed:								\
		x = 0;							\
		err = -EFAULT;						\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	long __gu_err = 0;					\
	__typeof__(x) __gu_val;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__get_user_error(__gu_val, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	(x) = __gu_val;						\
								\
	__gu_err;						\
})

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = (__force __typeof__(x))0, -EFAULT);	\
})
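
/*
 * Example (illustrative only; "uptr" and "val" are hypothetical):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * __get_user() behaves the same but skips the access_ok() check, so it
 * is only safe once the caller has validated the pointer itself.
 */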

#define __put_user_asm(insn, x, ptr, label)			\
do {								\
	__typeof__(*(ptr)) __x = x;				\
	asm goto(						\
		"1:\n"						\
		"	" insn " %z0, %1\n"			\
		_ASM_EXTABLE(1b, %l2)				\
		: : "rJ" (__x), "m"(*(ptr)) : : label);		\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, label) \
	__put_user_asm("sd", x, ptr, label)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	asm goto(						\
		"1:\n"						\
		"	sw %z0, %2\n"				\
		"2:\n"						\
		"	sw %z1, %3\n"				\
		_ASM_EXTABLE(1b, %l4)				\
		_ASM_EXTABLE(2b, %l4)				\
		: : "rJ" (__x), "rJ" (__x >> 32),		\
			"m" (__ptr[__LSW]),			\
			"m" (__ptr[__MSW]) : : label);		\
} while (0)
#endif /* CONFIG_64BIT */

#define __put_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		__inttype(x) val = (__inttype(x))x;			\
		if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

#define __put_user_error(x, ptr, err)				\
do {								\
	__label__ err_label;					\
	__put_user_nocheck(x, ptr, err_label);			\
	break;							\
err_label:							\
	(err) = -EFAULT;					\
} while (0)

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr. The value of @x is copied to avoid
 * re-ordering where @x is evaluated inside the block that enables user-space
 * access (thus bypassing user space protection if @x is a function).
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_error(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
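
/*
 * Example (illustrative only; "uptr" is hypothetical):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * As with get_user(), the __put_user() variant leaves the access_ok()
 * check to the caller.
 */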

unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

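/*
 * raw_copy_{from,to}_user() back the generic copy_{from,to}_user()
 * wrappers in <linux/uaccess.h>.  Unlike the *_sum_enabled variants
 * above, __asm_copy_{from,to}_user() manage the SUM window themselves.
 */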
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, untagged_addr(from), n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(untagged_addr(to), from, n);
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(untagged_addr(to), n) : n;
}

#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), err_label)
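
/*
 * These provide the arch hooks behind copy_{from,to}_kernel_nofault():
 * the untrusted address is a kernel one, so no access_ok() check or SUM
 * window is involved; only the exception fixup machinery is reused.
 */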

static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__enable_user_access();
	return 1;
}
#define user_access_begin user_access_begin
#define user_access_end __disable_user_access

static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long enabled) { }

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_put_user(x, ptr, label)					\
	__put_user_nocheck(x, (ptr), label)

#define unsafe_get_user(x, ptr, label)	do {				\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_nocheck(__gu_val, (ptr), label);			\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
	if (__asm_copy_to_user_sum_enabled(_dst, _src, _len))		\
		goto label;

#define unsafe_copy_from_user(_dst, _src, _len, label)			\
	if (__asm_copy_from_user_sum_enabled(_dst, _src, _len))		\
		goto label;
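
/*
 * Example (illustrative only; names are hypothetical): several accesses
 * performed under a single SUM-enable window.
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(lo, &uptr[0], efault);
 *	unsafe_put_user(hi, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */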

#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */