/* xref: /linux/arch/riscv/include/asm/uaccess.h (revision 876f5ebd58a9ac42f48a7ead3d5b274a314e0ace) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/pgtable.h>		/* for TASK_SIZE */

#ifdef CONFIG_RISCV_ISA_SUPM
/*
 * Strip pointer-masking tag bits from @addr using @mm's tag width
 * (mm->context.pmlen).  When the Supm extension is absent, pmlen is
 * effectively zero and the address is returned unchanged.
 */
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
		u8 pmlen = mm->context.pmlen;

		/* Virtual addresses are sign-extended; physical addresses are zero-extended. */
		if (IS_ENABLED(CONFIG_MMU))
			return (long)(addr << pmlen) >> pmlen;
		else
			return (addr << pmlen) >> pmlen;
	}

	return addr;
}

/* Untag an address belonging to the current task's mm. */
#define untagged_addr(addr) ({							\
	unsigned long __addr = (__force unsigned long)(addr);			\
	(__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr);	\
})

/*
 * Untag an address from another task's mm.  Caller must hold that mm's
 * mmap_lock (asserted below).
 */
#define untagged_addr_remote(mm, addr) ({					\
	unsigned long __addr = (__force unsigned long)(addr);			\
	mmap_assert_locked(mm);							\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);		\
})

/* Range-check the untagged address, overriding asm-generic access_ok(). */
#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
#else
#define untagged_addr(addr) (addr)
#endif
46 
/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>
#include <asm-generic/access_ok.h>
58 
/*
 * Set/clear the SUM bit in sstatus so the kernel may (or may no longer)
 * dereference user virtual addresses.  The "memory" clobber keeps memory
 * accesses from being reordered across the CSR write.
 */
#define __enable_user_access()							\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()							\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
63 
/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x, char,			\
	  __typefits(x, short,			\
	    __typefits(x, int,			\
	      __typefits(x, long, 0ULL)))))

/* Evaluates to (unsigned type)0 when x fits in 'type', otherwise to 'not'. */
#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
76 
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
89 
/* Word indices of the low/high 32-bit halves of a 64-bit value. */
#define __LSW	0
#define __MSW	1
92 
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */
98 
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * Single user-memory load; a fault takes the extable fixup, which gets
 * %0 (the destination) so it can be cleared, and control transfers
 * straight to @label.
 */
#define __get_user_asm(insn, x, ptr, label)			\
	asm_goto_output(					\
		"1:\n"						\
		"	" insn " %0, %1\n"			\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0)		\
		: "=&r" (x)					\
		: "m" (*(ptr)) : : label)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
/*
 * Fallback for compilers without asm-goto output operands: the fixup
 * sets an error flag and zeroes the value, then we branch to @label
 * from C.
 */
#define __get_user_asm(insn, x, ptr, label)			\
do {								\
	long __gua_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %2\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
		: "+r" (__gua_err), "=&r" (x)			\
		: "m" (*(ptr)));				\
	if (__gua_err)						\
		goto label;					\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
122 
#ifdef CONFIG_64BIT
/* 64-bit: a single ld fetches the whole value. */
#define __get_user_8(x, ptr, label) \
	__get_user_asm("ld", x, ptr, label)
#else /* !CONFIG_64BIT */

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * 32-bit: fetch the two halves with separate lw instructions; either
 * load may fault, so both get an extable entry targeting @label.
 * The (__typeof__((x) - (x))) cast routes the u64 through @x's promoted
 * arithmetic type — presumably to cope with pointer-typed @x; confirm.
 * NOTE(review): intentionally not wrapped in do { } while (0) — verify
 * every expansion site tolerates the bare declarations.
 */
#define __get_user_8(x, ptr, label)				\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	asm_goto_output(					\
		"1:\n"						\
		"	lw %0, %2\n"				\
		"2:\n"						\
		"	lw %1, %3\n"				\
		_ASM_EXTABLE_UACCESS_ERR(1b, %l4, %0)		\
		_ASM_EXTABLE_UACCESS_ERR(2b, %l4, %0)		\
		: "=&r" (__lo), "=r" (__hi)			\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])	\
		: : label);                                     \
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
/*
 * 32-bit fallback without asm-goto outputs: the fixups zero the low
 * half and set the error flag; the high half is zeroed in C before
 * jumping to @label so the destination reads as zero on a fault.
 */
#define __get_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	long __gu8_err = 0;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %3\n"				\
		"2:\n"						\
		"	lw %2, %4\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
		: "+r" (__gu8_err), "=&r" (__lo), "=r" (__hi)	\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
	if (__gu8_err) {					\
		__hi = 0;					\
		goto label;					\
	}							\
	(x) = (__typeof__(x))((__typeof__((x) - (x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#endif /* CONFIG_64BIT */
171 
/*
 * Raw copy helpers that require the caller to have already enabled user
 * access (SUM set); used by the misaligned fallbacks and the unsafe_*()
 * accessors below.
 */
unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
	const void __user *from, unsigned long n);
176 
/*
 * Fetch *__gu_ptr into @x, jumping to @label on a fault.  Caller must
 * have user access enabled already.  When the hardware lacks efficient
 * unaligned access and the pointer is misaligned, fall back to the
 * byte-wise assembly copy; otherwise use one naturally-sized load.
 */
#define __get_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
202 
/*
 * Error-code flavour of __get_user_nocheck(): @err is 0 on success; on
 * a fault @x is zeroed and @err becomes -EFAULT.  The break exits the
 * enclosing do/while once the fetch succeeds, skipping the label path.
 */
#define __get_user_error(x, ptr, err)					\
do {									\
	__label__ __gu_failed;						\
									\
	__get_user_nocheck(x, ptr, __gu_failed);			\
		err = 0;						\
		break;							\
__gu_failed:								\
		x = 0;							\
		err = -EFAULT;						\
} while (0)
214 
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * The pointer is untagged and user access (SUM) is enabled only for the
 * duration of the fetch.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	long __gu_err = 0;					\
	__typeof__(x) __gu_val;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__get_user_error(__gu_val, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	(x) = __gu_val;						\
								\
	__gu_err;						\
})
251 
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Performs the access_ok() range check itself, then delegates to
 * __get_user(); on a failed check @x is zeroed without touching memory.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?		\
		__get_user((x), __p) :				\
		((x) = (__force __typeof__(x))0, -EFAULT);	\
})
277 
/*
 * Store @x to user memory with a single instruction; jump to @label on
 * a fault.  @x is copied to a local first so it is fully evaluated
 * before the store; "rJ"/%z lets a constant zero use the x0 register.
 */
#define __put_user_asm(insn, x, ptr, label)			\
do {								\
	__typeof__(*(ptr)) __x = x;				\
	asm goto(						\
		"1:\n"						\
		"	" insn " %z0, %1\n"			\
		_ASM_EXTABLE(1b, %l2)				\
		: : "rJ" (__x), "m"(*(ptr)) : : label);		\
} while (0)
287 
#ifdef CONFIG_64BIT
/* 64-bit: a single sd stores the whole value. */
#define __put_user_8(x, ptr, label) \
	__put_user_asm("sd", x, ptr, label)
#else /* !CONFIG_64BIT */
/*
 * 32-bit: store the value as two sw instructions (low then high word);
 * either store may fault and both jump to @label.  The
 * (__typeof__((x)-(x))) cast routes @x through its promoted arithmetic
 * type before widening to u64 — presumably for pointer-typed @x.
 */
#define __put_user_8(x, ptr, label)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	asm goto(						\
		"1:\n"						\
		"	sw %z0, %2\n"				\
		"2:\n"						\
		"	sw %z1, %3\n"				\
		_ASM_EXTABLE(1b, %l4)				\
		_ASM_EXTABLE(2b, %l4)				\
		: : "rJ" (__x), "rJ" (__x >> 32),		\
			"m" (__ptr[__LSW]),			\
			"m" (__ptr[__MSW]) : : label);		\
} while (0)
#endif /* CONFIG_64BIT */
308 
/*
 * Store @x to *__gu_ptr, jumping to @label on a fault.  Caller must
 * have user access enabled already.  Misaligned pointers on hardware
 * without efficient unaligned access go through the byte-wise assembly
 * copy (via a correctly-sized integer temporary); aligned sizes use a
 * single naturally-sized store.
 */
#define __put_user_nocheck(x, __gu_ptr, label)			\
do {								\
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\
		__inttype(x) val = (__inttype(x))x;			\
		if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
			goto label;				\
		break;						\
	}							\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, label);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, label);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, label);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, label);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
335 
/*
 * Error-code flavour of __put_user_nocheck(): leaves @err untouched on
 * success and sets it to -EFAULT when the store faults.
 */
#define __put_user_error(x, ptr, err)				\
do {								\
	__label__ err_label;					\
	__put_user_nocheck(x, ptr, err_label);			\
	break;							\
err_label:							\
	(err) = -EFAULT;					\
} while (0)
344 
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr. The value of @x is copied to avoid
 * re-ordering where @x is evaluated inside the block that enables user-space
 * access (thus bypassing user space protection if @x is a function).
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * The pointer is untagged and user access (SUM) is enabled only for the
 * duration of the store.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_error(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
380 
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Performs the access_ok() range check itself, then delegates to
 * __put_user().
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?		\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
405 
406 
/*
 * Assembly bulk-copy routines.  Unlike the *_sum_enabled variants
 * above, these are called without user access pre-enabled by the
 * caller — see raw_copy_from_user()/raw_copy_to_user().
 */
unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);
411 
412 static inline unsigned long
413 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
414 {
415 	return __asm_copy_from_user(to, untagged_addr(from), n);
416 }
417 
418 static inline unsigned long
419 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
420 {
421 	return __asm_copy_to_user(untagged_addr(to), from, n);
422 }
423 
/* Out-of-line user string helpers and the raw clear primitive. */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
430 
431 static inline
432 unsigned long __must_check clear_user(void __user *to, unsigned long n)
433 {
434 	might_fault();
435 	return access_ok(to, n) ?
436 		__clear_user(untagged_addr(to), n) : n;
437 }
438 
/*
 * Kernel-address nofault accessors: reuse the user-access macros on
 * kernel pointers; extable fixups send faults to @err_label.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), err_label)
444 
445 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
446 {
447 	if (unlikely(!access_ok(ptr, len)))
448 		return 0;
449 	__enable_user_access();
450 	return 1;
451 }
452 #define user_access_begin user_access_begin
453 #define user_access_end __disable_user_access
454 
/* No user-access state needs saving across exceptions here: no-ops. */
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long enabled) { }
457 
/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */

/* Store inside an active user_access_begin() region; @label on fault. */
#define unsafe_put_user(x, ptr, label)					\
	__put_user_nocheck(x, (ptr), label)

/* Load into an unsigned temporary of matching width, then cast back. */
#define unsafe_get_user(x, ptr, label)	do {				\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_nocheck(__gu_val, (ptr), label);			\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

/* Bulk copies for use between user_access_begin()/user_access_end(). */
#define unsafe_copy_to_user(_dst, _src, _len, label)			\
	if (__asm_copy_to_user_sum_enabled(_dst, _src, _len))		\
		goto label;

#define unsafe_copy_from_user(_dst, _src, _len, label)			\
	if (__asm_copy_from_user_sum_enabled(_dst, _src, _len))		\
		goto label;
478 
#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */
483