/* SPDX-License-Identifier: GPL-2.0 */
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	void __user *__pu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	const void __user *__gu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

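/*
 * Illustrative sketch only (hypothetical code, not part of this header):
 * a driver helper that fetches one int from user space with get_user()
 * and writes a result back with put_user().  Both macros evaluate to 0
 * on success and -EFAULT when the address is bad or the access faults.
 *
 *	static int example_double_ioctl(int __user *uarg)
 *	{
 *		int val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		val *= 2;
 *		return put_user(val, uarg);
 *	}
 */
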
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))

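/*
 * Illustrative sketch only (hypothetical code): check a whole user range
 * once with access_ok() and then use the unchecked __get_user() for each
 * element, as the comment above describes.
 *
 *	static int example_sum_u32(const u32 __user *uptr, unsigned int count,
 *				   u32 *sum)
 *	{
 *		unsigned int i;
 *		u32 tmp;
 *
 *		if (!access_ok(uptr, (size_t)count * sizeof(u32)))
 *			return -EFAULT;
 *		*sum = 0;
 *		for (i = 0; i < count; i++) {
 *			if (__get_user(tmp, uptr + i))
 *				return -EFAULT;
 *			*sum += tmp;
 *		}
 *		return 0;
 *	}
 */
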
/*
 * __m() casts a user pointer to a notionally large object so that the
 * "m" constraints in the inline asm below cover the memory actually
 * being accessed, whatever its real size.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

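/*
 * The store at label 1 below is the instruction that may fault.  The
 * __ex_table entry pairs it with the fixup code at label 3, which loads
 * -EFAULT into the result register and branches back to label 2, so a
 * bad user pointer is reported as an error instead of an oops.
 */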
#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))

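/*
 * Fallback for unsupported access sizes; reaching it indicates a bug in
 * the caller.  __get_user_bad() further down plays the same role for the
 * get_user() family.
 */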
int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

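/*
 * Mirror image of __put_user_asm() above: here the load at label 1 may
 * fault, and the fixup at label 3 also clears the destination register
 * before returning -EFAULT, so the caller never sees stale data.
 */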
#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

/* Returns the number of bytes that could not be copied; 0 on success. */
unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

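/*
 * Illustrative sketch only (hypothetical code): the raw routines above
 * back the generic copy_{from,to}_user() helpers, which return the
 * number of bytes that could not be copied (0 on success).
 *
 *	struct example_req { u32 cmd; u32 len; };
 *
 *	static int example_fetch_req(struct example_req *req,
 *				     const void __user *ubuf)
 *	{
 *		if (copy_from_user(req, ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
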
/*
 * Ask <linux/uaccess.h> to provide copy_{from,to}_user() as inline
 * wrappers around the raw routines above instead of out-of-line code.
 */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	/*
	 * Hand the range to __bzero: address in %o0, length in %o1.
	 * The count of bytes that could not be cleared comes back in
	 * %o0 and is returned to the caller.
	 */
	__asm__ __volatile__ (
		"mov %2, %%o1\n"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok(addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
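
/*
 * Illustrative sketch only (hypothetical code): zero-padding the unread
 * tail of a user buffer after a short read.  A non-zero return from
 * clear_user() means some bytes could not be cleared.
 *
 *	static ssize_t example_pad_tail(void __user *buf, size_t copied,
 *					size_t len)
 *	{
 *		if (copied < len && clear_user(buf + copied, len - copied))
 *			return -EFAULT;
 *		return copied;
 *	}
 */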

__must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASM_UACCESS_H */