/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented; however, we need to be able to fool access_ok()
 * when legitimately doing system calls from kernel mode.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)
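
/*
 * Example (editor's sketch, not part of the original header): the classic
 * get_fs()/set_fs() pattern for legitimately fooling access_ok() when the
 * kernel calls a syscall-style routine on a kernel buffer.  With KERNEL_DS
 * in force, __access_ok() below masks the address with seg == 0, so the
 * check always passes.  The helper name sketch_kernel_read() is
 * hypothetical.
 *
 *	static ssize_t sketch_kernel_read(struct file *f, void *buf,
 *					  size_t n, loff_t *pos)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		ssize_t ret;
 *
 *		set_fs(KERNEL_DS);	// access_ok() now accepts kernel pointers
 *		ret = vfs_read(f, (__force char __user *)buf, n, pos);
 *		set_fs(old_fs);		// always restore the previous segment
 *		return ret;
 *	}
 */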

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.
 * Nobody can read or write kernel memory from userland by passing a large
 * size and an address just below PAGE_OFFSET: the access faults on that
 * guard page before it ever reaches kernel space.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type, addr, size)					\
	({ (void)(type); __access_ok((unsigned long)(addr), size); })
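
/*
 * Example (editor's sketch): access_ok() evaluates to non-zero when the
 * range is acceptable, so callers fail with -EFAULT when it returns 0.
 * ubuf and len stand for a caller-supplied user pointer and length.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */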

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting insns
 * (such as twenty ldd/std's with no other instructions intervening):
 * specify the address of the first insn with 0 as its fixup, and in the
 * next exception_table_entry specify the last potentially faulting insn
 * + 1 with the fault-handling routine as its fixup.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no matching exception entry is found, and the fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
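
/*
 * Example (editor's sketch) of the range encoding described above, as it
 * might appear in hand-written assembly.  The labels and the fixup
 * routine name range_fixup are illustrative only.
 *
 *	1:	ldd	[%o0 + 0x00], %g2	! first insn that may fault
 *		ldd	[%o0 + 0x08], %g4
 *		ldd	[%o0 + 0x10], %o2	! last insn that may fault
 *	2:
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	1b, 0			! start of range, fixup == 0
 *	.word	2b, range_fixup		! last insn + 1, real fixup routine
 *	.previous
 *
 * On a fault inside the range, range_fixup is entered with
 * (fault_pc - 1b)/4, the index of the faulting insn, in %g2.
 */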

void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines.
 * They automatically use the right size if we just have the right
 * pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
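
/*
 * Example (editor's sketch): checked single-value transfers in a
 * hypothetical helper.  Both macros return 0 on success and -EFAULT on a
 * bad user address; get_user() stores the fetched value through its
 * first argument.
 *
 *	static int sketch_double_it(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val *= 2;
 *		return put_user(val, uptr);
 *	}
 */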

/*
 * The "__xxx" versions do not do address space checking; they are useful
 * when doing multiple accesses to the same area (the caller has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
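
/*
 * Example (editor's sketch): one access_ok() check amortized over several
 * unchecked accesses, as the comment above describes.  sketch_get_pair()
 * is a hypothetical helper.
 *
 *	static int sketch_get_pair(const int __user *uptr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */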

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x,addr,size) ({ \
register int __pu_ret; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} } else { __pu_ret = -EFAULT; } __pu_ret; })

#define __put_user_nocheck(x,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size " %1, %2\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"b	2b\n\t"							\
	" mov	%3, %0\n\t"						\
	".previous\n\n\t"						\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=&r" (ret) : "r" (x), "m" (*__m(addr)),			\
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_check(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })

#define __get_user_check_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; } else return retval; })

#define __get_user_nocheck(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} x = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size " %2, %1\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"clr	%1\n\t"							\
	"b	2b\n\t"							\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),			\
	  "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)				\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b,__ret_efault\n\n\t"					\
	".previous\n\t"							\
	: "=&r" (x) : "m" (*__m(addr)));				\
else									\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"ret\n\t"							\
	" restore %%g0, %2, %%o0\n\n\t"					\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=&r" (x) : "m" (*__m(addr)), "i" (retval))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n))
		return __copy_user(to, (__force void __user *) from, n);
	else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) from, n))
		return __copy_user((__force void __user *) to, from, n);
	else
		return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
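
/*
 * Example (editor's sketch): the copy routines return the number of bytes
 * that could NOT be copied, so zero means success.  struct sketch_args is
 * a hypothetical type standing in for a real ioctl argument structure.
 *
 *	static int sketch_fetch_args(struct sketch_args *karg,
 *				     const struct sketch_args __user *uarg)
 *	{
 *		if (copy_from_user(karg, uarg, sizeof(*karg)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */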

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
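
/*
 * Example (editor's sketch): zeroing the tail of a user buffer, e.g. so a
 * short read does not leave stale data visible.  Like the copy routines,
 * clear_user() returns the number of bytes left unzeroed.  ubuf, done and
 * len are caller-supplied values.
 *
 *	if (clear_user(ubuf + done, len - done))
 *		return -EFAULT;
 */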

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
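
/*
 * Example (editor's sketch): bounding a user string before acting on it.
 * strnlen_user() counts the terminating NUL and returns 0 on a faulting
 * address, so both the zero and the too-long cases are rejected here.
 * ustr is a caller-supplied user pointer.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len || len > PATH_MAX)
 *		return -EFAULT;
 */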

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */