xref: /linux/arch/sparc/include/asm/uaccess_32.h (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
1 /*
2  * uaccess.h: User space memory access functions.
3  *
4  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6  */
7 #ifndef _ASM_UACCESS_H
8 #define _ASM_UACCESS_H
9 
10 #ifdef __KERNEL__
11 #include <linux/compiler.h>
12 #include <linux/sched.h>
13 #include <linux/string.h>
14 #include <linux/errno.h>
15 #endif
16 
17 #ifndef __ASSEMBLY__
18 
19 #define ARCH_HAS_SORT_EXTABLE
20 #define ARCH_HAS_SEARCH_EXTABLE
21 
22 /* Sparc is not segmented; however, we need to be able to fool access_ok()
23  * when legitimately making system calls from kernel mode.
24  *
25  * "For historical reasons, these macros are grossly misnamed." -Linus
26  */
27 
28 #define KERNEL_DS   ((mm_segment_t) { 0 })
29 #define USER_DS     ((mm_segment_t) { -1 })
30 
31 #define VERIFY_READ	0
32 #define VERIFY_WRITE	1
33 
34 #define get_ds()	(KERNEL_DS)
35 #define get_fs()	(current->thread.current_ds)
36 #define set_fs(val)	((current->thread.current_ds) = (val))
37 
38 #define segment_eq(a,b)	((a).seg == (b).seg)
39 
40 /* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
41  * test can be fairly lightweight.
42  * No one can read or write kernel space from userland by passing a large
43  * size and an address near PAGE_OFFSET - the access will simply fault.
44  */
45 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
46 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
47 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
48 #define access_ok(type, addr, size)					\
49 	({ (void)(type); __access_ok((unsigned long)(addr), size); })
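/* Illustrative sketch, not part of the original header: the usual pattern for
 * the "fool access_ok()" case mentioned above.  The helper called here is
 * hypothetical; get_fs()/set_fs() and KERNEL_DS are the real interfaces.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	(__kernel_ok becomes true, checks pass)
 *	err = kernel_buffer_helper((void __user *) kbuf, len);
 *	set_fs(old_fs);		(always restore the previous segment)
 */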
50 
51 /*
52  * The exception table consists of pairs of addresses: the first is the
53  * address of an instruction that is allowed to fault, and the second is
54  * the address at which the program should continue.  No registers are
55  * modified, so it is entirely up to the continuation code to figure out
56  * what to do.
57  *
58  * All the routines below use bits of fixup code that are out of line
59  * with the main instruction path.  This means when everything is well,
60  * we don't even have to jump over them.  Further, they do not intrude
61  * on our cache or tlb entries.
62  *
63  * There is a special way to cover a range of potentially faulting
64  * insns (such as twenty ldd/std's with no other instructions in between).
65  * You specify the address of the first insn in 'insn' and 0 in 'fixup', and
66  * in the next exception_table_entry you specify the last potentially
67  * faulting insn + 1 in 'insn' and, in 'fixup', the routine that should
68  * handle the fault.  That fixup code will get
69  * (faulting_insn_address - first_insn_in_the_range_address)/4
70  * in %g2 (i.e. the index of the faulting instruction within the range).
71  */
72 
73 struct exception_table_entry
74 {
75         unsigned long insn, fixup;
76 };
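/* Sketch of a range-style entry pair as described above.  The labels are
 * hypothetical and this fragment is not taken from the kernel sources:
 *
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	range_start, 0			! first faulting insn, fixup == 0
 *	.word	range_end, range_fixup		! one past the last faulting insn
 *	.previous
 *
 * A fault anywhere in the covered range then branches to range_fixup with the
 * index of the faulting instruction in %g2.
 */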
77 
78 /* Returns 0 if no matching exception entry is found, and the fixup address otherwise.  */
79 extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
80 
81 extern void __ret_efault(void);
82 
83 /* These should become the main single-value transfer routines.
84  * They automatically use the right size if we just have the right
85  * pointer type.
86  *
87  * This gets kind of ugly. We want to return _two_ values in "get_user()"
88  * and yet we don't want to pass pointers around, because that is too much
89  * of a performance impact. Thus we have a few rather ugly macros here,
90  * and hide all the ugliness from the user.
91  */
92 #define put_user(x,ptr) ({ \
93 unsigned long __pu_addr = (unsigned long)(ptr); \
94 __chk_user_ptr(ptr); \
95 __put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
96 
97 #define get_user(x,ptr) ({ \
98 unsigned long __gu_addr = (unsigned long)(ptr); \
99 __chk_user_ptr(ptr); \
100 __get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
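/* Usage sketch (hypothetical caller, not part of this header).  Both macros
 * evaluate to 0 on success and -EFAULT on a failed access:
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */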
101 
102 /*
103  * The "__xxx" versions do not do address space checking, useful when
104  * doing multiple accesses to the same area (the user has to do the
105  * checks by hand with "access_ok()")
106  */
107 #define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
108 #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
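/* Intended pattern for the unchecked variants (hypothetical caller): one
 * explicit access_ok() check, then several accesses to the same area.
 *
 *	if (!access_ok(VERIFY_READ, uarg, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__get_user(a, &uarg[0]);
 *	__get_user(b, &uarg[1]);
 */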
109 
110 struct __large_struct { unsigned long buf[100]; };
111 #define __m(x) ((struct __large_struct __user *)(x))
112 
113 #define __put_user_check(x,addr,size) ({ \
114 register int __pu_ret; \
115 if (__access_ok(addr,size)) { \
116 switch (size) { \
117 case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
118 case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
119 case 4: __put_user_asm(x,,addr,__pu_ret); break; \
120 case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
121 default: __pu_ret = __put_user_bad(); break; \
122 } } else { __pu_ret = -EFAULT; } __pu_ret; })
123 
124 #define __put_user_nocheck(x,addr,size) ({ \
125 register int __pu_ret; \
126 switch (size) { \
127 case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
128 case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
129 case 4: __put_user_asm(x,,addr,__pu_ret); break; \
130 case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
131 default: __pu_ret = __put_user_bad(); break; \
132 } __pu_ret; })
133 
134 #define __put_user_asm(x,size,addr,ret)					\
135 __asm__ __volatile__(							\
136 	"/* Put user asm, inline. */\n"					\
137 "1:\t"	"st"#size " %1, %2\n\t"						\
138 	"clr	%0\n"							\
139 "2:\n\n\t"								\
140 	".section .fixup,#alloc,#execinstr\n\t"				\
141 	".align	4\n"							\
142 "3:\n\t"								\
143 	"b	2b\n\t"							\
144 	" mov	%3, %0\n\t"						\
145         ".previous\n\n\t"						\
146 	".section __ex_table,#alloc\n\t"				\
147 	".align	4\n\t"							\
148 	".word	1b, 3b\n\t"						\
149 	".previous\n\n\t"						\
150        : "=&r" (ret) : "r" (x), "m" (*__m(addr)),			\
151 	 "i" (-EFAULT))
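/* Descriptive note on the fixup path above: %0 is the error code, %1 the
 * value, %2 the user memory operand and %3 the -EFAULT constant.  If the
 * store at 1: faults, the __ex_table entry sends control to 3: in .fixup,
 * which moves -EFAULT into %0 and branches back to 2:; on the normal path
 * %0 is simply cleared.
 */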
152 
153 extern int __put_user_bad(void);
154 
155 #define __get_user_check(x,addr,size,type) ({ \
156 register int __gu_ret; \
157 register unsigned long __gu_val; \
158 if (__access_ok(addr,size)) { \
159 switch (size) { \
160 case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
161 case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
162 case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
163 case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
164 default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
165 } } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
166 
167 #define __get_user_check_ret(x,addr,size,type,retval) ({ \
168 register unsigned long __gu_val __asm__ ("l1"); \
169 if (__access_ok(addr,size)) { \
170 switch (size) { \
171 case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
172 case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
173 case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
174 case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
175 default: if (__get_user_bad()) return retval; \
176 } x = (type) __gu_val; } else return retval; })
177 
178 #define __get_user_nocheck(x,addr,size,type) ({ \
179 register int __gu_ret; \
180 register unsigned long __gu_val; \
181 switch (size) { \
182 case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
183 case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
184 case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
185 case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
186 default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
187 } x = (type) __gu_val; __gu_ret; })
188 
189 #define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
190 register unsigned long __gu_val __asm__ ("l1"); \
191 switch (size) { \
192 case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
193 case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
194 case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
195 case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
196 default: if (__get_user_bad()) return retval; \
197 } x = (type) __gu_val; })
198 
199 #define __get_user_asm(x,size,addr,ret)					\
200 __asm__ __volatile__(							\
201 	"/* Get user asm, inline. */\n"					\
202 "1:\t"	"ld"#size " %2, %1\n\t"						\
203 	"clr	%0\n"							\
204 "2:\n\n\t"								\
205 	".section .fixup,#alloc,#execinstr\n\t"				\
206 	".align	4\n"							\
207 "3:\n\t"								\
208 	"clr	%1\n\t"							\
209 	"b	2b\n\t"							\
210 	" mov	%3, %0\n\n\t"						\
211 	".previous\n\t"							\
212 	".section __ex_table,#alloc\n\t"				\
213 	".align	4\n\t"							\
214 	".word	1b, 3b\n\n\t"						\
215 	".previous\n\t"							\
216        : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),			\
217 	 "i" (-EFAULT))
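/* Descriptive note: this mirrors __put_user_asm, but the fixup at 3: also
 * clears %1 (the destination register) before branching back, so a faulting
 * __get_user() yields the value 0 together with -EFAULT in %0.
 */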
218 
219 #define __get_user_asm_ret(x,size,addr,retval)				\
220 if (__builtin_constant_p(retval) && retval == -EFAULT)			\
221 __asm__ __volatile__(							\
222 	"/* Get user asm ret, inline. */\n"				\
223 "1:\t"	"ld"#size " %1, %0\n\n\t"					\
224 	".section __ex_table,#alloc\n\t"				\
225 	".align	4\n\t"							\
226 	".word	1b,__ret_efault\n\n\t"					\
227 	".previous\n\t"							\
228        : "=&r" (x) : "m" (*__m(addr)));					\
229 else									\
230 __asm__ __volatile__(							\
231 	"/* Get user asm ret, inline. */\n"				\
232 "1:\t"	"ld"#size " %1, %0\n\n\t"					\
233 	".section .fixup,#alloc,#execinstr\n\t"				\
234 	".align	4\n"							\
235 "3:\n\t"								\
236 	"ret\n\t"							\
237 	" restore %%g0, %2, %%o0\n\n\t"					\
238 	".previous\n\t"							\
239 	".section __ex_table,#alloc\n\t"				\
240 	".align	4\n\t"							\
241 	".word	1b, 3b\n\n\t"						\
242 	".previous\n\t"							\
243        : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
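/* Descriptive note: when 'retval' is a compile-time -EFAULT the fixup can be
 * the shared __ret_efault() stub, so no local .fixup code is emitted.  In the
 * general case the fixup at 3: does a ret/restore that returns 'retval'
 * directly from the enclosing (non-leaf) function.
 */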
244 
245 extern int __get_user_bad(void);
246 
247 extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
248 
249 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
250 {
251 	if (n && __access_ok((unsigned long) to, n))
252 		return __copy_user(to, (__force void __user *) from, n);
253 	else
254 		return n;
255 }
256 
257 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
258 {
259 	return __copy_user(to, (__force void __user *) from, n);
260 }
261 
262 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
263 {
264 	if (n && __access_ok((unsigned long) from, n))
265 		return __copy_user((__force void __user *) to, from, n);
266 	else
267 		return n;
268 }
269 
270 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
271 {
272 	return __copy_user((__force void __user *) to, from, n);
273 }
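/* Usage sketch (hypothetical caller).  The copy routines return the number of
 * bytes that could NOT be copied, so 0 means success:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */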
274 
275 #define __copy_to_user_inatomic __copy_to_user
276 #define __copy_from_user_inatomic __copy_from_user
277 
278 static inline unsigned long __clear_user(void __user *addr, unsigned long size)
279 {
280 	unsigned long ret;
281 
282 	__asm__ __volatile__ (
283 		".section __ex_table,#alloc\n\t"
284 		".align 4\n\t"
285 		".word 1f,3\n\t"
286 		".previous\n\t"
287 		"mov %2, %%o1\n"
288 		"1:\n\t"
289 		"call __bzero\n\t"
290 		" mov %1, %%o0\n\t"
291 		"mov %%o0, %0\n"
292 		: "=r" (ret) : "r" (addr), "r" (size) :
293 		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
294 		"g1", "g2", "g3", "g4", "g5", "g7", "cc");
295 
296 	return ret;
297 }
298 
299 static inline unsigned long clear_user(void __user *addr, unsigned long n)
300 {
301 	if (n && __access_ok((unsigned long) addr, n))
302 		return __clear_user(addr, n);
303 	else
304 		return n;
305 }
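/* Sketch (hypothetical caller): like the copy routines, clear_user() returns
 * the number of bytes left unzeroed, so 0 indicates success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */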
306 
307 extern long __strncpy_from_user(char *dest, const char __user *src, long count);
308 
309 static inline long strncpy_from_user(char *dest, const char __user *src, long count)
310 {
311 	if (__access_ok((unsigned long) src, count))
312 		return __strncpy_from_user(dest, src, count);
313 	else
314 		return -EFAULT;
315 }
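/* Usage sketch (hypothetical caller): the return value is -EFAULT when the
 * source address fails the access check, otherwise whatever count
 * __strncpy_from_user() reports for the copied string:
 *
 *	long len = strncpy_from_user(kname, uname, sizeof(kname));
 *	if (len < 0)
 *		return len;
 */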
316 
317 extern long __strlen_user(const char __user *);
318 extern long __strnlen_user(const char __user *, long len);
319 
320 static inline long strlen_user(const char __user *str)
321 {
322 	if (!access_ok(VERIFY_READ, str, 0))
323 		return 0;
324 	else
325 		return __strlen_user(str);
326 }
327 
328 static inline long strnlen_user(const char __user *str, long len)
329 {
330 	if (!access_ok(VERIFY_READ, str, 0))
331 		return 0;
332 	else
333 		return __strnlen_user(str, len);
334 }
335 
336 #endif  /* __ASSEMBLY__ */
337 
338 #endif /* _ASM_UACCESS_H */
339