/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/string.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a, b) ((a).seg == (b).seg)

/* We have a nice unmapped page at PAGE_OFFSET - PAGE_SIZE, so this test
 * can be fairly lightweight.
 * No one can read/write anything from userland in kernel space by passing
 * a large size and an address near PAGE_OFFSET - a fault will break the
 * attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \
	({ (void)(type); __access_ok((unsigned long)(addr), size); })

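/* Illustrative only (not part of the original header): a caller typically
 * checks the whole user range once with access_ok() before touching it.
 * 'ubuf' and 'len' are hypothetical; the type argument is ignored here,
 * as the macro above shows.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */
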
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting
 * insns (like twenty ldd/std's with no intervening other instructions).
 * You specify the address of the first insn and 0 in fixup, and in the
 * next exception_table_entry you specify the last potentially faulting
 * insn + 1 and, in fixup, the routine which should handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

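/* Illustrative sketch of the range convention described above: two
 * consecutive entries, the first with fixup == 0.  The labels 1b/2b/3b
 * are hypothetical.
 *
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	1b, 0		! first insn of the range, fixup == 0
 *	.word	2b, 3b		! last insn + 1, fixup routine at 3b
 *	.previous
 *
 * A fault inside the range enters the fixup at 3b with the index of the
 * faulting insn in %g2.
 */
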
/* Returns 0 if exception not found and fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

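/* Illustrative only: both macros derive the access size from the pointer
 * type and evaluate to 0 or -EFAULT.  'val' and 'uptr' are hypothetical.
 *
 *	unsigned int val;
 *	unsigned int __user *uptr = ...;
 *
 *	if (get_user(val, uptr))	// 4-byte load into val
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))	// 4-byte store back
 *		return -EFAULT;
 */
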
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))

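/* Illustrative only: one access_ok() check can cover several unchecked
 * accesses to the same area.  'uts' is a hypothetical user pointer.
 *
 *	long sec, nsec;
 *	if (!access_ok(VERIFY_READ, uts, 2 * sizeof(long)))
 *		return -EFAULT;
 *	if (__get_user(sec, &uts[0]) || __get_user(nsec, &uts[1]))
 *		return -EFAULT;
 */
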
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"b	2b\n\t"						\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))

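/* For reference, a sketch (register names hypothetical) of what
 * __put_user_asm(x, h, addr, ret) emits: the store at 1: may fault, the
 * pair (1b, 3b) goes into __ex_table, and the out-of-line fixup at 3:
 * sets ret = -EFAULT and branches back past the store:
 *
 *	1:	sth	%x, [addr]	! may fault
 *		clr	%ret		! success path: ret = 0
 *	2:	...			! execution continues here
 *	3:	b	2b
 *		 mov	-EFAULT, %ret	! fault path, in the delay slot
 */
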
int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"clr	%1\n\t"						\
		"b	2b\n\t"						\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n)) {
		check_object_size(from, n, true);
		return __copy_user(to, (__force void __user *) from, n);
	} else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) from, n)) {
		check_object_size(to, n, false);
		return __copy_user((__force void __user *) to, from, n);
	} else {
		memset(to, 0, n);
		return n;
	}
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

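/* Illustrative only: like their generic counterparts, the copy routines
 * above return the number of bytes that could NOT be copied, so 0 means
 * complete success.  'struct foo' and 'ubuf' are hypothetical.
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;		// short or failed copy
 */
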
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}

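/* Illustrative only: clear_user() follows the same convention as the
 * copy routines, returning the number of bytes left unzeroed.  'ubuf'
 * and 'len' are hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
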
__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */