xref: /linux/arch/sparc/include/asm/uaccess_32.h (revision db68ce10c4f0a27c1ff9fa0e789e5c41f8c4ea63)
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a, b) ((a).seg == (b).seg)
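
/* Example (illustrative only, not part of the original header): the classic
 * pattern for legitimately driving the user-access routines at kernel
 * buffers is to widen the limit temporarily and restore it afterwards.
 * With KERNEL_DS, get_fs().seg is 0, so __access_ok() masks every address
 * down to 0 and the check always passes:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = helper_using_copy_from_user(kbuf, len);	(hypothetical helper)
 *	set_fs(old_fs);
 */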

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.
 * No one can read or write kernel space from userland by passing a large
 * size and an address close to PAGE_OFFSET - the resulting fault will stop
 * any such attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (uaccess_kernel())
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \
	({ (void)(type); __access_ok((unsigned long)(addr), size); })
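
/* Example (illustrative only): a typical pre-flight check before a run of
 * unchecked accesses, assuming 'uptr' is an 'int __user *'.  The first
 * argument is discarded by the macro above, so the usual VERIFY_READ /
 * VERIFY_WRITE constants serve only as documentation here:
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 */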

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting insns
 * (like twenty ldd/std's with no other instructions in between).
 * You specify the address of the first insn in the insn field and 0 in the
 * fixup field, and in the next exception_table_entry you specify the last
 * potentially faulting insn + 1 with the fixup field naming the routine
 * which should handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (ie. index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};
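
/* Example (illustrative only): a range entry pair as described above, in
 * SPARC assembly.  The first entry names the first insn of the range with a
 * zero fixup; the second names the address just past the last potentially
 * faulting insn together with the real fixup routine, which receives the
 * faulting insn's index within the range in %g2.  The labels range_start,
 * range_end and range_fixup are hypothetical:
 *
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	range_start, 0
 *	.word	range_end, range_fixup
 *	.previous
 */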

/* Returns 0 if exception not found and fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})
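
/* Example (illustrative only): the value and the error code come back
 * separately, so a typical call site, assuming 'uaddr' is an 'int __user *'
 * argument, looks like:
 *
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uaddr))
 *		return -EFAULT;
 */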

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
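
/* Example (illustrative only): one explicit access_ok() check covering two
 * consecutive ints, followed by unchecked reads, assuming 'uptr' is an
 * 'int __user *':
 *
 *	int lo, hi;
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__get_user(lo, &uptr[0]) || __get_user(hi, &uptr[1]))
 *		return -EFAULT;
 */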

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"b	2b\n\t"						\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))
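
/* In __put_user_asm() above, a fault on the store at label 1 is routed via
 * the (1b, 3b) exception table entry to the out-of-line fixup at label 3,
 * which branches back to label 2 with -EFAULT placed in the result register
 * from the branch delay slot; on the non-faulting path the result register
 * is simply cleared to 0.  __get_user_asm() below works the same way, but
 * its fixup additionally clears the destination register.
 */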

int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"clr	%1\n\t"						\
		"b	2b\n\t"						\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n)) {
		check_object_size(from, n, true);
		return __copy_user(to, (__force void __user *) from, n);
	} else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) from, n)) {
		check_object_size(to, n, false);
		return __copy_user((__force void __user *) to, from, n);
	} else {
		memset(to, 0, n);
		return n;
	}
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
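
/* Example (illustrative only): as with the generic kernel interfaces, these
 * return the number of bytes that could NOT be copied, so zero means
 * success.  Assuming a hypothetical 'struct foo' and a user pointer 'uarg':
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */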

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASM_UACCESS_H */