/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a, b) ((a).seg == (b).seg)

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.
 * No one can read or write kernel space from userland by passing a large
 * size together with an address just below PAGE_OFFSET - the access will
 * fault before it gets anywhere.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (uaccess_kernel())
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \
	({ (void)(type); __access_ok((unsigned long)(addr), size); })
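
/* Illustrative sketch (not part of this header) of the pattern the comment
 * above alludes to: switching to KERNEL_DS makes access_ok() accept kernel
 * addresses, so a routine written for user pointers can legitimately be
 * called on a kernel buffer.  example_kernel_call() and
 * kernel_api_taking_user_ptr() are hypothetical names.
 */
#if 0	/* example only */
static int example_kernel_call(void *kbuf, unsigned long len)
{
	mm_segment_t old_fs = get_fs();
	int err;

	set_fs(KERNEL_DS);	/* __access_ok() now passes for kernel addrs */
	err = kernel_api_taking_user_ptr((void __user *)kbuf, len);
	set_fs(old_fs);		/* always restore the previous segment */
	return err;
}
#endif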

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting
 * insns (like twenty ldd/std's with no other instructions in between).
 * You specify the address of the first insn in 'insn' and 0 in 'fixup',
 * and in the next exception_table_entry you specify the last potentially
 * faulting insn + 1 and, in 'fixup', the routine which should handle
 * the fault.  That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
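
/* Hedged sketch of the "range" encoding described in the comment above:
 * two consecutive table entries, the first with fixup 0 marking the start
 * of the range, the second giving the real fixup for the last faulting
 * insn + 1.  The addresses below are made-up placeholders.
 */
#if 0	/* example only */
static const struct exception_table_entry example_range[2] = {
	{ .insn = 0xf0001000, .fixup = 0 },		/* first insn; fixup 0 flags a range */
	{ .insn = 0xf0001054, .fixup = 0xf0002000 },	/* last insn + 1, real fixup routine */
};
#endif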

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})
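
/* Usage sketch (illustrative only): the standard call pattern.  Both macros
 * return 0 on success and -EFAULT on a bad address; get_user() also zeroes
 * the destination on a fault.  example_bump() is a hypothetical helper.
 */
#if 0	/* example only */
static int example_bump(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* fetch, with access_ok() check */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* store back, same checking */
}
#endif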

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
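
/* Usage sketch for the unchecked variants: one access_ok() up front, then
 * multiple __get_user() calls on the same area.  example_pair() is a
 * hypothetical helper; note the type argument to access_ok() is ignored
 * on sparc32 (see the macro above).
 */
#if 0	/* example only */
static int example_pair(const int __user *uptr, int *a, int *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;		/* one range check covers both reads */
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;		/* faults can still happen mid-copy */
	return 0;
}
#endif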

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"b	2b\n\t"						\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"clr	%1\n\t"						\
		"b	2b\n\t"						\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
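
/* Usage sketch: callers normally go through copy_from_user()/copy_to_user()
 * from <linux/uaccess.h>, which (inlined here via the INLINE_* defines
 * above) bottom out in the raw routines.  A nonzero return is the number
 * of bytes that could not be copied.  example_read() is hypothetical.
 */
#if 0	/* example only */
static long example_read(void *kbuf, const void __user *ubuf, unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* some bytes were left uncopied */
	return len;
}
#endif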

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
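
/* Usage sketch: as the code above shows, clear_user() returns the number
 * of bytes left unzeroed, which callers usually fold into -EFAULT.
 * example_zero() is a hypothetical helper.
 */
#if 0	/* example only */
static int example_zero(void __user *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}
#endif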

__must_check long strnlen_user(const char __user *str, long n);
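
/* Usage sketch: strnlen_user() returns the string length including the
 * terminating NUL, or 0 if the access faults.  example_len() and the
 * PAGE_SIZE bound are illustrative choices, not part of this API.
 */
#if 0	/* example only */
static long example_len(const char __user *ustr)
{
	long len = strnlen_user(ustr, PAGE_SIZE);

	return len ? len : -EFAULT;	/* 0 means the read faulted */
}
#endif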

#endif /* _ASM_UACCESS_H */