/* xref: /linux/arch/powerpc/include/asm/uaccess.h (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9) */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)
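
/*
 * Example (editor's sketch, not part of the original header): the classic
 * save/override/restore pattern built on get_fs()/set_fs().  With the limit
 * raised to KERNEL_DS, the access_ok() checks below accept kernel addresses,
 * so kernel buffers can be passed to code that expects __user pointers.
 * example_with_kernel_ds() is a hypothetical helper.
 */
static inline void example_with_kernel_ds(void)
{
	mm_segment_t old_fs = get_fs();		/* remember the current limit */

	set_fs(KERNEL_DS);			/* lift the limit for this section */
	/* ... call code that takes __user pointers with kernel buffers ... */
	set_fs(old_fs);				/* always restore the saved limit */
}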

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

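/*
 * Example (editor's sketch, not part of the original header): access_ok()
 * only verifies that the range lies below the current segment limit; it
 * does not fault in or pin any pages.  example_range_ok() is a hypothetical
 * wrapper that turns the boolean result into the usual 0/-EFAULT convention.
 */
static inline int example_range_ok(const void __user *uaddr, unsigned long len)
{
	return access_ok(VERIFY_READ, uaddr, len) ? 0 : -EFAULT;
}
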
/*
 * The exception table consists of pairs of relative addresses: the first is
 * the address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out what
 * to do.
 *
 * All the routines below use bits of fixup code that are out of line with the
 * main instruction path.  This means when everything is well, we don't even
 * have to jump over them.  Further, they do not intrude on our cache or TLB
 * entries.
 */

#define ARCH_HAS_RELATIVE_EXTABLE

struct exception_table_entry {
	int insn;
	int fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

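/*
 * Example (editor's sketch, not part of the original header): the insn
 * field uses the same self-relative encoding as the fixup field, so the
 * faulting instruction's address is recovered the same way extable_fixup()
 * recovers the landing pad.  example_extable_insn() is hypothetical; the
 * generic extable code has an equivalent helper.
 */
static inline unsigned long example_extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}
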
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	  __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	  case 8: __put_user_asm2(x, ptr, retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


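/*
 * Example (editor's sketch, not part of the original header):
 * __put_user_inatomic() expands to the _nosleep variant above, which skips
 * might_fault() and is meant for callers that already run with page faults
 * disabled.  example_store_flag() is a hypothetical helper.
 */
static inline int example_store_flag(u8 __user *uptr)
{
	return __put_user_inatomic(1, uptr);	/* 0 on success, -EFAULT on a fault */
}
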
extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long  __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})


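/*
 * Example (editor's sketch, not part of the original header): typical use
 * of the accessors defined above.  example_bump_counter() uses the checking
 * get_user()/put_user(); example_read_pair() shows the pattern described in
 * the comment further up - one access_ok() check followed by several
 * unchecked __get_user() accesses to the same user area.  Both helpers are
 * hypothetical.
 */
static inline long example_bump_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))		/* -EFAULT on a bad pointer */
		return -EFAULT;
	return put_user(val + 1, uptr);
}

static inline long example_read_pair(const u32 __user *uptr, u32 *a, u32 *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
		return -EFAULT;
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;
	return 0;
}
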
/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n))) {
		check_object_size(to, n, false);
		return __copy_tofrom_user((__force void __user *)to, from, n);
	}
	memset(to, 0, n);
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

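/*
 * Example (editor's sketch, not part of the original header): whichever
 * variant of copy_from_user() applies, it returns the number of bytes it
 * could not copy, so callers usually fold any non-zero return into -EFAULT.
 * example_fetch_args() is a hypothetical helper.
 */
static inline long example_fetch_args(void *dst, const void __user *src,
				      unsigned long len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}
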
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(to, n, false);

	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(from, n, true);

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, size);
}
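
/*
 * Example (editor's sketch, not part of the original header): the
 * _inatomic variants above skip might_fault() and are intended for code
 * that must not sleep; such callers typically bracket the copy with
 * pagefault_disable()/pagefault_enable(), which are declared in
 * linux/uaccess.h rather than here.  example_peek_word() is hypothetical.
 */
static inline unsigned long example_peek_word(u32 *val, const u32 __user *uptr)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(val, uptr, sizeof(*val));
	pagefault_enable();

	return left;	/* non-zero means the user access faulted */
}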

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	return size;
}

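/*
 * Example (editor's sketch, not part of the original header): like the copy
 * routines, clear_user() reports the number of bytes it failed to zero.
 * example_zero_user() is a hypothetical wrapper.
 */
static inline long example_zero_user(void __user *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}
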
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
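
/*
 * Example (editor's sketch, not part of the original header): pulling a
 * NUL-terminated string out of user space.  strncpy_from_user() returns the
 * length copied (excluding the NUL) or -EFAULT; a return equal to the buffer
 * size means the name did not fit.  example_get_name() and its -ENAMETOOLONG
 * policy are hypothetical.
 */
static inline long example_get_name(char *buf, long bufsize,
				    const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, bufsize);

	if (len < 0)
		return len;			/* -EFAULT from the copy */
	if (len == bufsize)
		return -ENAMETOOLONG;		/* hypothetical: reject truncation */
	buf[len] = '\0';			/* already terminated, but be explicit */
	return 0;
}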

#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */