xref: /linux/arch/s390/include/asm/uaccess.h (revision 7cefa5a05dbda1f0bbbd98e9d2861b09a35cc6ea)
1 /*
2  *  S390 version
3  *    Copyright IBM Corp. 1999, 2000
4  *    Author(s): Hartmut Penner (hp@de.ibm.com),
5  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
6  *
7  *  Derived from "include/asm-i386/uaccess.h"
8  */
9 #ifndef __S390_UACCESS_H
10 #define __S390_UACCESS_H
11 
12 /*
13  * User space memory access functions
14  */
15 #include <asm/processor.h>
16 #include <asm/ctl_reg.h>
17 
18 
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed, with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 
/*
 * An mm_segment_t wraps a single value that selects which address
 * space controls set_fs() below loads into control register 7.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

/* get_ds() returns the most permissive segment (kernel). */
#define get_ds()        (KERNEL_DS)
/* get_fs() reads the current thread's segment, as stored by set_fs(). */
#define get_fs()        (current->thread.mm_segment)
/* Two segments are equal iff their ar4 members match. */
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
36 
/*
 * set_fs - switch the addressing mode for user accesses by pointing
 * the secondary-space ASCE (control register 7) at either the kernel
 * or the user address space, and keep the CIF_ASCE_SECONDARY CPU flag
 * in sync.
 *
 * NOTE(review): uaccess_kernel() presumably tests the mm_segment
 * value stored just above — confirm against its definition elsewhere.
 */
static inline void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (uaccess_kernel()) {
		/* KERNEL_DS: secondary space maps the kernel ASCE */
		set_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	} else {
		/* USER_DS: secondary space maps the user ASCE */
		clear_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
}
48 
/*
 * __range_ok - always succeeds on s390: user and kernel accesses are
 * separated by address space (see set_fs() above), not by an address
 * limit, so there is no range to validate here.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
53 
/*
 * __access_ok - static-analysis hook plus the (trivial) range check.
 * __chk_user_ptr() only has an effect under sparse; __range_ok()
 * always returns 1 on this architecture.
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

/* The access 'type' argument is ignored on s390. */
#define access_ok(type, addr, size) __access_ok(addr, size)
61 
62 /*
63  * The exception table consists of pairs of addresses: the first is the
64  * address of an instruction that is allowed to fault, and the second is
65  * the address at which the program should continue.  No registers are
66  * modified, so it is entirely up to the continuation code to figure out
67  * what to do.
68  *
69  * All the routines below use bits of fixup code that are out of line
70  * with the main instruction path.  This means when everything is well,
71  * we don't even have to jump over them.  Further, they do not intrude
72  * on our cache or tlb entries.
73  */
74 
/*
 * Relative exception table entry: insn is a 32-bit offset to the
 * faulting instruction, fixup a 32-bit offset to the continuation
 * code, each relative to the member's own location (see
 * extable_fixup()).
 */
struct exception_table_entry
{
	int insn, fixup;
};
79 
80 static inline unsigned long extable_fixup(const struct exception_table_entry *x)
81 {
82 	return (unsigned long)&x->fixup + x->fixup;
83 }
84 
85 #define ARCH_HAS_RELATIVE_EXTABLE
86 
87 /**
88  * __copy_from_user: - Copy a block of data from user space, with less checking.
89  * @to:   Destination address, in kernel space.
90  * @from: Source address, in user space.
91  * @n:	  Number of bytes to copy.
92  *
93  * Context: User context only. This function may sleep if pagefaults are
94  *          enabled.
95  *
96  * Copy data from user space to kernel space.  Caller must check
97  * the specified block with access_ok() before calling this function.
98  *
99  * Returns number of bytes that could not be copied.
100  * On success, this will be zero.
101  *
102  * If some data could not be copied, this function will pad the copied
103  * data to the requested size using zero bytes.
104  */
105 unsigned long __must_check __copy_from_user(void *to, const void __user *from,
106 					    unsigned long n);
107 
108 /**
109  * __copy_to_user: - Copy a block of data into user space, with less checking.
110  * @to:   Destination address, in user space.
111  * @from: Source address, in kernel space.
112  * @n:	  Number of bytes to copy.
113  *
114  * Context: User context only. This function may sleep if pagefaults are
115  *          enabled.
116  *
117  * Copy data from kernel space to user space.  Caller must check
118  * the specified block with access_ok() before calling this function.
119  *
120  * Returns number of bytes that could not be copied.
121  * On success, this will be zero.
122  */
123 unsigned long __must_check __copy_to_user(void __user *to, const void *from,
124 					  unsigned long n);
125 
126 #define __copy_to_user_inatomic __copy_to_user
127 #define __copy_from_user_inatomic __copy_from_user
128 
129 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
130 
/*
 * Single-value user copy via one MVCOS instruction (z10 and later).
 *
 * %r0 carries the MVCOS operand-access specification ("spec"); it
 * presumably selects which operand lives in the user address space
 * (0x810000UL = user destination, 0x81UL = user source, judging by
 * the callers) — confirm against the z/Architecture MVCOS
 * description.  On success the XR zeroes the return code; a fault in
 * the MVCOS or the XR branches to the out-of-line fixup, which loads
 * -EFAULT and jumps back past the fast path.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0:	mvcos	%1,%3,%2\n"			\
		"1:	xr	%0,%0\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%0,%5\n"			\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "=Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
151 
152 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
153 {
154 	unsigned long spec = 0x810000UL;
155 	int rc;
156 
157 	switch (size) {
158 	case 1:
159 		rc = __put_get_user_asm((unsigned char __user *)ptr,
160 					(unsigned char *)x,
161 					size, spec);
162 		break;
163 	case 2:
164 		rc = __put_get_user_asm((unsigned short __user *)ptr,
165 					(unsigned short *)x,
166 					size, spec);
167 		break;
168 	case 4:
169 		rc = __put_get_user_asm((unsigned int __user *)ptr,
170 					(unsigned int *)x,
171 					size, spec);
172 		break;
173 	case 8:
174 		rc = __put_get_user_asm((unsigned long __user *)ptr,
175 					(unsigned long *)x,
176 					size, spec);
177 		break;
178 	}
179 	return rc;
180 }
181 
182 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
183 {
184 	unsigned long spec = 0x81UL;
185 	int rc;
186 
187 	switch (size) {
188 	case 1:
189 		rc = __put_get_user_asm((unsigned char *)x,
190 					(unsigned char __user *)ptr,
191 					size, spec);
192 		break;
193 	case 2:
194 		rc = __put_get_user_asm((unsigned short *)x,
195 					(unsigned short __user *)ptr,
196 					size, spec);
197 		break;
198 	case 4:
199 		rc = __put_get_user_asm((unsigned int *)x,
200 					(unsigned int __user *)ptr,
201 					size, spec);
202 		break;
203 	case 8:
204 		rc = __put_get_user_asm((unsigned long *)x,
205 					(unsigned long __user *)ptr,
206 					size, spec);
207 		break;
208 	}
209 	return rc;
210 }
211 
212 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
213 
214 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
215 {
216 	size = __copy_to_user(ptr, x, size);
217 	return size ? -EFAULT : 0;
218 }
219 
220 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
221 {
222 	size = __copy_from_user(x, ptr, size);
223 	return size ? -EFAULT : 0;
224 }
225 
226 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
227 
228 /*
229  * These are the main single-value transfer routines.  They automatically
230  * use the right size if we just have the right pointer type.
231  */
/*
 * __put_user - store a single value to user space, sized by the
 * pointer's type.  Evaluates x exactly once; sizes other than
 * 1/2/4/8 trip the link-time error __put_user_bad().  Expands to 0
 * on success or -EFAULT on fault.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__builtin_expect(__pu_err, 0);				\
})
251 
/*
 * put_user - __put_user() plus the might_fault() annotation: may
 * sleep on a page fault, so only use from sleepable context.
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


/* Link-time error used by __put_user() for unsupported sizes. */
int __put_user_bad(void) __attribute__((noreturn));
260 
/*
 * __get_user - fetch a single value from user space, sized by the
 * pointer's type.  Each case reads into a zero-initialized unsigned
 * temporary so x gets a defined value (0) even on fault, then
 * reinterprets the bytes as the pointer's type.  Sizes other than
 * 1/2/4/8 trip the link-time error __get_user_bad().  Expands to 0
 * on success or -EFAULT on fault.
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
300 
/*
 * get_user - __get_user() plus the might_fault() annotation: may
 * sleep on a page fault, so only use from sleepable context.
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

/* Link-time error used by __get_user() for unsupported sizes. */
int __get_user_bad(void) __attribute__((noreturn));

/*
 * NOTE(review): the unaligned variants are plain aliases — the
 * underlying access routines apparently tolerate unaligned user
 * addresses on s390; confirm before relying on that elsewhere.
 */
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
311 
/* Compile-time error raised when a constant-size copy overflows its
 * destination object (see copy_from_user()). */
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

/* Runtime counterpart for non-constant sizes: warn that @count bytes
 * were requested into an object of only @size bytes. */
static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
319 
320 /**
321  * copy_to_user: - Copy a block of data into user space.
322  * @to:   Destination address, in user space.
323  * @from: Source address, in kernel space.
324  * @n:    Number of bytes to copy.
325  *
326  * Context: User context only. This function may sleep if pagefaults are
327  *          enabled.
328  *
329  * Copy data from kernel space to user space.
330  *
331  * Returns number of bytes that could not be copied.
332  * On success, this will be zero.
333  */
334 static inline unsigned long __must_check
335 copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 	might_fault();
338 	return __copy_to_user(to, from, n);
339 }
340 
341 /**
342  * copy_from_user: - Copy a block of data from user space.
343  * @to:   Destination address, in kernel space.
344  * @from: Source address, in user space.
345  * @n:    Number of bytes to copy.
346  *
347  * Context: User context only. This function may sleep if pagefaults are
348  *          enabled.
349  *
350  * Copy data from user space to kernel space.
351  *
352  * Returns number of bytes that could not be copied.
353  * On success, this will be zero.
354  *
355  * If some data could not be copied, this function will pad the copied
356  * data to the requested size using zero bytes.
357  */
358 static inline unsigned long __must_check
359 copy_from_user(void *to, const void __user *from, unsigned long n)
360 {
361 	unsigned int sz = __compiletime_object_size(to);
362 
363 	might_fault();
364 	if (unlikely(sz != -1 && sz < n)) {
365 		if (!__builtin_constant_p(n))
366 			copy_user_overflow(sz, n);
367 		else
368 			__bad_copy_user();
369 		return n;
370 	}
371 	return __copy_from_user(to, from, n);
372 }
373 
374 unsigned long __must_check
375 __copy_in_user(void __user *to, const void __user *from, unsigned long n);
376 
377 static inline unsigned long __must_check
378 copy_in_user(void __user *to, const void __user *from, unsigned long n)
379 {
380 	might_fault();
381 	return __copy_in_user(to, from, n);
382 }
383 
384 /*
385  * Copy a null terminated string from userspace.
386  */
387 
388 long __strncpy_from_user(char *dst, const char __user *src, long count);
389 
390 static inline long __must_check
391 strncpy_from_user(char *dst, const char __user *src, long count)
392 {
393 	might_fault();
394 	return __strncpy_from_user(dst, src, count);
395 }
396 
397 unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
398 
399 static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
400 {
401 	might_fault();
402 	return __strnlen_user(src, n);
403 }
404 
405 /**
406  * strlen_user: - Get the size of a string in user space.
407  * @str: The string to measure.
408  *
409  * Context: User context only. This function may sleep if pagefaults are
410  *          enabled.
411  *
412  * Get the size of a NUL-terminated string in user space.
413  *
414  * Returns the size of the string INCLUDING the terminating NUL.
415  * On exception, returns 0.
416  *
417  * If there is a limit on the length of a valid string, you may wish to
418  * consider using strnlen_user() instead.
419  */
420 #define strlen_user(str) strnlen_user(str, ~0UL)
421 
422 /*
423  * Zero Userspace
424  */
425 unsigned long __must_check __clear_user(void __user *to, unsigned long size);
426 
427 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
428 {
429 	might_fault();
430 	return __clear_user(to, n);
431 }
432 
433 int copy_to_user_real(void __user *dest, void *src, unsigned long count);
434 void s390_kernel_write(void *dst, const void *src, size_t size);
435 
436 #endif /* __S390_UACCESS_H */
437