/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, while
 * with get_fs() == KERNEL_DS checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
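
/*
 * Worked example (illustrative): on a 32-bit non-KVM kernel __UA_LIMIT is
 * 0x80000000, so for addr = 0x7fffff00 and size = 0x200 the sum
 * addr + size = 0x80000100 has bit 31 set; ANDed with USER_DS.seg this is
 * nonzero and the access is rejected, even though addr itself looks fine.
 */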

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
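
/*
 * For example, put_user() passes sizeof(*(ptr)) down to access_ok(), a
 * positive compile-time constant, so __ua_size() folds to 0 and the size
 * term vanishes from the generated check.  Only a runtime-variable size
 * (e.g. a copy_to_user() length) keeps the term.
 */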

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.	This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
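
/*
 * Typical usage (hypothetical caller, names made up): validate a whole
 * user buffer once, then use the cheaper double-underscore variants:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	left = __copy_to_user(ubuf, kbuf, len);
 */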

/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))
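
/*
 * Example (hypothetical ioctl handler, names made up): hand a counter
 * back to userspace:
 *
 *	u32 __user *uptr = argp;
 *
 *	return put_user(counter, uptr);		(0 or -EFAULT)
 */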

/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
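
/*
 * Example (hypothetical syscall argument fetch, names made up):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */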

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
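
/*
 * The checked/unchecked split matters in loops.  A hypothetical routine
 * (names made up) can pay for access_ok() once, then use the unchecked
 * accessor per element:
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(karr[i], uarr + i))
 *			return -EFAULT;
 */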

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
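
/*
 * Casting the user pointer to this oversized struct lets the "o"
 * (offsettable memory operand) constraints in the asm blocks below take
 * a plain user pointer as a memory operand, whatever the access size.
 */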

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

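/* String pasting turns, e.g., _loadw("%1", "%3") into "lw %1, %3". */
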
#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (segment_eq(get_fs(), get_ds())) {				\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
		if (segment_eq(get_fs(), get_ds()))			\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	}								\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		if (segment_eq(get_fs(), get_ds()))			\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}


extern void __put_user_unknown(void);

/*
 * ul{h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)	\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_unaligned_asm(val, "lb", ptr); break;	\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)				\
do {									\
	switch (size) {							\
	case 1: __put_user_unaligned_asm("sb", ptr); break;		\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	__put_user_unaligned_common(ptr, size);				\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
		__put_user_unaligned_common(__pu_addr, size);		\
									\
	__pu_err;							\
})

#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
							      __cu_from,\
							      __cu_len);\
	else								\
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
							    __cu_from,	\
							    __cu_len);	\
	__cu_len;							\
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
						   __cu_from,		\
						   __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_to_user(__cu_to,	\
							 __cu_from,	\
							 __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
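
/*
 * Example (hypothetical read() implementation): the return value counts
 * bytes that could NOT be copied, so nonzero means failure:
 *
 *	if (copy_to_user(buf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */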

#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func_ptr)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func_ptr)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel. We are not going through
 * the TLB
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
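
/*
 * Example (hypothetical write() implementation): as with copy_to_user(),
 * a nonzero return means some bytes were left uncopied:
 *
 *	if (copy_from_user(kbuf, buf, count))
 *		return -EFAULT;
 */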

#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:	  Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
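
/*
 * Example (hypothetical): zero the tail of a partially filled user
 * buffer, e.g. when padding a short read:
 *
 *	if (clear_user(ubuf + done, count - done))
 *		return -EFAULT;
 */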

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
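
/*
 * Example (hypothetical): fetch a bounded name string from userspace:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(no NUL seen within the bound)
 */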

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.	This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:	 The maximum valid length.
 *
 * Context: User context only.	This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If no NUL is found within @n bytes, the return value may be greater
 * than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
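
/*
 * Rough sketch of how these entries are consumed (see
 * arch/mips/mm/extable.c): on a fault taken in kernel mode,
 * fixup_exception() searches __ex_table for an entry whose ->insn
 * matches regs->cp0_epc and, if one is found, resumes execution at
 * ->nextinsn, i.e. the .fixup stub that loads -EFAULT and jumps back.
 */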

#endif /* _ASM_UACCESS_H */