xref: /linux/arch/mips/include/asm/uaccess.h (revision 3a0e75adecc8da026a5befb2c5828d08c999373c)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <asm/asm-eva.h>
17 #include <asm/extable.h>
18 
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed, with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27 
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33 
34 #define __UA_ADDR	".word"
35 #define __UA_LA		"la"
36 #define __UA_ADDU	"addu"
37 #define __UA_t0		"$8"
38 #define __UA_t1		"$9"
39 
40 #endif /* CONFIG_32BIT */
41 
42 #ifdef CONFIG_64BIT
43 
44 extern u64 __ua_limit;
45 
46 #define __UA_LIMIT	__ua_limit
47 
48 #define __UA_ADDR	".dword"
49 #define __UA_LA		"dla"
50 #define __UA_ADDU	"daddu"
51 #define __UA_t0		"$12"
52 #define __UA_t1		"$13"
53 
54 #endif /* CONFIG_64BIT */
55 
56 /*
57  * USER_DS is a bitmask that has the bits set that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63 
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
66 #define USER_DS		((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS	((mm_segment_t) { 0UL })
69 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
70 #endif
71 
72 #define get_ds()	(KERNEL_DS)
73 #define get_fs()	(current_thread_info()->addr_limit)
74 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
75 
76 #define segment_eq(a, b)	((a).seg == (b).seg)
77 
78 /*
79  * eva_kernel_access() - determine whether kernel memory access on an EVA system
80  *
81  * Determines whether memory accesses should be performed to kernel memory
82  * on a system using Extended Virtual Addressing (EVA).
83  *
84  * Return: true if a kernel memory access on an EVA system, else false.
85  */
86 static inline bool eva_kernel_access(void)
87 {
88 	if (!IS_ENABLED(CONFIG_EVA))
89 		return false;
90 
91 	return uaccess_kernel();
92 }
93 
94 /*
95  * Is a address valid? This does a straightforward calculation rather
96  * than tests.
97  *
98  * Address valid if:
99  *  - "addr" doesn't have any high-bits set
100  *  - AND "size" doesn't have any high-bits set
101  *  - AND "addr+size" doesn't have any high-bits set
102  *  - OR we are in kernel mode.
103  *
104  * __ua_size() is a trick to avoid runtime checking of positive constant
105  * sizes; for those we already know at compile time that the size is ok.
106  */
107 #define __ua_size(size)							\
108 	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
109 
110 /*
111  * access_ok: - Checks if a user space pointer is valid
112  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
113  *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
114  *	  to write to a block, it is always safe to read from it.
115  * @addr: User space pointer to start of block to check
116  * @size: Size of block to check
117  *
118  * Context: User context only. This function may sleep if pagefaults are
119  *          enabled.
120  *
121  * Checks if a pointer to a block of memory in user space is valid.
122  *
123  * Returns true (nonzero) if the memory block may be valid, false (zero)
124  * if it is definitely invalid.
125  *
126  * Note that, depending on architecture, this function probably just
127  * checks that the pointer is in the user space range - after calling
128  * this function, memory access functions may still return -EFAULT.
129  */
130 
/*
 * __access_mask: the bits that may not be set in a valid address for the
 * current segment -- 0 for KERNEL_DS (non-KVM builds), __UA_LIMIT for
 * USER_DS (see the KERNEL_DS/USER_DS definitions above).
 */
131 #define __access_mask get_fs().seg
132 
/*
 * __access_ok(): purely arithmetic range check.  The range is valid iff
 * none of addr, addr + size, or the runtime-checked part of size
 * (__ua_size) has a bit in common with the segment mask.  An overflowing
 * addr + size also sets mask bits, so wrapping ranges are rejected.
 */
133 #define __access_ok(addr, size, mask)					\
134 ({									\
135 	unsigned long __addr = (unsigned long) (addr);			\
136 	unsigned long __size = size;					\
137 	unsigned long __mask = mask;					\
138 	unsigned long __ok;						\
139 									\
140 	__chk_user_ptr(addr);						\
141 	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
142 		__ua_size(__size)));					\
143 	__ok == 0;							\
144 })
145 
/* access_ok(): "type" is unused here; only the address range is checked. */
146 #define access_ok(type, addr, size)					\
147 	likely(__access_ok((addr), (size), __access_mask))
148 
149 /*
150  * put_user: - Write a simple value into user space.
151  * @x:	 Value to copy to user space.
152  * @ptr: Destination address, in user space.
153  *
154  * Context: User context only. This function may sleep if pagefaults are
155  *          enabled.
156  *
157  * This macro copies a single simple value from kernel space to user
158  * space.  It supports simple types like char and int, but not larger
159  * data types like structures or arrays.
160  *
161  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
162  * to the result of dereferencing @ptr.
163  *
164  * Returns zero on success, or -EFAULT on error.
165  */
166 #define put_user(x,ptr) \
167 	__put_user_check((x), (ptr), sizeof(*(ptr)))
168 
169 /*
170  * get_user: - Get a simple variable from user space.
171  * @x:	 Variable to store result.
172  * @ptr: Source address, in user space.
173  *
174  * Context: User context only. This function may sleep if pagefaults are
175  *          enabled.
176  *
177  * This macro copies a single simple variable from user space to kernel
178  * space.  It supports simple types like char and int, but not larger
179  * data types like structures or arrays.
180  *
181  * @ptr must have pointer-to-simple-variable type, and the result of
182  * dereferencing @ptr must be assignable to @x without a cast.
183  *
184  * Returns zero on success, or -EFAULT on error.
185  * On error, the variable @x is set to zero.
186  */
187 #define get_user(x,ptr) \
188 	__get_user_check((x), (ptr), sizeof(*(ptr)))
189 
190 /*
191  * __put_user: - Write a simple value into user space, with less checking.
192  * @x:	 Value to copy to user space.
193  * @ptr: Destination address, in user space.
194  *
195  * Context: User context only. This function may sleep if pagefaults are
196  *          enabled.
197  *
198  * This macro copies a single simple value from kernel space to user
199  * space.  It supports simple types like char and int, but not larger
200  * data types like structures or arrays.
201  *
202  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
203  * to the result of dereferencing @ptr.
204  *
205  * Caller must check the pointer with access_ok() before calling this
206  * function.
207  *
208  * Returns zero on success, or -EFAULT on error.
209  */
210 #define __put_user(x,ptr) \
211 	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
212 
213 /*
214  * __get_user: - Get a simple variable from user space, with less checking.
215  * @x:	 Variable to store result.
216  * @ptr: Source address, in user space.
217  *
218  * Context: User context only. This function may sleep if pagefaults are
219  *          enabled.
220  *
221  * This macro copies a single simple variable from user space to kernel
222  * space.  It supports simple types like char and int, but not larger
223  * data types like structures or arrays.
224  *
225  * @ptr must have pointer-to-simple-variable type, and the result of
226  * dereferencing @ptr must be assignable to @x without a cast.
227  *
228  * Caller must check the pointer with access_ok() before calling this
229  * function.
230  *
231  * Returns zero on success, or -EFAULT on error.
232  * On error, the variable @x is set to zero.
233  */
234 #define __get_user(x,ptr) \
235 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
236 
/*
 * Dummy oversized type: __m(x) is passed as an "o" (memory) asm operand by
 * the __get_data_asm/__put_data_asm helpers below, so the compiler treats
 * the access as touching memory at x rather than an unrelated location.
 */
237 struct __large_struct { unsigned long buf[100]; };
238 #define __m(x) (*(struct __large_struct __user *)(x))
239 
240 /*
241  * Yuck.  We need two variants, one for 64bit operation and one
242  * for 32 bit mode and old iron.
243  */
244 #ifndef CONFIG_EVA
245 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
246 #else
247 /*
248  * Kernel specific functions for EVA. We need to use normal load instructions
249  * to read data from kernel when operating in EVA mode. We use these macros to
250  * avoid redefining __get_data_asm for EVA.
251  */
252 #undef _loadd
253 #undef _loadw
254 #undef _loadh
255 #undef _loadb
256 #ifdef CONFIG_32BIT
257 #define _loadd			_loadw
258 #else
259 #define _loadd(reg, addr)	"ld " reg ", " addr
260 #endif
261 #define _loadw(reg, addr)	"lw " reg ", " addr
262 #define _loadh(reg, addr)	"lh " reg ", " addr
263 #define _loadb(reg, addr)	"lb " reg ", " addr
264 
265 #define __get_kernel_common(val, size, ptr)				\
266 do {									\
267 	switch (size) {							\
268 	case 1: __get_data_asm(val, _loadb, ptr); break;		\
269 	case 2: __get_data_asm(val, _loadh, ptr); break;		\
270 	case 4: __get_data_asm(val, _loadw, ptr); break;		\
271 	case 8: __GET_DW(val, _loadd, ptr); break;			\
272 	default: __get_user_unknown(); break;				\
273 	}								\
274 } while (0)
275 #endif
276 
277 #ifdef CONFIG_32BIT
278 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
279 #endif
280 #ifdef CONFIG_64BIT
281 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
282 #endif
283 
284 extern void __get_user_unknown(void);
285 
286 #define __get_user_common(val, size, ptr)				\
287 do {									\
288 	switch (size) {							\
289 	case 1: __get_data_asm(val, user_lb, ptr); break;		\
290 	case 2: __get_data_asm(val, user_lh, ptr); break;		\
291 	case 4: __get_data_asm(val, user_lw, ptr); break;		\
292 	case 8: __GET_DW(val, user_ld, ptr); break;			\
293 	default: __get_user_unknown(); break;				\
294 	}								\
295 } while (0)
296 
/*
 * Fetch a user (or, under EVA, kernel) value without an access_ok() check.
 * __gu_err looks uninitialized, but both branches expand to asm helpers
 * that name it as their "=r" error output and feed it 0 through the
 * matching "0" (0) input, so it is 0 on success and -EFAULT on fault.
 */
297 #define __get_user_nocheck(x, ptr, size)				\
298 ({									\
299 	int __gu_err;							\
300 									\
301 	if (eva_kernel_access()) {					\
302 		__get_kernel_common((x), size, ptr);			\
303 	} else {							\
304 		__chk_user_ptr(ptr);					\
305 		__get_user_common((x), size, ptr);			\
306 	}								\
307 	__gu_err;							\
308 })
309 
/*
 * Fetch a user value after an access_ok() range check.  If the check
 * fails, (x) is zeroed and the initial -EFAULT is returned; if the access
 * itself faults, the asm fixup zeroes the value and sets __gu_err.
 */
310 #define __get_user_check(x, ptr, size)					\
311 ({									\
312 	int __gu_err = -EFAULT;						\
313 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
314 									\
315 	might_fault();							\
316 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
317 		if (eva_kernel_access())				\
318 			__get_kernel_common((x), size, __gu_ptr);	\
319 		else							\
320 			__get_user_common((x), size, __gu_ptr);		\
321 	} else								\
322 		(x) = 0;						\
323 									\
324 	__gu_err;							\
325 })
326 
/*
 * Single load with exception fixup.  Label 1 is the load; the __ex_table
 * entry maps a fault at 1 to label 3 in .fixup, which sets the error code
 * (%0) to -EFAULT, zeroes the destination (%1) and resumes at label 2.
 * __gu_err must be in scope at the expansion site (see the callers above);
 * it is preset to 0 via the "0" (0) matching input.  The value is read
 * into a long temporary before the cast to the destination type.
 */
327 #define __get_data_asm(val, insn, addr)					\
328 {									\
329 	long __gu_tmp;							\
330 									\
331 	__asm__ __volatile__(						\
332 	"1:	"insn("%1", "%3")"				\n"	\
333 	"2:							\n"	\
334 	"	.insn						\n"	\
335 	"	.section .fixup,\"ax\"				\n"	\
336 	"3:	li	%0, %4					\n"	\
337 	"	move	%1, $0					\n"	\
338 	"	j	2b					\n"	\
339 	"	.previous					\n"	\
340 	"	.section __ex_table,\"a\"			\n"	\
341 	"	"__UA_ADDR "\t1b, 3b				\n"	\
342 	"	.previous					\n"	\
343 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
344 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
345 									\
346 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
347 }
348 
349 /*
350  * Get a long long 64 using 32 bit registers.
351  */
352 #define __get_data_asm_ll32(val, insn, addr)				\
353 {									\
354 	union {								\
355 		unsigned long long	l;				\
356 		__typeof__(*(addr))	t;				\
357 	} __gu_tmp;							\
358 									\
359 	__asm__ __volatile__(						\
360 	"1:	" insn("%1", "(%3)")"				\n"	\
361 	"2:	" insn("%D1", "4(%3)")"				\n"	\
362 	"3:							\n"	\
363 	"	.insn						\n"	\
364 	"	.section	.fixup,\"ax\"			\n"	\
365 	"4:	li	%0, %4					\n"	\
366 	"	move	%1, $0					\n"	\
367 	"	move	%D1, $0					\n"	\
368 	"	j	3b					\n"	\
369 	"	.previous					\n"	\
370 	"	.section	__ex_table,\"a\"		\n"	\
371 	"	" __UA_ADDR "	1b, 4b				\n"	\
372 	"	" __UA_ADDR "	2b, 4b				\n"	\
373 	"	.previous					\n"	\
374 	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
375 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
376 									\
377 	(val) = __gu_tmp.t;						\
378 }
379 
380 #ifndef CONFIG_EVA
381 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
382 #else
383 /*
384  * Kernel specific functions for EVA. We need to use normal store instructions
385  * to write data to kernel memory when operating in EVA mode. We use these
386  * macros to avoid redefining __put_data_asm for EVA.
387  */
/*
 * EVA kernel-mode store helpers: expand to the normal (non-EVA) store
 * instructions used by __put_kernel_common below.
 *
 * Bug fix: _stored must emit "sd" (store doubleword), not "ld" (load
 * doubleword) -- it is the size-8 *store* path.  The old "ld" was latent
 * only because CONFIG_EVA currently implies CONFIG_32BIT, where _stored
 * aliases _storew, but it would corrupt data if the 64-bit branch were
 * ever taken.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr
401 
402 #define __put_kernel_common(ptr, size)					\
403 do {									\
404 	switch (size) {							\
405 	case 1: __put_data_asm(_storeb, ptr); break;			\
406 	case 2: __put_data_asm(_storeh, ptr); break;			\
407 	case 4: __put_data_asm(_storew, ptr); break;			\
408 	case 8: __PUT_DW(_stored, ptr); break;				\
409 	default: __put_user_unknown(); break;				\
410 	}								\
411 } while(0)
412 #endif
413 
414 /*
415  * Yuck.  We need two variants, one for 64bit operation and one
416  * for 32 bit mode and old iron.
417  */
418 #ifdef CONFIG_32BIT
419 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
420 #endif
421 #ifdef CONFIG_64BIT
422 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
423 #endif
424 
425 #define __put_user_common(ptr, size)					\
426 do {									\
427 	switch (size) {							\
428 	case 1: __put_data_asm(user_sb, ptr); break;			\
429 	case 2: __put_data_asm(user_sh, ptr); break;			\
430 	case 4: __put_data_asm(user_sw, ptr); break;			\
431 	case 8: __PUT_DW(user_sd, ptr); break;				\
432 	default: __put_user_unknown(); break;				\
433 	}								\
434 } while (0)
435 
/*
 * Store a value to user (or, under EVA, kernel) memory without an
 * access_ok() check.  __pu_err/__pu_val are referenced by name inside the
 * __put_*_common asm helpers; __pu_err starts at 0 and is set to -EFAULT
 * by the fixup path on fault.
 */
436 #define __put_user_nocheck(x, ptr, size)				\
437 ({									\
438 	__typeof__(*(ptr)) __pu_val;					\
439 	int __pu_err = 0;						\
440 									\
441 	__pu_val = (x);							\
442 	if (eva_kernel_access()) {					\
443 		__put_kernel_common(ptr, size);				\
444 	} else {							\
445 		__chk_user_ptr(ptr);					\
446 		__put_user_common(ptr, size);				\
447 	}								\
448 	__pu_err;							\
449 })
450 
/*
 * Store a value to user memory after an access_ok() range check.  If the
 * check fails nothing is written and the initial -EFAULT is returned;
 * otherwise the asm helpers set __pu_err (0 or -EFAULT on fault).
 */
451 #define __put_user_check(x, ptr, size)					\
452 ({									\
453 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
454 	__typeof__(*(ptr)) __pu_val = (x);				\
455 	int __pu_err = -EFAULT;						\
456 									\
457 	might_fault();							\
458 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
459 		if (eva_kernel_access())				\
460 			__put_kernel_common(__pu_addr, size);		\
461 		else							\
462 			__put_user_common(__pu_addr, size);		\
463 	}								\
464 									\
465 	__pu_err;							\
466 })
467 
468 #define __put_data_asm(insn, ptr)					\
469 {									\
470 	__asm__ __volatile__(						\
471 	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
472 	"2:							\n"	\
473 	"	.insn						\n"	\
474 	"	.section	.fixup,\"ax\"			\n"	\
475 	"3:	li	%0, %4					\n"	\
476 	"	j	2b					\n"	\
477 	"	.previous					\n"	\
478 	"	.section	__ex_table,\"a\"		\n"	\
479 	"	" __UA_ADDR "	1b, 3b				\n"	\
480 	"	.previous					\n"	\
481 	: "=r" (__pu_err)						\
482 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
483 	  "i" (-EFAULT));						\
484 }
485 
486 #define __put_data_asm_ll32(insn, ptr)					\
487 {									\
488 	__asm__ __volatile__(						\
489 	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
490 	"2:	"insn("%D2", "4(%3)")"				\n"	\
491 	"3:							\n"	\
492 	"	.insn						\n"	\
493 	"	.section	.fixup,\"ax\"			\n"	\
494 	"4:	li	%0, %4					\n"	\
495 	"	j	3b					\n"	\
496 	"	.previous					\n"	\
497 	"	.section	__ex_table,\"a\"		\n"	\
498 	"	" __UA_ADDR "	1b, 4b				\n"	\
499 	"	" __UA_ADDR "	2b, 4b				\n"	\
500 	"	.previous"						\
501 	: "=r" (__pu_err)						\
502 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
503 	  "i" (-EFAULT));						\
504 }
505 
506 extern void __put_user_unknown(void);
507 
508 /*
509  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
510  * EVA unaligned access is handled in the ADE exception handler.
511  */
512 #ifndef CONFIG_EVA
513 /*
514  * put_user_unaligned: - Write a simple value into user space.
515  * @x:	 Value to copy to user space.
516  * @ptr: Destination address, in user space.
517  *
518  * Context: User context only. This function may sleep if pagefaults are
519  *          enabled.
520  *
521  * This macro copies a single simple value from kernel space to user
522  * space.  It supports simple types like char and int, but not larger
523  * data types like structures or arrays.
524  *
525  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
526  * to the result of dereferencing @ptr.
527  *
528  * Returns zero on success, or -EFAULT on error.
529  */
530 #define put_user_unaligned(x,ptr)	\
531 	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
532 
533 /*
534  * get_user_unaligned: - Get a simple variable from user space.
535  * @x:	 Variable to store result.
536  * @ptr: Source address, in user space.
537  *
538  * Context: User context only. This function may sleep if pagefaults are
539  *          enabled.
540  *
541  * This macro copies a single simple variable from user space to kernel
542  * space.  It supports simple types like char and int, but not larger
543  * data types like structures or arrays.
544  *
545  * @ptr must have pointer-to-simple-variable type, and the result of
546  * dereferencing @ptr must be assignable to @x without a cast.
547  *
548  * Returns zero on success, or -EFAULT on error.
549  * On error, the variable @x is set to zero.
550  */
551 #define get_user_unaligned(x,ptr) \
552 	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
553 
554 /*
555  * __put_user_unaligned: - Write a simple value into user space, with less checking.
556  * @x:	 Value to copy to user space.
557  * @ptr: Destination address, in user space.
558  *
559  * Context: User context only. This function may sleep if pagefaults are
560  *          enabled.
561  *
562  * This macro copies a single simple value from kernel space to user
563  * space.  It supports simple types like char and int, but not larger
564  * data types like structures or arrays.
565  *
566  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
567  * to the result of dereferencing @ptr.
568  *
569  * Caller must check the pointer with access_ok() before calling this
570  * function.
571  *
572  * Returns zero on success, or -EFAULT on error.
573  */
574 #define __put_user_unaligned(x,ptr) \
575 	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
576 
577 /*
578  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
579  * @x:	 Variable to store result.
580  * @ptr: Source address, in user space.
581  *
582  * Context: User context only. This function may sleep if pagefaults are
583  *          enabled.
584  *
585  * This macro copies a single simple variable from user space to kernel
586  * space.  It supports simple types like char and int, but not larger
587  * data types like structures or arrays.
588  *
589  * @ptr must have pointer-to-simple-variable type, and the result of
590  * dereferencing @ptr must be assignable to @x without a cast.
591  *
592  * Caller must check the pointer with access_ok() before calling this
593  * function.
594  *
595  * Returns zero on success, or -EFAULT on error.
596  * On error, the variable @x is set to zero.
597  */
598 #define __get_user_unaligned(x,ptr) \
599 	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
600 
601 /*
602  * Yuck.  We need two variants, one for 64bit operation and one
603  * for 32 bit mode and old iron.
604  */
605 #ifdef CONFIG_32BIT
606 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
607 	__get_user_unaligned_asm_ll32(val, ptr)
608 #endif
609 #ifdef CONFIG_64BIT
610 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
611 	__get_user_unaligned_asm(val, "uld", ptr)
612 #endif
613 
614 extern void __get_user_unaligned_unknown(void);
615 
616 #define __get_user_unaligned_common(val, size, ptr)			\
617 do {									\
618 	switch (size) {							\
619 	case 1: __get_data_asm(val, "lb", ptr); break;			\
620 	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
621 	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
622 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
623 	default: __get_user_unaligned_unknown(); break;			\
624 	}								\
625 } while (0)
626 
627 #define __get_user_unaligned_nocheck(x,ptr,size)			\
628 ({									\
629 	int __gu_err;							\
630 									\
631 	__get_user_unaligned_common((x), size, ptr);			\
632 	__gu_err;							\
633 })
634 
635 #define __get_user_unaligned_check(x,ptr,size)				\
636 ({									\
637 	int __gu_err = -EFAULT;						\
638 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
639 									\
640 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
641 		__get_user_unaligned_common((x), size, __gu_ptr);	\
642 									\
643 	__gu_err;							\
644 })
645 
646 #define __get_data_unaligned_asm(val, insn, addr)			\
647 {									\
648 	long __gu_tmp;							\
649 									\
650 	__asm__ __volatile__(						\
651 	"1:	" insn "	%1, %3				\n"	\
652 	"2:							\n"	\
653 	"	.insn						\n"	\
654 	"	.section .fixup,\"ax\"				\n"	\
655 	"3:	li	%0, %4					\n"	\
656 	"	move	%1, $0					\n"	\
657 	"	j	2b					\n"	\
658 	"	.previous					\n"	\
659 	"	.section __ex_table,\"a\"			\n"	\
660 	"	"__UA_ADDR "\t1b, 3b				\n"	\
661 	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
662 	"	.previous					\n"	\
663 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
664 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
665 									\
666 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
667 }
668 
669 /*
670  * Get a long long 64 using 32 bit registers.
671  */
672 #define __get_user_unaligned_asm_ll32(val, addr)			\
673 {									\
674 	unsigned long long __gu_tmp;					\
675 									\
676 	__asm__ __volatile__(						\
677 	"1:	ulw	%1, (%3)				\n"	\
678 	"2:	ulw	%D1, 4(%3)				\n"	\
679 	"	move	%0, $0					\n"	\
680 	"3:							\n"	\
681 	"	.insn						\n"	\
682 	"	.section	.fixup,\"ax\"			\n"	\
683 	"4:	li	%0, %4					\n"	\
684 	"	move	%1, $0					\n"	\
685 	"	move	%D1, $0					\n"	\
686 	"	j	3b					\n"	\
687 	"	.previous					\n"	\
688 	"	.section	__ex_table,\"a\"		\n"	\
689 	"	" __UA_ADDR "	1b, 4b				\n"	\
690 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
691 	"	" __UA_ADDR "	2b, 4b				\n"	\
692 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
693 	"	.previous					\n"	\
694 	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
695 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
696 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
697 }
698 
699 /*
700  * Yuck.  We need two variants, one for 64bit operation and one
701  * for 32 bit mode and old iron.
702  */
703 #ifdef CONFIG_32BIT
704 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
705 #endif
706 #ifdef CONFIG_64BIT
707 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
708 #endif
709 
/*
 * Store a possibly-unaligned value of the given size, dispatching to the
 * matching unaligned-store asm helper.  A single byte can never be
 * misaligned, so size 1 uses the plain "sb" path.
 *
 * Fix: restore the missing "}" that closes the switch statement -- without
 * it every expansion of this macro is brace-imbalanced and fails to
 * compile (compare __put_kernel_common and __get_user_unaligned_common).
 */
#define __put_user_unaligned_common(ptr, size)				\
do {									\
	switch (size) {							\
	case 1: __put_data_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
} while (0)
719 
720 #define __put_user_unaligned_nocheck(x,ptr,size)			\
721 ({									\
722 	__typeof__(*(ptr)) __pu_val;					\
723 	int __pu_err = 0;						\
724 									\
725 	__pu_val = (x);							\
726 	__put_user_unaligned_common(ptr, size);				\
727 	__pu_err;							\
728 })
729 
730 #define __put_user_unaligned_check(x,ptr,size)				\
731 ({									\
732 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
733 	__typeof__(*(ptr)) __pu_val = (x);				\
734 	int __pu_err = -EFAULT;						\
735 									\
736 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
737 		__put_user_unaligned_common(__pu_addr, size);		\
738 									\
739 	__pu_err;							\
740 })
741 
742 #define __put_user_unaligned_asm(insn, ptr)				\
743 {									\
744 	__asm__ __volatile__(						\
745 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
746 	"2:							\n"	\
747 	"	.insn						\n"	\
748 	"	.section	.fixup,\"ax\"			\n"	\
749 	"3:	li	%0, %4					\n"	\
750 	"	j	2b					\n"	\
751 	"	.previous					\n"	\
752 	"	.section	__ex_table,\"a\"		\n"	\
753 	"	" __UA_ADDR "	1b, 3b				\n"	\
754 	"	.previous					\n"	\
755 	: "=r" (__pu_err)						\
756 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
757 	  "i" (-EFAULT));						\
758 }
759 
760 #define __put_user_unaligned_asm_ll32(ptr)				\
761 {									\
762 	__asm__ __volatile__(						\
763 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
764 	"2:	sw	%D2, 4(%3)				\n"	\
765 	"3:							\n"	\
766 	"	.insn						\n"	\
767 	"	.section	.fixup,\"ax\"			\n"	\
768 	"4:	li	%0, %4					\n"	\
769 	"	j	3b					\n"	\
770 	"	.previous					\n"	\
771 	"	.section	__ex_table,\"a\"		\n"	\
772 	"	" __UA_ADDR "	1b, 4b				\n"	\
773 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
774 	"	" __UA_ADDR "	2b, 4b				\n"	\
775 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
776 	"	.previous"						\
777 	: "=r" (__pu_err)						\
778 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
779 	  "i" (-EFAULT));						\
780 }
781 
782 extern void __put_user_unaligned_unknown(void);
783 #endif
784 
785 /*
786  * We're generating jump to subroutines which will be outside the range of
787  * jump instructions
788  */
789 #ifdef MODULE
790 #define __MODULE_JAL(destination)					\
791 	".set\tnoat\n\t"						\
792 	__UA_LA "\t$1, " #destination "\n\t"				\
793 	"jalr\t$1\n\t"							\
794 	".set\tat\n\t"
795 #else
796 #define __MODULE_JAL(destination)					\
797 	"jal\t" #destination "\n\t"
798 #endif
799 
800 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
801 					      defined(CONFIG_CPU_HAS_PREFETCH))
802 #define DADDI_SCRATCH "$3"
803 #else
804 #define DADDI_SCRATCH "$0"
805 #endif
806 
807 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
808 
809 #ifndef CONFIG_EVA
810 #define __invoke_copy_to_user(to, from, n)				\
811 ({									\
812 	register void __user *__cu_to_r __asm__("$4");			\
813 	register const void *__cu_from_r __asm__("$5");			\
814 	register long __cu_len_r __asm__("$6");				\
815 									\
816 	__cu_to_r = (to);						\
817 	__cu_from_r = (from);						\
818 	__cu_len_r = (n);						\
819 	__asm__ __volatile__(						\
820 	__MODULE_JAL(__copy_user)					\
821 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
822 	:								\
823 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
824 	  DADDI_SCRATCH, "memory");					\
825 	__cu_len_r;							\
826 })
827 
828 #define __invoke_copy_to_kernel(to, from, n)				\
829 	__invoke_copy_to_user(to, from, n)
830 
831 #endif
832 
833 /*
834  * __copy_to_user: - Copy a block of data into user space, with less checking.
835  * @to:	  Destination address, in user space.
836  * @from: Source address, in kernel space.
837  * @n:	  Number of bytes to copy.
838  *
839  * Context: User context only. This function may sleep if pagefaults are
840  *          enabled.
841  *
842  * Copy data from kernel space to user space.  Caller must check
843  * the specified block with access_ok() before calling this function.
844  *
845  * Returns number of bytes that could not be copied.
846  * On success, this will be zero.
847  */
848 #define __copy_to_user(to, from, n)					\
849 ({									\
850 	void __user *__cu_to;						\
851 	const void *__cu_from;						\
852 	long __cu_len;							\
853 									\
854 	__cu_to = (to);							\
855 	__cu_from = (from);						\
856 	__cu_len = (n);							\
857 									\
858 	check_object_size(__cu_from, __cu_len, true);			\
859 	might_fault();							\
860 									\
861 	if (eva_kernel_access())					\
862 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
863 						   __cu_len);		\
864 	else								\
865 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
866 						 __cu_len);		\
867 	__cu_len;							\
868 })
869 
870 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
871 
872 #define __copy_to_user_inatomic(to, from, n)				\
873 ({									\
874 	void __user *__cu_to;						\
875 	const void *__cu_from;						\
876 	long __cu_len;							\
877 									\
878 	__cu_to = (to);							\
879 	__cu_from = (from);						\
880 	__cu_len = (n);							\
881 									\
882 	check_object_size(__cu_from, __cu_len, true);			\
883 									\
884 	if (eva_kernel_access())					\
885 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
886 						   __cu_len);		\
887 	else								\
888 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
889 						 __cu_len);		\
890 	__cu_len;							\
891 })
892 
893 #define __copy_from_user_inatomic(to, from, n)				\
894 ({									\
895 	void *__cu_to;							\
896 	const void __user *__cu_from;					\
897 	long __cu_len;							\
898 									\
899 	__cu_to = (to);							\
900 	__cu_from = (from);						\
901 	__cu_len = (n);							\
902 									\
903 	check_object_size(__cu_to, __cu_len, false);			\
904 									\
905 	if (eva_kernel_access())					\
906 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
907 							      __cu_from,\
908 							      __cu_len);\
909 	else								\
910 		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
911 							    __cu_from,	\
912 							    __cu_len);	\
913 	__cu_len;							\
914 })
915 
916 /*
917  * copy_to_user: - Copy a block of data into user space.
918  * @to:	  Destination address, in user space.
919  * @from: Source address, in kernel space.
920  * @n:	  Number of bytes to copy.
921  *
922  * Context: User context only. This function may sleep if pagefaults are
923  *          enabled.
924  *
925  * Copy data from kernel space to user space.
926  *
927  * Returns number of bytes that could not be copied.
928  * On success, this will be zero.
929  */
/*
 * Returns the number of bytes NOT copied.  Note that when the access_ok()
 * check fails, __cu_len is simply left at n -- i.e. "nothing was copied"
 * -- which matches the documented contract above.  might_fault() is only
 * announced on the user-access path; the EVA kernel path cannot fault on
 * a bad segment.
 */
930 #define copy_to_user(to, from, n)					\
931 ({									\
932 	void __user *__cu_to;						\
933 	const void *__cu_from;						\
934 	long __cu_len;							\
935 									\
936 	__cu_to = (to);							\
937 	__cu_from = (from);						\
938 	__cu_len = (n);							\
939 									\
940 	check_object_size(__cu_from, __cu_len, true);			\
941 									\
942 	if (eva_kernel_access()) {					\
943 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
944 						   __cu_from,		\
945 						   __cu_len);		\
946 	} else {							\
947 		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
948 			might_fault();                                  \
949 			__cu_len = __invoke_copy_to_user(__cu_to,	\
950 							 __cu_from,	\
951 							 __cu_len);     \
952 		}							\
953 	}								\
954 	__cu_len;							\
955 })
956 
957 #ifndef CONFIG_EVA
958 
/*
 * Call the __copy_user assembler routine with its arguments pinned to
 * the MIPS argument registers: $4 = to, $5 = from, $6 = n.  On return,
 * $6 holds the number of bytes that could NOT be copied (0 on success),
 * which is what this expression evaluates to.
 */
#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	/* Branch delay slot: $1 (AT) = from + n — presumably the	\
	 * end-of-source address used by __copy_user's fixup code;	\
	 * confirm against arch/mips/lib/memcpy.S.			\
	 */								\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
981 
/*
 * Without EVA there is a single unified address space, so the kernel,
 * user->user and kernel->kernel variants all collapse onto the same
 * __copy_user-based helper.
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)
992 
/*
 * Atomic-context variant: identical register protocol to
 * __invoke_copy_from_user ($4 = to, $5 = from, $6 = n, residual count
 * returned in $6) but calls __copy_user_inatomic, the helper intended
 * for callers that may run with pagefaults disabled (note: no
 * might_fault() annotation anywhere on this path).
 */
#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	/* Delay slot: $1 (AT) = from + n for the exception fixups */	\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
1015 
/*
 * Non-EVA: kernel and user address spaces coincide, so the kernel
 * inatomic variant is the same helper.  The stray trailing backslash
 * that used to follow this definition (silently continuing the macro
 * onto the blank line below) has been dropped.
 */
#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_inatomic(to, from, n)
1018 
1019 #else
1020 
1021 /* EVA specific functions */
1022 
1023 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1024 				       size_t __n);
1025 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1026 				   size_t __n);
1027 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1028 				 size_t __n);
1029 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1030 
/*
 * EVA generic "copy from" dispatcher: jumps to func_ptr with the MIPS
 * argument registers pinned ($4 = to, $5 = from, $6 = n) and evaluates
 * to the residual byte count left in $6 by the callee.
 */
#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func_ptr)						\
	/* Delay slot: $1 (AT) = from + n for the exception fixups */	\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
1053 
/*
 * EVA generic "copy to" dispatcher.  Same register protocol as the
 * "from" variant above, but without the noreorder/$1 delay-slot setup —
 * the "to" helpers apparently do not need the from + n end address in
 * AT; confirm against arch/mips/lib/memcpy.S if touching this.
 */
#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func_ptr)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
1071 
1072 /*
1073  * Source or destination address is in userland. We need to go through
1074  * the TLB
1075  */
/* User-mapped copy, user source */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

/* As above but safe with pagefaults disabled */
#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

/* User-mapped copy, user destination */
#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

/* Both source and destination are user addresses */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1088 
1089 /*
1090  * Source or destination address in the kernel. We are not going through
1091  * the TLB
1092  */
/* Kernel addresses use the plain (non-EVA) __copy_user helpers */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1104 
1105 #endif /* CONFIG_EVA */
1106 
1107 /*
1108  * __copy_from_user: - Copy a block of data from user space, with less checking.
1109  * @to:	  Destination address, in kernel space.
1110  * @from: Source address, in user space.
1111  * @n:	  Number of bytes to copy.
1112  *
1113  * Context: User context only. This function may sleep if pagefaults are
1114  *          enabled.
1115  *
1116  * Copy data from user space to kernel space.  Caller must check
1117  * the specified block with access_ok() before calling this function.
1118  *
1119  * Returns number of bytes that could not be copied.
1120  * On success, this will be zero.
1121  *
1122  * If some data could not be copied, this function will pad the copied
1123  * data to the requested size using zero bytes.
1124  */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	/* Hardened usercopy sanity check on the kernel-side buffer */	\
	check_object_size(__cu_to, __cu_len, false);			\
									\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		/* No access_ok() here — caller already validated the	\
		 * range (see kerneldoc above); zero-padding of any	\
		 * uncopied tail is presumably done by the assembler	\
		 * helper — confirm against arch/mips/lib/memcpy.S.	\
		 */							\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
1148 
1149 /*
1150  * copy_from_user: - Copy a block of data from user space.
1151  * @to:	  Destination address, in kernel space.
1152  * @from: Source address, in user space.
1153  * @n:	  Number of bytes to copy.
1154  *
1155  * Context: User context only. This function may sleep if pagefaults are
1156  *          enabled.
1157  *
1158  * Copy data from user space to kernel space.
1159  *
1160  * Returns number of bytes that could not be copied.
1161  * On success, this will be zero.
1162  *
1163  * If some data could not be copied, this function will pad the copied
1164  * data to the requested size using zero bytes.
1165  */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	/* Hardened usercopy sanity check on the kernel-side buffer */	\
	check_object_size(__cu_to, __cu_len, false);			\
									\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();                                  \
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);   \
		} else {						\
			/* Bad range: zero the destination so callers	\
			 * never consume uninitialized kernel memory.	\
			 */						\
			memset(__cu_to, 0, __cu_len);			\
		}							\
	}								\
	__cu_len;							\
})
1194 
/*
 * __copy_in_user: copy between two user-space buffers without any
 * access_ok() validation — presumably the caller has checked both
 * ranges, as with the other double-underscore variants.  Evaluates to
 * the number of bytes that could not be copied (0 on success).
 */
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})
1214 
/*
 * copy_in_user: checked user-to-user copy.  Both ranges must pass
 * access_ok(); otherwise the copy is skipped and __cu_len (== n) is
 * returned unchanged.  Evaluates to the number of bytes that could
 * not be copied (0 on success).
 */
#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
1238 
1239 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
1240 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
1241 
1242 /*
1243  * __clear_user: - Zero a block of memory in user space, with less checking.
1244  * @to:	  Destination address, in user space.
1245  * @n:	  Number of bytes to zero.
1246  *
1247  * Zero a block of memory in user space.  Caller must check
1248  * the specified block with access_ok() before calling this function.
1249  *
1250  * Returns number of bytes that could not be cleared.
1251  * On success, this will be zero.
1252  */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	if (eva_kernel_access()) {
		/* Kernel-segment target: use the kernel bzero helper */
		__asm__ __volatile__(
			"move\t$4, %1\n\t"	/* $4 = addr */
			"move\t$5, $0\n\t"	/* $5 = 0 — presumably the fill
						 * byte, shared with the memset
						 * entry; confirm in memset.S */
			"move\t$6, %2\n\t"	/* $6 = size */
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"		/* $6 = bytes NOT cleared */
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"	/* $4 = addr */
			"move\t$5, $0\n\t"	/* $5 = 0 */
			"move\t$6, %2\n\t"	/* $6 = size */
			__MODULE_JAL(__bzero)
			"move\t%0, $6"		/* $6 = bytes NOT cleared */
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}
1283 
/*
 * clear_user: - Zero a block of memory in user space, with checking.
 * @addr: Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * If the range fails access_ok() (and n != 0), nothing is cleared and
 * n is returned unchanged; otherwise evaluates to the number of bytes
 * that could not be cleared (0 on success).
 */
#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
1293 
1294 extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
1295 extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
1296 
1297 /*
1298  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1299  * @dst:   Destination address, in kernel space.  This buffer must be at
1300  *	   least @count bytes long.
1301  * @src:   Source address, in user space.
1302  * @count: Maximum number of bytes to copy, including the trailing NUL.
1303  *
1304  * Copies a NUL-terminated string from userspace to kernel space.
1305  * Caller must check the specified block with access_ok() before calling
1306  * this function.
1307  *
1308  * On success, returns the length of the string (not including the trailing
1309  * NUL).
1310  *
1311  * If access to userspace fails, returns -EFAULT (some data may have been
1312  * copied).
1313  *
1314  * If @count is smaller than the length of the string, copies @count bytes
1315  * and returns @count.
1316  */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		/* $4 = to, $5 = from, $6 = len; result returned in $2 */
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		/* Only user accesses can fault */
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
1347 
1348 extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
1349 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
1350 
1351 /*
1352  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1353  * @dst:   Destination address, in kernel space.  This buffer must be at
1354  *	   least @count bytes long.
1355  * @src:   Source address, in user space.
1356  * @count: Maximum number of bytes to copy, including the trailing NUL.
1357  *
1358  * Copies a NUL-terminated string from userspace to kernel space.
1359  *
1360  * On success, returns the length of the string (not including the trailing
1361  * NUL).
1362  *
1363  * If access to userspace fails, returns -EFAULT (some data may have been
1364  * copied).
1365  *
1366  * If @count is smaller than the length of the string, copies @count bytes
1367  * and returns @count.
1368  */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		/* $4 = to, $5 = from, $6 = len; result returned in $2.
		 * The non-"nocheck" helpers are used here, so range
		 * validation happens inside the assembler routine.
		 */
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		/* Only user accesses can fault */
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
1399 
1400 extern long __strlen_kernel_asm(const char __user *s);
1401 extern long __strlen_user_asm(const char __user *s);
1402 
1403 /*
1404  * strlen_user: - Get the size of a string in user space.
1405  * @str: The string to measure.
1406  *
1407  * Context: User context only. This function may sleep if pagefaults are
1408  *          enabled.
1409  *
1410  * Get the size of a NUL-terminated string in user space.
1411  *
1412  * Returns the size of the string INCLUDING the terminating NUL.
1413  * On exception, returns 0.
1414  *
1415  * If there is a limit on the length of a valid string, you may wish to
1416  * consider using strnlen_user() instead.
1417  */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (eva_kernel_access()) {
		/* $4 = s; length (incl. NUL) or 0 returned in $2 */
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		/* Only user accesses can fault */
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}
1443 
1444 extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
1445 extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
1446 
1447 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (eva_kernel_access()) {
		/* $4 = s, $5 = n; result returned in $2 */
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		/* Only user accesses can fault */
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
1475 
1476 extern long __strnlen_kernel_asm(const char __user *s, long n);
1477 extern long __strnlen_user_asm(const char __user *s, long n);
1478 
1479 /*
1480  * strnlen_user: - Get the size of a string in user space.
1481  * @str: The string to measure.
1482  *
1483  * Context: User context only. This function may sleep if pagefaults are
1484  *          enabled.
1485  *
1486  * Get the size of a NUL-terminated string in user space.
1487  *
1488  * Returns the size of the string INCLUDING the terminating NUL.
1489  * On exception, returns 0.
1490  * If the string is too long, returns a value greater than @n.
1491  */
1492 static inline long strnlen_user(const char __user *s, long n)
1493 {
1494 	long res;
1495 
1496 	might_fault();
1497 	if (eva_kernel_access()) {
1498 		__asm__ __volatile__(
1499 			"move\t$4, %1\n\t"
1500 			"move\t$5, %2\n\t"
1501 			__MODULE_JAL(__strnlen_kernel_asm)
1502 			"move\t%0, $2"
1503 			: "=r" (res)
1504 			: "r" (s), "r" (n)
1505 			: "$2", "$4", "$5", __UA_t0, "$31");
1506 	} else {
1507 		__asm__ __volatile__(
1508 			"move\t$4, %1\n\t"
1509 			"move\t$5, %2\n\t"
1510 			__MODULE_JAL(__strnlen_user_asm)
1511 			"move\t%0, $2"
1512 			: "=r" (res)
1513 			: "r" (s), "r" (n)
1514 			: "$2", "$4", "$5", __UA_t0, "$31");
1515 	}
1516 
1517 	return res;
1518 }
1519 
1520 #endif /* _ASM_UACCESS_H */
1521