/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT 0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)

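/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily letting the user accessors operate on kernel addresses.
 * The surrounding code is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call code that uses get_user()/put_user() on kernel pointers ...
 *	set_fs(old_fs);
 */
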
/*
 * eva_kernel_access() - determine whether memory accesses target kernel memory
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
	if (!IS_ENABLED(CONFIG_EVA))
		return false;

	return uaccess_kernel();
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(addr, size)					\
	likely(__access_ok((addr), (size)))

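/*
 * Worked example of the check above (a sketch, assuming a 32-bit kernel
 * where USER_DS.seg == 0x80000000):
 *
 *	addr = 0x7fffff00, size = 0x200
 *	addr | (addr + size) = 0x7fffff00 | 0x80000100 = 0xffffff00
 *
 * The result has the high bit set, so __access_ok() returns 0 and the
 * access is rejected even though addr itself lies below the limit.
 * For a constant positive size, __ua_size() contributes 0 at compile time.
 */
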
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

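/*
 * A minimal usage sketch (illustrative only; the handler and its helper
 * read_count() are hypothetical, not part of this header):
 *
 *	static int dev_get_count(unsigned int __user *uptr)
 *	{
 *		unsigned int count = read_count();
 *
 *		return put_user(count, uptr);
 *	}
 *
 * put_user() evaluates to 0 on success and -EFAULT if uptr is invalid.
 */
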
/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

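/*
 * A minimal usage sketch (illustrative only; the handler and
 * apply_timeout() are hypothetical):
 *
 *	static int dev_set_timeout(const unsigned int __user *uptr)
 *	{
 *		unsigned int timeout;
 *
 *		if (get_user(timeout, uptr))
 *			return -EFAULT;
 *		return apply_timeout(timeout);
 *	}
 */
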
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

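/*
 * Sketch of the unchecked variants (illustrative only; the structure and
 * the function are hypothetical): perform a single access_ok() over the
 * whole object, then use __get_user() for the individual fields to avoid
 * repeating the range check.
 *
 *	struct pair { int a, b; };
 *
 *	static int read_pair(struct pair __user *up, int *a, int *b)
 *	{
 *		if (!access_ok(up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
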
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel-specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (eva_kernel_access()) {					\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(__gu_ptr, size))) {			\
		if (eva_kernel_access())				\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel-specific functions for EVA. We need to use normal store instructions
 * to write data to kernel memory when operating in EVA mode. We use these
 * macros to avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (eva_kernel_access()) {					\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(__pu_addr, size))) {			\
		if (eva_kernel_access())				\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which, in modules, may lie outside
 * the range of the jal instruction, so use an indirect jump through $1 there.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from(func, to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to(func, to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_to_kernel(to, from, n);
	else
		return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_from_kernel(to, from, n);
	else
		return __invoke_copy_from_user(to, from, n);
}

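/*
 * These two routines back the generic copy_to_user()/copy_from_user()
 * helpers in <linux/uaccess.h>, which add the access_ok() check and, for
 * reads, zero the remainder of the kernel buffer when the copy faults.
 * A typical caller might look like this (hypothetical handler, for
 * illustration only):
 *
 *	static ssize_t dev_write(const char __user *ubuf, size_t len)
 *	{
 *		char kbuf[64];
 *
 *		if (len > sizeof(kbuf))
 *			return -EINVAL;
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */
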
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return ___invoke_copy_in_kernel(to, from, n);
	else
		return ___invoke_copy_in_user(to, from, n);
}

extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	}

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})

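/*
 * Usage sketch (illustrative only; ubuf and len are hypothetical):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * clear_user() performs the range check itself; __clear_user() may be
 * used instead once the caller has already done access_ok().
 */
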
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:	Destination address, in kernel space.  This buffer must be
 *		at least @__len bytes long.
 * @__from:	Source address, in user space.
 * @__len:	Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

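/*
 * Usage sketch (illustrative only; uname and the buffer size are
 * hypothetical):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A return equal to sizeof(name) means the source string was truncated
 * and name is not guaranteed to be NUL-terminated.
 */
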
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

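/*
 * Usage sketch (illustrative only; ustr is hypothetical and PATH_MAX is
 * just an example limit):
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 *
 * On success, len includes the terminating NUL, per the comment above.
 */
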
#endif /* _ASM_UACCESS_H */