xref: /linux/arch/hexagon/include/asm/uaccess.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * User memory access support for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block *may* be valid, false (zero)
 * if it is definitely invalid.
 *
 * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
 * simple MSB-based tests used by MIPS won't work.  Some further
 * optimization is probably possible here, but for now, keep it
 * reasonably simple and not *too* slow.  After all, we've got the
 * MMU for backup.
 */

#define __access_ok(addr, size) \
	((get_fs().seg == KERNEL_DS.seg) || \
	(((unsigned long)addr < get_fs().seg) && \
	  (unsigned long)size < (get_fs().seg - (unsigned long)addr)))
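
/*
 * Illustrative usage sketch (not part of the original header; "ubuf",
 * "kbuf" and "len" are hypothetical names).  A typical caller validates
 * the user pointer before touching it:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *
 * The plain copy_to_user()/copy_from_user() helpers perform this check
 * themselves; an explicit access_ok() is chiefly useful before the
 * __-prefixed variants, which skip it.
 */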

/*
 * When a kernel-mode page fault is taken, the faulting instruction
 * address is checked against a table of exception_table_entries.
 * Each entry is a tuple of the address of an instruction that may
 * be authorized to fault, and the address at which execution should
 * be resumed instead of the faulting instruction, so as to effect
 * a workaround.
 */

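/*
 * Illustrative sketch (an assumption; the struct is not defined in this
 * header).  In its generic form each table entry simply pairs the two
 * addresses described above:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;
 *		unsigned long fixup;
 *	};
 *
 * The fault handler searches the table for the faulting PC and, on a
 * match, resumes execution at the fixup address instead of treating the
 * fault as fatal.
 */
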
/*  Somewhat optimized assembly copy routines  */
unsigned long raw_copy_from_user(void *to, const void __user *from,
				     unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
				   unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
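
/*
 * Illustrative usage sketch (not part of the original header).  As with
 * the generic copy_{to,from}_user() helpers, these return the number of
 * bytes that could NOT be copied, so zero means success.  A hypothetical
 * caller ("uarg" is a user pointer, "struct foo" a kernel-side type):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */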

__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))
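
/*
 * Sketch of the intended contract (an assumption, following the generic
 * clear_user() convention): zero-fill "count" bytes of the user buffer
 * and return the number of bytes that could not be cleared, e.g.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */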

#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)

/*  get around the ifndef in asm-generic/uaccess.h  */
#define __strnlen_user __strnlen_user

extern long __strnlen_user(const char __user *src, long n);

static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
					     long n);

#include <asm-generic/uaccess.h>

/*  Todo:  an actual accelerated version of this.  */
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
					     long n)
{
	/* 0 means the source pointer faulted; otherwise the count includes
	   the terminating NUL */
	long res = __strnlen_user(src, n);

	if (unlikely(!res))
		return -EFAULT;

	if (res > n) {
		/* Source string does not fit: copy n bytes, unterminated,
		   zero-filling the tail if the copy faults partway */
		long left = raw_copy_from_user(dst, src, n);
		if (unlikely(left))
			memset(dst + (n - left), 0, left);
		return n;
	} else {
		/* String (including NUL) fits: return its length sans NUL */
		long left = raw_copy_from_user(dst, src, res);
		if (unlikely(left))
			memset(dst + (res - left), 0, left);
		return res - 1;
	}
}
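
/*
 * Worked example (illustrative, assuming the usual strnlen_user()
 * contract of "string length including the trailing NUL, or 0 on
 * fault"): for a user string "abc" and n = 8, __strnlen_user() returns
 * 4, four bytes (including the NUL) are copied, and the result is 3.
 * With n = 2 the string does not fit, two bytes are copied with no
 * terminating NUL, and the result is n = 2, matching the generic
 * strncpy_from_user() semantics for truncation.
 */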

#endif