/* arch/sparc/include/asm/uaccess_64.h (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e) */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while (0)
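
/*
 * Example (illustrative only, not compiled into this header): the
 * classic save/override/restore pattern these macros exist for.  The
 * helper name is hypothetical; vfs_read() is the usual in-kernel file
 * read API.
 */
#if 0
static ssize_t kernel_read_example(struct file *file, void *buf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();	/* save the current ASI selection */
	ssize_t ret;

	set_fs(KERNEL_DS);		/* uaccess now fetches via ASI_P */
	ret = vfs_read(file, (char __user *)buf, count, pos);
	set_fs(old_fs);			/* always restore the old segment */

	return ret;
}
#endif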

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 *
 * The constant-size fast path assumes size <= limit (true for the
 * small sizeof()-based sizes callers pass), so limit - size cannot
 * wrap and a single comparison suffices.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* addr + size may wrap around; treat wraparound as out of range */
	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
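
/*
 * Standalone demonstration (plain userspace C, illustrative only) of
 * why the non-constant path needs the wraparound test: without the
 * "addr < size" check, a huge size would overflow addr + size and
 * slip past the limit comparison.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool range_not_ok(unsigned long addr, unsigned long size,
			 unsigned long limit)
{
	addr += size;
	if (addr < size)	/* addr + size wrapped past ULONG_MAX */
		return true;
	return addr > limit;
}

int main(void)
{
	unsigned long limit = 0x0000080000000000UL;	/* example limit */

	printf("%d\n", range_not_ok(0x1000, 0x100, limit));	/* 0: fits */
	printf("%d\n", range_not_ok(0x1000, ~0UL, limit));	/* 1: wraps */
	return 0;
}
#endif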

/*
 * User accesses go through the secondary ASI, so a bad user pointer
 * can only fault (and be fixed up via the exception table below); it
 * can never reach kernel memory.  Every address is therefore safe to
 * attempt, and these checks are unconditionally true.
 */
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
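
/*
 * Hypothetical caller, for illustration: generic code still calls
 * access_ok() before raw transfers, so the interface must exist even
 * though on sparc64 it can never fail and compiles away entirely.
 */
#if 0
static int example_check(void __user *buf, unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;		/* unreachable on sparc64 */
	return 0;
}
#endif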

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
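
/*
 * Conceptual sketch only (the real lookup lives in the generic
 * extable code, not here): on a fault from a marked instruction, the
 * trap handler scans the table for the faulting PC and resumes at the
 * matching fixup address instead of treating it as a kernel bug.
 */
#if 0
static unsigned long find_fixup(const struct exception_table_entry *start,
				const struct exception_table_entry *end,
				unsigned long fault_pc)
{
	const struct exception_table_entry *e;

	for (e = start; e < end; e++)
		if (e->insn == fault_pc)	/* entries hold 32-bit addresses */
			return e->fixup;	/* continue execution here */
	return 0;				/* no entry: genuine fault */
}
#endif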

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
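
/*
 * Typical usage, for illustration (the function and its argument are
 * hypothetical): the macros size the access from the pointer type and
 * return 0 on success or -EFAULT on a fault.
 */
#if 0
static long example_ioctl(int __user *argp)
{
	int val;

	if (get_user(val, argp))	/* 4-byte lduwa, from sizeof(*argp) */
		return -EFAULT;
	val *= 2;
	if (put_user(val, argp))	/* 4-byte stwa, likewise */
		return -EFAULT;
	return 0;
}
#endif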

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})
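
/*
 * Standalone illustration (plain C, not part of the header) of the
 * sizeof-dispatch trick above: the switch condition is a compile-time
 * constant, so the compiler keeps exactly one case and the dispatch
 * costs nothing at run time.
 */
#if 0
#include <stdio.h>

#define describe_store(ptr)					\
({								\
	const char *__s;					\
	switch (sizeof(*(ptr))) {				\
	case 1: __s = "stba"; break;				\
	case 2: __s = "stha"; break;				\
	case 4: __s = "stwa"; break;				\
	case 8: __s = "stxa"; break;				\
	default: __s = "link error"; break; /* cf. __put_user_bad() */ \
	}							\
	__s;							\
})

int main(void)
{
	short h;
	long x;

	/* Prints "stha stxa" on LP64 targets such as sparc64. */
	printf("%s %s\n", describe_store(&h), describe_store(&x));
	return 0;
}
#endif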

/*
 * On a fault, the trap handler branches to label 3: via the
 * __ex_table entry emitted below; the fixup loads -EFAULT into the
 * return value (in the jmpl delay slot) and jumps back to 2:.
 */
#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

/*
 * Same fixup scheme as __put_user_asm, with one addition: the fixup
 * also clears the destination register (clr %1), so a faulting
 * get_user() never leaves its output uninitialized.
 */
#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
/*
 * The fast copy only reports that a fault happened; the fixup path
 * then recomputes exactly how many bytes were left uncopied, which is
 * what callers expect as the return value.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
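
/*
 * Typical caller, for illustration (the helper is hypothetical): the
 * return value is the number of bytes that could NOT be copied, so
 * any nonzero result is turned into -EFAULT.
 */
#if 0
static int fetch_buf(void *dst, const void __user *uptr, size_t len)
{
	if (copy_from_user(dst, uptr, len))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}
#endif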

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
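
/*
 * Hedged sketch (the helper is hypothetical) combining the two
 * declarations above: strnlen_user() returns the string length
 * including the terminating NUL, or 0 on a fault, so its result both
 * validates the pointer and bounds the copy.
 */
#if 0
static long fetch_string(char *kbuf, const char __user *ustr, long max)
{
	long len = strnlen_user(ustr, max);

	if (!len)
		return -EFAULT;		/* ustr itself faulted */
	if (len > max)
		return -EINVAL;		/* no NUL within the limit */
	if (copy_from_user(kbuf, ustr, len))
		return -EFAULT;
	return len;
}
#endif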

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */