/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size-(size != 0)" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) ({				\
	unsigned long __ao_a = (addr), __ao_b = (size);		\
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b;	\
	(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })

#define access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__access_ok(((unsigned long)(addr)), (size));	\
})
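
/*
 * Illustration only (not part of the API): with USER_DS the mask is
 * 0xfffffc0000000000, i.e. the complement of the user address range,
 * so for a user pointer such as
 *
 *	addr = 0x0000000120000000, size = 16
 *	end  = addr + size - 1   = 0x000000012000000f
 *
 * none of addr, size or end intersects the mask and the access is
 * allowed.  A KSEG pointer like 0xfffffc0000001000 does intersect it,
 * so __access_ok() fails and the checked routines below return -EFAULT.
 * With KERNEL_DS the mask is 0 and every address passes.
 */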

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
  __get_user_check((x), (ptr), sizeof(*(ptr)))
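
/*
 * Illustration only (hypothetical "uptr" syscall argument): both macros
 * evaluate to 0 on success and -EFAULT on failure, and get_user() leaves
 * 0 in its destination when the access fails.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */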

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
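
/*
 * Illustration only (hypothetical structure and pointer names): one
 * access_ok() check can cover several unchecked accesses to the same
 * user structure.
 *
 *	struct foo __user *ufoo;
 *	u32 a, b;
 *
 *	if (!access_ok(ufoo, sizeof(*ufoo)))
 *		return -EFAULT;
 *	if (__get_user(a, &ufoo->a) || __get_user(b, &ufoo->b))
 *		return -EFAULT;
 */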

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"

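/*
 * Illustration only: substituting the arguments used by __get_user_32()
 * below, EXC(1b,2b,%0,%1) emits roughly
 *
 *	.section __ex_table,"a"
 *		.long 1b-.
 *		lda %0,2b-1b(%1)
 *	.previous
 *
 * The .long records where the faulting instruction sits relative to the
 * table entry.  The lda is never executed: its register fields name the
 * register to fix up on a fault (%0 here, or $31 when there is nothing
 * to fix) and the register that receives the error code (%1), and its
 * displacement (2b-1b) tells the fixup code how far to skip ahead to
 * resume execution.
 */
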
extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	  case 1: __get_user_8(ptr); break;			\
	  case 2: __get_user_16(ptr); break;			\
	  case 4: __get_user_32(ptr); break;			\
	  case 8: __get_user_64(ptr); break;			\
	  default: __get_user_unknown(); break;			\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2:	ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)						\
	__asm__("1: ldq_u %0,0(%2)\n"					\
	"	extbl %0,%2,%0\n"					\
	"2:\n"								\
	EXC(1b,2b,%0,%1)						\
		: "=&r"(__gu_val), "=r"(__gu_err)			\
		: "r"(addr), "1"(__gu_err))
#endif
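
/*
 * For illustration only: a rough C equivalent of the non-BWX
 * __get_user_16() sequence above.  ldq_u loads the aligned quadword
 * containing the given byte address, and extwl/extwh pull the low and
 * high parts of the (possibly straddling) halfword out of the two
 * quadwords so they can be OR-ed together:
 *
 *	u64 q0 = *(u64 *)((unsigned long)addr & ~7UL);		// ldq_u 0(addr)
 *	u64 q1 = *(u64 *)(((unsigned long)addr + 1) & ~7UL);	// ldq_u 1(addr)
 *	__gu_val = extwl(q0, addr) | extwh(q1, addr);		// extract, merge
 *
 * where extwl()/extwh() stand in for the corresponding byte-extract
 * instructions.  Both loads are covered by exception table entries, so
 * a fault in either lands on label 3 with __gu_err set.
 */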

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	  case 1: __put_user_8(x, ptr); break;			\
	  case 2: __put_user_16(x, ptr); break;			\
	  case 4: __put_user_32(x, ptr); break;			\
	  case 8: __put_user_64(x, ptr); break;			\
	  default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x, __pu_addr); break;	\
		  case 2: __put_user_16(x, __pu_addr); break;	\
		  case 4: __put_user_32(x, __pu_addr); break;	\
		  case 8: __put_user_64(x, __pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
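
/*
 * For illustration only: a rough C equivalent of the non-BWX
 * __put_user_8() sequence above.  Without a byte store, the containing
 * quadword is loaded, the target byte is masked out, the new byte is
 * inserted, and the quadword is written back:
 *
 *	u64 *qp = (u64 *)((unsigned long)addr & ~7UL);	// aligned quadword
 *	int shift = ((unsigned long)addr & 7) * 8;
 *	u64 q = *qp;					// ldq_u
 *	q &= ~(0xffUL << shift);			// mskbl
 *	q |= (u64)(u8)(x) << shift;			// insbl + or
 *	*qp = q;					// stq_u
 *
 * Note this read-modify-write is not atomic with respect to the
 * neighbouring bytes; the 16-bit case has to do the same dance across
 * two quadwords.
 */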


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}
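
/*
 * These back the generic copy_{to,from}_user() helpers in
 * <linux/uaccess.h>, which add the access_ok() check and return the
 * number of bytes that could not be copied.  Illustrative caller
 * (hypothetical buffer names):
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */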

extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}
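
/*
 * Illustration only: clear_user() zero-fills user memory and returns the
 * number of bytes that could not be cleared (the full length when the
 * range fails __access_ok()):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */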

#define user_addr_max() \
        (uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */