xref: /linux/arch/arm64/include/asm/uaccess.h (revision 302df34c4e64b9e83ee31cbf508b38b62b428bd6)
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
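
/*
 * Example (illustrative only, not part of this header): the classic
 * save/override/restore pattern used by callers that must hand a kernel
 * buffer to a routine expecting a __user pointer. "kernel_read_helper",
 * "kbuf" and "len" are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = kernel_read_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */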

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
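
/*
 * For reference, a plain C sketch of the same 65-bit comparison using
 * GCC's unsigned __int128 (illustrative only; the asm above performs it
 * branchlessly and without truncating the intermediate sum):
 *
 *	unsigned __int128 sum = (unsigned __int128)(unsigned long)addr + size;
 *
 *	return sum <= (unsigned __int128)limit + 1;
 */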

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)
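
/*
 * Worked example (value hypothetical): sign_extend64() replicates bit 55
 * into bits [63:56], so a TBI tag in the top byte of a user pointer
 * (which has bit 55 clear) is simply cleared:
 *
 *	untagged_addr(0x5a00001234567890)  ==  0x0000001234567890
 */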

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}
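
/*
 * Worked example (addresses hypothetical): with swapper_pg_dir at
 * physical 0x40a00000 and RESERVED_TTBR0_SIZE == PAGE_SIZE (0x1000),
 * the sequence above installs 0x409ff000 into TTBR0_EL1, i.e. the
 * all-zero reserved_ttbr0 page linked immediately before swapper_pg_dir,
 * so any user-address translation faults while uaccess is disabled.
 */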

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}
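
/*
 * Usage sketch (hypothetical caller, in the style of the arm64 futex
 * code): bracket an open-coded user access with explicit enable/disable
 * so it works under both TTBR0 PAN and PSTATE.PAN:
 *
 *	uaccess_enable();
 *	... exclusive load/store loop on a __user address ...
 *	uaccess_disable();
 */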

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
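
/*
 * Worked example (assuming 48-bit VAs, where USER_DS is the inclusive
 * limit 0x0000ffffffffffff): for ptr == 0x0000aaaabbbbcccc the BICS
 * result is zero, Z is set and the pointer passes through unchanged;
 * for ptr == 0xffff000012345678 the high bits survive the BIC, Z is
 * clear and CSEL yields NULL, so a mispredicted access_ok() cannot be
 * used to dereference a kernel address under speculation. The csdb()
 * prevents the CSEL from being resolved with speculative flag values.
 */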

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x",  __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
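
/*
 * Example usage (hypothetical ioctl handler, not part of this header):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */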

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__put_user_err((x), __p, (err));			\
	} else	{							\
		(err) = -EFAULT;					\
	}								\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
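
/*
 * Example usage (hypothetical, mirroring the get_user() sketch above):
 *
 *	if (put_user(res, (u64 __user *)uaddr))
 *		return -EFAULT;
 */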

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
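
/*
 * The raw_copy_*() hooks above are the arch backends for the generic
 * copy_{to,from}_user() wrappers in <linux/uaccess.h>. Typical caller
 * (sketch; "struct req" and "ubuf" are hypothetical):
 *
 *	struct req r;
 *
 *	if (copy_from_user(&r, ubuf, sizeof(r)))
 *		return -EFAULT;
 */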

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user
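
/*
 * Example (sketch): zero the uncopied tail of a user buffer; a nonzero
 * return is the number of bytes that could not be cleared:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */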

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);
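
/*
 * Example (sketch): strncpy_from_user() returns the length of the copied
 * string (which may equal count on truncation), or -EFAULT on a faulting
 * access:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */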

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif
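
/*
 * Note (sketch): __copy_from_user_flushcache() backs the generic
 * _copy_from_iter_flushcache() path in lib/iov_iter.c, used by
 * persistent-memory drivers that need data copied from user space to be
 * flushed to the point of persistence as well; as elsewhere, a nonzero
 * return means bytes were left uncopied ("pmem_dst" is hypothetical):
 *
 *	if (__copy_from_user_flushcache(pmem_dst, ubuf, size))
 *		return -EFAULT;
 */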

#endif /* __ASM_UACCESS_H */