/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/compiler.h>
#include <asm/extable.h>

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
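
/*
 * Typical (legacy) use of KERNEL_DS, shown for illustration only:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... user-access routines may now be passed kernel pointers ...
 *	set_fs(old_fs);
 */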

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long __addr = (unsigned long)(addr);			\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (__addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
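
/*
 * In effect (illustration only, with the 65-bit check spelled out):
 *
 *	roksum = __addr + size;			// ADDS, may set carry
 *	flag = !carry && roksum <= addr_limit;	// CCMP ... cc / CSET ... ls
 */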

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

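/*
 * Illustrative use (not taken from this file):
 *
 *	addr = untagged_addr(addr);
 *	if (!access_ok(VERIFY_READ, (const void __user *)addr, size))
 *		return -EFAULT;
 */
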
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
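
/*
 * Each _ASM_EXTABLE entry is a pair of 32-bit, PC-relative offsets: the
 * faulting instruction and its fixup handler (see asm/extable.h).
 */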

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
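
/*
 * Typical pattern around a user access (illustration only; this is what the
 * __get_user_err()/__put_user_err() macros below do):
 *
 *	uaccess_enable_not_uao();
 *	... load/store to user memory ...
 *	uaccess_disable_not_uao();
 */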

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x",  __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
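
/*
 * Example (illustration only):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */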

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
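
/*
 * Example (illustration only):
 *
 *	if (put_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */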

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user __arch_copy_from_user
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user __arch_copy_to_user
extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

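/*
 * The generic copy_{from,to}_user() wrappers in <linux/uaccess.h> are built
 * on the raw_copy_*() routines above. Illustrative use:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
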
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif

#endif /* __ASM_UACCESS_H */