/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	asm (ALTERNATIVE("",
			 "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
	     : [addr] "+r" (addr)
	     : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
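
/*
 * Illustrative sketch (not part of this header): how a caller might use
 * untagged_addr() before comparing or range-checking a possibly tagged
 * user pointer.  The variable names are hypothetical.
 *
 *	void __user *tagged = ...;	// may carry LAM metadata bits
 *	void __user *plain = untagged_addr(tagged);
 *
 * 'plain' keeps the type of 'tagged'; only the bits covered by the
 * per-CPU tlbstate_untag_mask are cleared, and the ALTERNATIVE above is
 * a no-op on CPUs without X86_FEATURE_LAM.
 */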

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#endif

/*
 * The virtual address space is logically divided into a kernel
 * half and a user half.  When cast to a signed type, user pointers
 * are positive and kernel pointers are negative.
 */
#define valid_user_address(x) ((__force long)(x) >= 0)
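
/*
 * Illustrative example (not part of this header), assuming the usual
 * x86-64 layout: a user pointer such as 0x00007fffffffe000 is positive
 * when cast to 'long', while a kernel pointer such as
 * 0xffff888000000000 has the sign bit set, so:
 *
 *	valid_user_address((void __user *)0x00007fffffffe000)	-> true
 *	valid_user_address((void __user *)0xffff888000000000)	-> false
 */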

/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */
#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
#define masked_user_access_begin(x) ({				\
	__auto_type __masked_ptr = (x);				\
	__masked_ptr = mask_user_address(__masked_ptr);		\
	__uaccess_begin(); __masked_ptr; })
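
/*
 * Illustrative sketch (not part of this header) of the intended usage,
 * assuming the unsafe_get_user()/user_access_end() helpers from
 * <linux/uaccess.h>; the variable names are hypothetical:
 *
 *	u32 __user *uaddr = ...;
 *	u32 val;
 *
 *	uaddr = masked_user_access_begin(uaddr);
 *	unsafe_get_user(val, uaddr, Efault);
 *	user_access_end();
 *	...
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 *
 * mask_user_address() turns a kernel address (sign bit set) into all
 * ones, so the access above faults instead of needing an access_ok()
 * branch plus a speculation fence.
 */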

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * "CR3.LAM_SUP", which can narrow the canonicality check if we ever
 * enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (__force unsigned long)ptr;

		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
	}
}
#define __access_ok __access_ok
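
/*
 * Illustrative examples (not part of this header); 'ptr' and 'len' are
 * hypothetical:
 *
 *	__access_ok(ptr, 16);	// constant size <= PAGE_SIZE: only checks
 *				// that 'ptr' is in the user half
 *	__access_ok(ptr, len);	// variable size: also checks that
 *				// 'ptr + len' neither wraps around nor
 *				// ends up with the sign bit set
 */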

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If the CPU has the FSRM feature, use 'rep movsb'.
	 * Otherwise, call rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
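
/*
 * Illustrative sketch (not part of this header): a typical caller goes
 * through the generic copy_from_user() wrapper, which performs the
 * access_ok() check and then lands in raw_copy_from_user() above.  The
 * struct and variable names are hypothetical.
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 *
 * Like copy_user_generic(), raw_copy_{from,to}_user() return the number
 * of bytes that could not be copied, 0 on success.
 */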

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
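
/*
 * Illustrative sketch (not part of this header): the _nocache and
 * _flushcache variants are for destinations that should not be left
 * dirty in the CPU cache (e.g. buffers backed by persistent memory).
 * A hypothetical caller with a kernel buffer 'kbuf':
 *
 *	if (__copy_from_user_inatomic_nocache(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * As with the regular copies, the return value is the number of bytes
 * that were not copied, not an errno.
 */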

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
	       _ASM_EXTABLE_UA(1b, 2b)
	       : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
	       : "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
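
/*
 * Illustrative example (not part of this header); 'ubuf' and 'len' are
 * hypothetical:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;	// non-zero return: bytes not zeroed
 *
 * clear_user() performs the __access_ok() check itself; __clear_user()
 * skips it and is for callers that have already validated the range.
 */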
#endif /* _ASM_X86_UACCESS_64_H */