xref: /linux/arch/arm64/include/asm/futex.h (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FUTEX_H
#define __ASM_FUTEX_H

#include <linux/futex.h>
#include <linux/uaccess.h>

#include <asm/errno.h>
#include <asm/lsui.h>

#define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */

/*
 * LL/SC fallbacks: load-exclusive/store-exclusive loops, retried up to
 * FUTEX_MAX_LOOPS times before giving up with -EAGAIN.
 */
#define LLSC_FUTEX_ATOMIC_OP(op, insn)					\
static __always_inline int						\
__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)	\
{									\
	unsigned int loops = FUTEX_MAX_LOOPS;				\
	int ret, oldval, newval;					\
									\
	uaccess_enable_privileged();					\
	asm volatile("// __llsc_futex_atomic_" #op "\n"			\
"	prfm	pstl1strm, %[uaddr]\n"					\
"1:	ldxr	%w[oldval], %[uaddr]\n"					\
	insn "\n"							\
"2:	stlxr	%w[ret], %w[newval], %[uaddr]\n"			\
"	cbz	%w[ret], 3f\n"						\
"	sub	%w[loops], %w[loops], %w[ret]\n"			\
"	cbnz	%w[loops], 1b\n"					\
"	mov	%w[ret], %w[err]\n"					\
"3:\n"									\
"	dmb	ish\n"							\
	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])			\
	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])			\
	: [ret] "=&r" (ret), [oldval] "=&r" (oldval),			\
	  [uaddr] "+Q" (*uaddr), [newval] "=&r" (newval),		\
	  [loops] "+r" (loops)						\
	: [oparg] "r" (oparg), [err] "Ir" (-EAGAIN)			\
	: "memory");							\
	uaccess_disable_privileged();					\
									\
	if (!ret)							\
		*oval = oldval;						\
									\
	return ret;							\
}

LLSC_FUTEX_ATOMIC_OP(add, "add	%w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(or,  "orr	%w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(and, "and	%w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(eor, "eor	%w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(set, "mov	%w[newval], %w[oparg]")
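
/*
 * For illustration: each line above expands to a helper named after its
 * op, e.g. LLSC_FUTEX_ATOMIC_OP(add, ...) defines
 *
 *	static __always_inline int
 *	__llsc_futex_atomic_add(int oparg, u32 __user *uaddr, int *oval);
 *
 * which atomically performs *uaddr += oparg and, on success, returns
 * the previous value through *oval.
 */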

/*
 * LL/SC cmpxchg: returns 0 with *oval set to the value observed at
 * uaddr, whether or not it matched oldval; the caller decides whether
 * the exchange actually took place. A comparison failure branches
 * straight to 4: and skips the dmb.
 */
static __always_inline int
__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	int ret = 0;
	unsigned int loops = FUTEX_MAX_LOOPS;
	u32 val, tmp;

	uaccess_enable_privileged();
	asm volatile("//__llsc_futex_cmpxchg\n"
"	prfm	pstl1strm, %[uaddr]\n"
"1:	ldxr	%w[curval], %[uaddr]\n"
"	eor	%w[tmp], %w[curval], %w[oldval]\n"
"	cbnz	%w[tmp], 4f\n"
"2:	stlxr	%w[tmp], %w[newval], %[uaddr]\n"
"	cbz	%w[tmp], 3f\n"
"	sub	%w[loops], %w[loops], %w[tmp]\n"
"	cbnz	%w[loops], 1b\n"
"	mov	%w[ret], %w[err]\n"
"3:\n"
"	dmb	ish\n"
"4:\n"
	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w[ret])
	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w[ret])
	: [ret] "+r" (ret), [curval] "=&r" (val),
	  [uaddr] "+Q" (*uaddr), [tmp] "=&r" (tmp),
	  [loops] "+r" (loops)
	: [oldval] "r" (oldval), [newval] "r" (newval),
	  [err] "Ir" (-EAGAIN)
	: "memory");
	uaccess_disable_privileged();

	if (!ret)
		*oval = val;

	return ret;
}

#ifdef CONFIG_ARM64_LSUI

/*
 * FEAT_LSUI provides unprivileged load/store instructions that honour
 * user permissions directly, so wrap them with uaccess_ttbr0_enable()/
 * uaccess_ttbr0_disable() only; unlike the LL/SC paths above, no PAN
 * toggling is required.
 */

#define LSUI_FUTEX_ATOMIC_OP(op, asm_op)				\
static __always_inline int						\
__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)	\
{									\
	int ret = 0;							\
	int oldval;							\
									\
	uaccess_ttbr0_enable();						\
									\
	asm volatile("// __lsui_futex_atomic_" #op "\n"			\
	__LSUI_PREAMBLE							\
"1:	" #asm_op "al	%w[oparg], %w[oldval], %[uaddr]\n"		\
"2:\n"									\
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])			\
	: [ret] "+r" (ret), [uaddr] "+Q" (*uaddr),			\
	  [oldval] "=r" (oldval)					\
	: [oparg] "r" (oparg)						\
	: "memory");							\
									\
	uaccess_ttbr0_disable();					\
									\
	if (!ret)							\
		*oval = oldval;						\
	return ret;							\
}

LSUI_FUTEX_ATOMIC_OP(add, ldtadd)
LSUI_FUTEX_ATOMIC_OP(or, ldtset)
LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr)
LSUI_FUTEX_ATOMIC_OP(set, swpt)
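
/*
 * Mapping note: the "al" suffix appended by the macro selects the
 * acquire + release variants, e.g. ldtadd becomes ldtaddal. ldtset sets
 * the bits of its operand (OR), ldtclr clears them (AND NOT), and swpt
 * swaps unconditionally (SET). There is no unprivileged EOR variant;
 * see __lsui_futex_atomic_eor() below.
 */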

/*
 * 64-bit unprivileged compare-and-swap. As with any CAS instruction,
 * casalt writes the value it observed at uaddr back into *oldval, so
 * the caller can tell whether the exchange succeeded.
 */
static __always_inline int
__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval)
{
	int ret = 0;

	uaccess_ttbr0_enable();

	asm volatile("// __lsui_cmpxchg64\n"
	__LSUI_PREAMBLE
"1:	casalt	%[oldval], %[newval], %[uaddr]\n"
"2:\n"
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
	: [ret] "+r" (ret), [uaddr] "+Q" (*uaddr),
	  [oldval] "+r" (*oldval)
	: [newval] "r" (newval)
	: "memory");

	uaccess_ttbr0_disable();

	return ret;
}

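/*
 * Worked example (illustrative): for a futex at byte offset 4 within
 * its aligned 8-byte container (say uaddr = 0x1004), uaddr64 = 0x1000,
 * futex_pos = 1 and other_pos = 0. The neighbouring u32 at offset 0 is
 * read first, carried unchanged into the proposed 64-bit value, and if
 * it changes under us the casalt comparison fails and we return
 * -EAGAIN so the caller can retry.
 */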
static __always_inline int
__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	u64 __user *uaddr64;
	bool futex_pos, other_pos;
	u32 other, orig_other;
	union {
		u32 futex[2];
		u64 raw;
	} oval64, orig64, nval64;

	/* Locate the futex within its naturally-aligned 64-bit container. */
	uaddr64 = (u64 __user *)PTR_ALIGN_DOWN(uaddr, sizeof(u64));
	futex_pos = !IS_ALIGNED((unsigned long)uaddr, sizeof(u64));
	other_pos = !futex_pos;

	oval64.futex[futex_pos] = oldval;
	if (get_user(oval64.futex[other_pos], (u32 __user *)uaddr64 + other_pos))
		return -EFAULT;

	orig64.raw = oval64.raw;

	/* Preserve the neighbouring word in the proposed new value. */
	nval64.futex[futex_pos] = newval;
	nval64.futex[other_pos] = oval64.futex[other_pos];

	if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw))
		return -EFAULT;

	oldval = oval64.futex[futex_pos];
	other = oval64.futex[other_pos];
	orig_other = orig64.futex[other_pos];

	/* The neighbour changed underneath us: ask the caller to retry. */
	if (other != orig_other)
		return -EAGAIN;

	*oval = oldval;

	return 0;
}

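/*
 * Worked example (illustrative): for FUTEX_OP_ANDN with a userspace
 * oparg of 0x1, arch_futex_atomic_op_inuser() passes ~0x1 down here;
 * inverting again hands 0x1 to ldtclr, which clears bit 0, i.e.
 * *uaddr &= ~0x1 as FUTEX_OP_ANDN requires.
 */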
static __always_inline int
__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
{
	/*
	 * arch_futex_atomic_op_inuser() inverts oparg for FUTEX_OP_ANDN
	 * before calling us, and ldtclr performs an AND NOT of its
	 * operand. Invert oparg again so ldtclr's implicit negation is
	 * undone, yielding the plain AND the caller asked for.
	 */
	return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
}

static __always_inline int
__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
{
	u32 oldval, newval, val;
	int ret, i;

	if (get_user(oldval, uaddr))
		return -EFAULT;

	/*
	 * There are no ldteor/stteor instructions, so emulate the
	 * exclusive-OR with a bounded cmpxchg loop.
	 */
	for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
		newval = oldval ^ oparg;

		ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val);
		switch (ret) {
		case -EFAULT:
			return ret;
		case -EAGAIN:
			continue;
		}

		if (val == oldval) {
			*oval = val;
			return 0;
		}

		oldval = val;
	}

	return -EAGAIN;
}

static __always_inline int
__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	/*
	 * Callers of futex_atomic_cmpxchg_inatomic() already retry on
	 * -EAGAIN, so there is no need for another bounded retry loop
	 * here.
	 */
	return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
}
#endif	/* CONFIG_ARM64_LSUI */

#define FUTEX_ATOMIC_OP(op)						\
static __always_inline int						\
__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)		\
{									\
	return __lsui_llsc_body(futex_atomic_##op, oparg, uaddr, oval);	\
}

FUTEX_ATOMIC_OP(add)
FUTEX_ATOMIC_OP(or)
FUTEX_ATOMIC_OP(and)
FUTEX_ATOMIC_OP(eor)
FUTEX_ATOMIC_OP(set)
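
/*
 * __lsui_llsc_body() comes from <asm/lsui.h>: on the assumption that it
 * follows the same pattern as __lse_ll_sc_body() in <asm/lse.h>, it
 * dispatches to the __lsui_*() variant when FEAT_LSUI is present and
 * falls back to the __llsc_*() implementation otherwise.
 */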

static __always_inline int
__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	return __lsui_llsc_body(futex_cmpxchg, uaddr, oldval, newval, oval);
}

static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
	int ret;
	u32 __user *uaddr;

	if (!access_ok(_uaddr, sizeof(u32)))
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);

	switch (op) {
	case FUTEX_OP_SET:
		ret = __futex_atomic_set(oparg, uaddr, oval);
		break;
	case FUTEX_OP_ADD:
		ret = __futex_atomic_add(oparg, uaddr, oval);
		break;
	case FUTEX_OP_OR:
		ret = __futex_atomic_or(oparg, uaddr, oval);
		break;
	case FUTEX_OP_ANDN:
		/* *uaddr &= ~oparg, implemented as an AND with ~oparg */
		ret = __futex_atomic_and(~oparg, uaddr, oval);
		break;
	case FUTEX_OP_XOR:
		ret = __futex_atomic_eor(oparg, uaddr, oval);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
			      u32 oldval, u32 newval)
{
	u32 __user *uaddr;

	if (!access_ok(_uaddr, sizeof(u32)))
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);

	return __futex_cmpxchg(uaddr, oldval, newval, uval);
}
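
/*
 * Sketch of the expected calling pattern in the generic futex code
 * (illustrative, based on kernel/futex/ usage; not defined here):
 *
 *	u32 curval;
 *	int err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, uval, newval);
 *
 *	if (err)
 *		return err;	// -EFAULT, or -EAGAIN from the arch
 *	if (curval != uval)
 *		...		// value changed; the exchange did not happen
 */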

#endif /* __ASM_FUTEX_H */