/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/alternative-macros.h>
#include <asm/fence.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>

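/*
 * __arch_xchg_masked: atomic exchange for 8- and 16-bit values.
 *
 * The A extension only provides word and doubleword AMOs, so sub-word
 * exchanges are emulated with an lr.w/sc.w retry loop on the aligned
 * 32-bit word containing the value: the old word is loaded, the target
 * byte/halfword is replaced under __mask, and the store-conditional is
 * retried until it succeeds.
 */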
#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n)		\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
	       prepend							\
	       "0:	lr.w %0, %2\n"					\
	       "	and  %1, %0, %z4\n"				\
	       "	or   %1, %1, %z3\n"				\
	       "	sc.w" sc_sfx " %1, %1, %2\n"			\
	       "	bnez %1, 0b\n"					\
	       append							\
	       : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
	       : "rJ" (__newx), "rJ" (~__mask)				\
	       : "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

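/*
 * __arch_xchg: atomic exchange for natively supported sizes, implemented
 * as a single amoswap.w/amoswap.d with the requested ordering suffix and
 * optional barriers emitted before/after the instruction.
 */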
#define __arch_xchg(sfx, prepend, append, r, p, n)			\
({									\
	__asm__ __volatile__ (						\
		prepend							\
		"	amoswap" sfx " %0, %2, %1\n"			\
		append							\
		: "=r" (r), "+A" (*(p))					\
		: "r" (n)						\
		: "memory");						\
})

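/*
 * _arch_xchg: dispatch on operand size.  8- and 16-bit operands go through
 * the masked LR/SC variant (using sc_sfx and sc_append), 32- and 64-bit
 * operands map directly to amoswap (using swap_sfx and swap_append); any
 * other size is a build-time error.
 */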
#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend,			\
		   sc_append, swap_append)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_xchg_masked(sc_sfx, prepend, sc_append,		\
				   __ret, __ptr, __new);		\
		break;							\
	case 4:								\
		__arch_xchg(".w" swap_sfx, prepend, swap_append,	\
			      __ret, __ptr, __new);			\
		break;							\
	case 8:								\
		__arch_xchg(".d" swap_sfx, prepend, swap_append,	\
			      __ret, __ptr, __new);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

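/*
 * The four standard ordering flavours: _relaxed imposes no ordering,
 * _acquire appends an acquire barrier, _release prepends a release barrier,
 * and the fully ordered arch_xchg() uses sc.w.rl plus a trailing full
 * barrier on the sub-word path and amoswap.aqrl on the word/doubleword
 * path.
 */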
#define arch_xchg_relaxed(ptr, x)					\
	_arch_xchg(ptr, x, "", "", "", "", "")

#define arch_xchg_acquire(ptr, x)					\
	_arch_xchg(ptr, x, "", "", "",					\
		   RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)

#define arch_xchg_release(ptr, x)					\
	_arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")

#define arch_xchg(ptr, x)						\
	_arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")

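/*
 * Size-checked convenience wrappers: xchg32()/xchg64() are arch_xchg() with
 * a build-time assertion that the pointed-to type is exactly 4 or 8 bytes.
 */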
#define xchg32(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	arch_xchg((ptr), (x));						\
})

#define xchg64(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_xchg((ptr), (x));						\
})

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
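/*
 * Illustrative caller-side sketch (not part of this header): the typical
 * compare-and-swap retry loop, here incrementing a hypothetical
 * unsigned long counter *p only while it is non-zero.  Callers normally
 * use the generic cmpxchg() wrapper rather than arch_cmpxchg() directly.
 *
 *	unsigned long old = READ_ONCE(*p), prev;
 *
 *	while (old) {
 *		prev = cmpxchg(p, old, old + 1);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 *
 * On success the returned value equals the expected one; on failure it is
 * the value that won the race, which becomes the new expectation for the
 * retry.
 */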
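/*
 * __arch_cmpxchg_masked: compare-and-exchange for 8- and 16-bit values,
 * emulated with lr.w/sc.w on the aligned 32-bit word containing the value.
 * The loaded word is masked down to the field of interest and compared with
 * the expected value; only on a match is the field replaced and the word
 * stored back, retrying if the reservation was lost.
 */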
#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n)	\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __oldx = (ulong)(o) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr.w %0, %2\n"					\
		"	and  %1, %0, %z5\n"				\
		"	bne  %1, %z3, 1f\n"				\
		"	and  %1, %0, %z6\n"				\
		"	or   %1, %1, %z4\n"				\
		"	sc.w" sc_sfx " %1, %1, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
		: "rJ" ((long)__oldx), "rJ" (__newx),			\
		  "rJ" (__mask), "rJ" (~__mask)				\
		: "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

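/*
 * __arch_cmpxchg: compare-and-exchange for word and doubleword operands,
 * built from an LR/SC loop that bails out (forward branch to 1:) when the
 * loaded value does not match the expected one.  @co is a cast applied to
 * the expected value so that 32-bit comparisons are sign-extended to match
 * lr.w's result on RV64.
 */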
#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n)	\
({									\
	register unsigned int __rc;					\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr" lr_sfx " %0, %2\n"				\
		"	bne  %0, %z3, 1f\n"				\
		"	sc" sc_sfx " %1, %z4, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (r), "=&r" (__rc), "+A" (*(p))			\
		: "rJ" (co o), "rJ" (n)					\
		: "memory");						\
})

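/*
 * _arch_cmpxchg: dispatch on operand size, mirroring _arch_xchg().  Sub-word
 * operands use the masked variant; word and doubleword operands use the
 * plain LR/SC variant, with the cast argument selecting sign extension for
 * the 32-bit case and nothing for the 64-bit case.
 */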
#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append)		\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __old = (old);				\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_cmpxchg_masked(sc_sfx, prepend, append,		\
					__ret, __ptr, __old, __new);	\
		break;							\
	case 4:								\
		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
				__ret, __ptr, (long), __old, __new);	\
		break;							\
	case 8:								\
		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
				__ret, __ptr, /**/, __old, __new);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

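/*
 * Ordering flavours for cmpxchg: _relaxed, _acquire and _release mirror the
 * xchg variants above, while the fully ordered arch_cmpxchg() uses sc.rl
 * plus a trailing "fence rw, rw" on success.  arch_cmpxchg_local() only has
 * to be atomic with respect to the local CPU, so the relaxed form suffices.
 */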
#define arch_cmpxchg_relaxed(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", "")

#define arch_cmpxchg_acquire(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)

#define arch_cmpxchg_release(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")

#define arch_cmpxchg(ptr, o, n)						\
	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")

#define arch_cmpxchg_local(ptr, o, n)					\
	arch_cmpxchg_relaxed((ptr), (o), (n))

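/*
 * 64-bit wrappers: each arch_cmpxchg64*() variant forwards to an
 * arch_cmpxchg*() flavour after a build-time check that the operand really
 * is 8 bytes wide.
 */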
#define arch_cmpxchg64(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
})

#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_relaxed(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_acquire(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_acquire((ptr), (o), (n));				\
})

#define arch_cmpxchg64_release(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_release((ptr), (o), (n));				\
})

#ifdef CONFIG_RISCV_ISA_ZAWRS
/*
 * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to
 * @val we still expect it to terminate within a "reasonable" amount of
 * time, either for an implementation-specific reason, because of a pending,
 * locally-enabled interrupt, or because it has been configured to raise an
 * illegal instruction exception.
 */
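/*
 * __cmpwait() waits, with no ordering guarantee, until *@ptr no longer
 * equals @val: with Zawrs it takes a reservation with lr.w/lr.d and, if the
 * loaded value still matches @val, stalls in wrs.nto until the reservation
 * is invalidated (or the wait ends for one of the reasons above); without
 * Zawrs it falls back to a single pause hint.  __cmpwait_relaxed() is the
 * form intended for spin-wait loops such as smp_cond_load_relaxed().
 */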
static __always_inline void __cmpwait(volatile void *ptr,
				      unsigned long val,
				      int size)
{
	unsigned long tmp;

	asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
			     0, RISCV_ISA_EXT_ZAWRS, 1)
		 : : : : no_zawrs);

	switch (size) {
	case 4:
		asm volatile(
		"	lr.w	%0, %1\n"
		"	xor	%0, %0, %2\n"
		"	bnez	%0, 1f\n"
			ZAWRS_WRS_NTO "\n"
		"1:"
		: "=&r" (tmp), "+A" (*(u32 *)ptr)
		: "r" (val));
		break;
#if __riscv_xlen == 64
	case 8:
		asm volatile(
		"	lr.d	%0, %1\n"
		"	xor	%0, %0, %2\n"
		"	bnez	%0, 1f\n"
			ZAWRS_WRS_NTO "\n"
		"1:"
		: "=&r" (tmp), "+A" (*(u64 *)ptr)
		: "r" (val));
		break;
#endif
	default:
		BUILD_BUG();
	}

	return;

no_zawrs:
	asm volatile(RISCV_PAUSE : : : "memory");
}

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
#endif

#endif /* _ASM_RISCV_CMPXCHG_H */