xref: /linux/arch/riscv/include/asm/cmpxchg.h (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
/*
 * Copyright (C) 2014 Regents of the University of California
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */
13 
14 #ifndef _ASM_RISCV_CMPXCHG_H
15 #define _ASM_RISCV_CMPXCHG_H
16 
17 #include <linux/bug.h>
18 
19 #include <asm/barrier.h>
20 #include <asm/fence.h>
21 
/*
 * __xchg_relaxed() - atomically swap @new into *@ptr and return the old
 * value, with no ordering guarantees (relaxed).
 *
 * Dispatches on @size to a single atomic memory operation (amoswap.w for
 * 4-byte, amoswap.d for 8-byte objects); any other size is rejected at
 * compile time via BUILD_BUG().  The "+A" constraint hands the asm an
 * addressable memory operand; the "memory" clobber stops the compiler
 * from caching *@ptr across the swap.
 */
#define __xchg_relaxed(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
47 
/*
 * xchg_relaxed() - public relaxed-exchange entry point.  Snapshots @x
 * into a local of the pointee's type (evaluating @x exactly once),
 * dispatches on sizeof(*(ptr)), and casts the result back to that type.
 */
#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
54 
/*
 * __xchg_acquire() - atomic exchange with acquire semantics.
 *
 * Same amoswap.w/.d dispatch as __xchg_relaxed(), but RISCV_ACQUIRE_BARRIER
 * (from <asm/fence.h>) is emitted *after* the AMO, ordering the swap before
 * all subsequent memory accesses.
 */
#define __xchg_acquire(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w %0, %2, %1\n"			\
			RISCV_ACQUIRE_BARRIER				\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d %0, %2, %1\n"			\
			RISCV_ACQUIRE_BARRIER				\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
82 
/*
 * xchg_acquire() - public acquire-exchange entry point; type-checked
 * wrapper around __xchg_acquire() (see xchg_relaxed() for the pattern).
 */
#define xchg_acquire(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_acquire((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
89 
/*
 * __xchg_release() - atomic exchange with release semantics.
 *
 * Mirror image of __xchg_acquire(): RISCV_RELEASE_BARRIER is emitted
 * *before* the AMO, ordering all prior memory accesses before the swap.
 */
#define __xchg_release(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"	amoswap.w %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"	amoswap.d %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
117 
/*
 * xchg_release() - public release-exchange entry point; type-checked
 * wrapper around __xchg_release().
 */
#define xchg_release(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_release((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
124 
/*
 * __xchg() - fully-ordered atomic exchange.
 *
 * Uses the .aqrl variant of amoswap, which carries both acquire and
 * release ordering on the single AMO instruction, so no separate fences
 * are needed.
 */
#define __xchg(ptr, new, size)						\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w.aqrl %0, %2, %1\n"		\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d.aqrl %0, %2, %1\n"		\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
150 
/*
 * xchg() - public fully-ordered exchange entry point; type-checked
 * wrapper around __xchg().
 */
#define xchg(ptr, x)							\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));	\
})
156 
/*
 * xchg32() - xchg() restricted to 32-bit objects; any other pointee
 * size fails the BUILD_BUG_ON() at compile time.
 */
#define xchg32(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	xchg((ptr), (x));						\
})
162 
/*
 * xchg64() - xchg() restricted to 64-bit objects.
 */
#define xchg64(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	xchg((ptr), (x));						\
})
168 
/*
 * Atomic compare and exchange.  Compare OLD with MEM; if they are
 * identical, store NEW in MEM.  Return the initial value read from MEM.
 * Success is indicated by comparing the returned value with OLD.
 */
/*
 * __cmpxchg_relaxed() - compare-and-exchange with no ordering guarantees.
 *
 * LR/SC retry loop: lr.w/lr.d loads the old value into %0; if it differs
 * from @old (bne to label 1) the macro returns that value unchanged.
 * Otherwise sc.w/sc.d attempts the store, writing 0 into __rc on success
 * and non-zero on failure, in which case bnez loops back to retry.
 *
 * The "rJ" constraint (with the %z output modifier) lets a constant-zero
 * @old or @new be encoded directly as register x0 instead of needing a
 * scratch register.
 */
#define __cmpxchg_relaxed(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
209 
/*
 * cmpxchg_relaxed() - public relaxed compare-and-exchange entry point.
 * Evaluates @o and @n once each into locals of the pointee's type and
 * casts the returned old value back to that type.
 */
#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
217 
/*
 * __cmpxchg_acquire() - compare-and-exchange with acquire semantics.
 *
 * Same LR/SC loop as __cmpxchg_relaxed(), with RISCV_ACQUIRE_BARRIER
 * placed after the loop but *before* label 1.  The comparison-failure
 * branch (bne ... 1f) jumps past the barrier, so acquire ordering is
 * provided only on a successful exchange.
 */
#define __cmpxchg_acquire(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			RISCV_ACQUIRE_BARRIER				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			RISCV_ACQUIRE_BARRIER				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
255 
/*
 * cmpxchg_acquire() - public acquire compare-and-exchange entry point;
 * type-checked wrapper around __cmpxchg_acquire().
 */
#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
263 
/*
 * __cmpxchg_release() - compare-and-exchange with release semantics.
 *
 * RISCV_RELEASE_BARRIER is emitted before the LR/SC loop, ordering all
 * prior memory accesses before the exchange (and before the load on the
 * failure path as well, since the barrier precedes the lr).
 */
#define __cmpxchg_release(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"0:	lr.d %0, %2\n"				\
			"	bne %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
301 
/*
 * cmpxchg_release() - public release compare-and-exchange entry point;
 * type-checked wrapper around __cmpxchg_release().
 */
#define cmpxchg_release(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_release((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
309 
/*
 * __cmpxchg() - fully-ordered compare-and-exchange.
 *
 * The store uses sc.w.rl/sc.d.rl (release ordering on the store itself)
 * and a trailing "fence rw, rw" completes the full ordering on the
 * success path.  As in the acquire variant, the comparison-failure
 * branch jumps to label 1 past the fence, so a failed exchange carries
 * no extra ordering.
 */
#define __cmpxchg(ptr, old, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w.rl %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"	fence rw, rw\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne %0, %z3, 1f\n"			\
			"	sc.d.rl %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"	fence rw, rw\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
347 
/*
 * cmpxchg() - public fully-ordered compare-and-exchange entry point;
 * type-checked wrapper around __cmpxchg().
 */
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr),				\
				       _o_, _n_, sizeof(*(ptr)));	\
})
355 
/*
 * cmpxchg_local() - CPU-local compare-and-exchange: no cross-CPU ordering
 * required, so it maps straight to the relaxed low-level implementation.
 * (Note this calls __cmpxchg_relaxed() directly, without the result cast
 * the other public wrappers apply.)
 */
#define cmpxchg_local(ptr, o, n)					\
	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
358 
/*
 * cmpxchg32() - fully-ordered cmpxchg() restricted to 32-bit objects.
 */
#define cmpxchg32(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	cmpxchg((ptr), (o), (n));					\
})
364 
/*
 * cmpxchg32_local() - CPU-local (relaxed-ordering) compare-and-exchange
 * restricted to 32-bit objects.
 *
 * Fix: the cmpxchg_relaxed() expression was missing its trailing
 * semicolon, so any expansion of this macro was a syntax error inside
 * the statement expression; the sibling cmpxchg64_local() below already
 * has the correct form.
 */
#define cmpxchg32_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
370 
/*
 * cmpxchg64() - fully-ordered cmpxchg() restricted to 64-bit objects.
 */
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
376 
/*
 * cmpxchg64_local() - CPU-local (relaxed-ordering) compare-and-exchange
 * restricted to 64-bit objects.
 */
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
382 
383 #endif /* _ASM_RISCV_CMPXCHG_H */
384