/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

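/*
 * On rv32, CONFIG_GENERIC_ATOMIC64 supplies the atomic64_t ops from the
 * generic, spinlock-based asm-generic/atomic64.h implementation; on rv64
 * they are generated natively below from doubleword (.d) AMOs.
 */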
#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
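
/*
 * These hooks are used by the generic atomic fallback code to construct the
 * _acquire/_release variants of an operation from its _relaxed form.
 */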
26 
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)
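
/*
 * For illustration (a sketch of the expansion, not generated verbatim),
 * ATOMIC_OPS(add, add, i) produces, among others:
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * The AMO's result is discarded into the zero register, and no .aq/.rl
 * annotation is used, so the operation is unordered.
 */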

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,		\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,		\
					      atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
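
/*
 * RISC-V has no "amosub" instruction, so subtraction is an amoadd of the
 * negated operand.  Sketching the expansion of ATOMIC_OPS(sub, add, +, -i)
 * (illustrative, not generated verbatim):
 *
 *	arch_atomic_fetch_sub(i, v)   -> amoadd.w.aqrl with -i as the addend,
 *	                                 returning the old value
 *	arch_atomic_sub_return(i, v)  -> arch_atomic_fetch_sub(i, v) + -i,
 *	                                 i.e. the new value
 */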

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_sub_return		arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx)	\
({									\
	__asm__ __volatile__ (						\
		"0:	lr." sfx "     %[p],  %[c]\n"			\
		"	beq	       %[p],  %[u], 1f\n"		\
		"	add            %[rc], %[p], %[a]\n"		\
		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"		\
		"	bnez           %[rc], 0b\n"			\
		"	fence          rw, rw\n"			\
		"1:\n"							\
		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
		: [a]"r" (_a), [u]"r" (_u)				\
		: "memory");						\
})
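
/*
 * How the LR/SC loop above (and in the conditional ops below) works: lr
 * takes a load reservation on the counter; if the "unless" condition holds
 * we branch straight to 1: and leave the old value untouched.  Otherwise sc
 * attempts the store, writing zero to %[rc] on success and non-zero if the
 * reservation was lost, in which case bnez retries from 0:.
 */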

/*
 * The trailing "fence rw, rw" after a successful SC provides the full
 * barrier this operation must supply on success; the failure path branches
 * past the fence and remains unordered.
 */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
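
/*
 * Illustrative use (a sketch; kernel code normally calls the generic
 * atomic_fetch_add_unless() wrapper rather than the arch_ op, and "refs"
 * here is a hypothetical field):
 *
 *	// take a reference unless the count already hit zero
 *	if (!atomic_fetch_add_unless(&obj->refs, 1, 0))
 *		return NULL;	// object is being torn down
 */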

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx)	\
({									\
	__asm__ __volatile__ (						\
		"0:	lr." sfx "      %[p],  %[c]\n"			\
		"	bltz            %[p],  1f\n"			\
		"	addi            %[rc], %[p], 1\n"		\
		"	sc." sfx ".rl   %[rc], %[rc], %[c]\n"		\
		"	bnez            %[rc], 0b\n"			\
		"	fence           rw, rw\n"			\
		"1:\n"							\
		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
		:							\
		: "memory");						\
})

static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
	int prev, rc;

	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");

	return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
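
/*
 * Illustrative use (a sketch; the generic atomic_inc_unless_negative()
 * wrapper is the usual entry point, and "active" is a hypothetical counter):
 *
 *	if (!atomic_inc_unless_negative(&dev->active))
 *		return -EBUSY;	// counter was negative, nothing was added
 */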

#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx)	\
({									\
	__asm__ __volatile__ (						\
		"0:	lr." sfx "      %[p],  %[c]\n"			\
		"	bgtz            %[p],  1f\n"			\
		"	addi            %[rc], %[p], -1\n"		\
		"	sc." sfx ".rl   %[rc], %[rc], %[c]\n"		\
		"	bnez            %[rc], 0b\n"			\
		"	fence           rw, rw\n"			\
		"1:\n"							\
		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
		:							\
		: "memory");						\
})

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
	int prev, rc;

	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");

	return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx)		\
({									\
	__asm__ __volatile__ (						\
		"0:	lr." sfx "     %[p],  %[c]\n"			\
		"	addi           %[rc], %[p], -1\n"		\
		"	bltz           %[rc], 1f\n"			\
		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"		\
		"	bnez           %[rc], 0b\n"			\
		"	fence          rw, rw\n"			\
		"1:\n"							\
		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
		:							\
		: "memory");						\
})

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
	int prev, rc;

	_arch_atomic_dec_if_positive(prev, rc, v->counter, "w");

	return prev - 1;
}

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
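
/*
 * Note the return convention above: prev - 1 is the new value when *v was
 * positive and a negative number when it was not, so callers can test the
 * sign.  Illustrative use (a sketch, via the generic atomic_dec_if_positive()
 * wrapper; "count" is a hypothetical field):
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		wait_for_resource();	// no unit left to take
 */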

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
	s64 prev;
	long rc;

	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");

	return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");

	return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	_arch_atomic_dec_if_positive(prev, rc, v->counter, "d");

	return prev - 1;
}

#define arch_atomic64_dec_if_positive	arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */