/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions.  The _acquire and _fence versions would otherwise
 * end up with redundant back-to-back fences, so those fences are
 * defined away below.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

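/*
 * A rough sketch (describing the generic <linux/atomic.h> fallback layer,
 * not code in this file) of what the empty fences above buy us:
 *
 *	atomic_add_return(i, v)          ~  smp_mb(); atomic_add_return_relaxed(i, v);
 *	atomic_add_return_acquire(i, v)  ~  atomic_add_return_relaxed(i, v);
 *
 * The smp_read_barrier_depends() already issued at the end of every
 * _relaxed variant provides the trailing ordering, so emitting another
 * fence after it would be redundant.
 */
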
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

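/*
 * All of the read-modify-write operations below are built on Alpha's
 * load-locked/store-conditional pairs (ldl_l/stl_c and ldq_l/stq_c).
 * The store-conditional writes 0 into its source register when the
 * reservation has been lost, so "beq %0,2f" takes the out-of-line
 * branch (placed in .subsection 2, off the hot path) and the whole
 * sequence is retried.
 */
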
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

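/*
 * The _return and fetch_ variants keep the returned value in a separate
 * register (result) from the one fed to the store-conditional (temp),
 * because st{l,q}_c overwrites the stored register with its success
 * flag: _return hands back the new value (old <op> i), while fetch_
 * hands back the value originally loaded.
 */
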
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

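/*
 * The atomic64_* macros below are exact mirrors of the 32-bit ones,
 * operating on s64 with the quadword ldq_l/stq_c instructions instead
 * of ldl_l/stl_c.
 */
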
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
{									\
	s64 temp;							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

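/*
 * For example, ATOMIC_OPS(add) instantiates atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed() using the
 * addl instruction, plus the atomic64_* counterparts using addq.
 */
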
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

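/*
 * Only the _relaxed forms are implemented above.  Defining them to
 * themselves here tells the generic atomic headers that they exist, so
 * the _acquire, _release and fully ordered variants are generated from
 * them rather than from a fully ordered arch implementation.
 */
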
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

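/*
 * The bitwise ops take an explicit mnemonic because the Alpha names do
 * not all match the kernel op names: andnot uses bic (bit clear) and or
 * uses bis (bit set).
 */
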
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

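/*
 * Illustrative use (not part of this header): callers typically wrap
 * atomic_cmpxchg() in a read-and-retry loop, e.g. a hypothetical
 * saturating increment:
 *
 *	int old = atomic_read(v);
 *	int c;
 *
 *	while (old != INT_MAX &&
 *	       (c = atomic_cmpxchg(v, old, old + 1)) != old)
 *		old = c;
 */
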
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

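/*
 * Illustrative use (not part of this header): the "take a reference only
 * if the object is still live" idiom, which is what the generic
 * atomic_inc_not_zero() boils down to, shown with a hypothetical
 * obj->refcount:
 *
 *	if (atomic_fetch_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;
 */
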
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable @v was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

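/*
 * Illustrative use (not part of this header): atomic64_dec_if_positive()
 * fits "take one unit of a counted resource, but never go below zero"
 * logic.  With a hypothetical available-slots counter, a negative return
 * means nothing was taken:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;
 */
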
#endif /* _ALPHA_ATOMIC_H */