/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
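
/*
 * Sketch (illustrative): the generic atomic layer builds the acquire/
 * release/fully-ordered variants from the _relaxed ops plus the two
 * fences above, roughly along the lines of the fallbacks behind
 * <linux/atomic.h>:
 *
 *	__ret = arch_atomic_add_return_relaxed(i, v);
 *	__atomic_acquire_fence();		// acquire variant
 *
 *	__atomic_release_fence();		// release variant
 *	arch_atomic_add_return_relaxed(i, v);
 *
 * which is why most operations in this file only provide the _relaxed
 * form.
 */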

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
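
/*
 * Note on the asm templates above (descriptive, based on GCC's PowerPC
 * operand modifiers): "%U1" emits the "u" (update-form) suffix and "%X1"
 * the "x" (indexed-form) suffix when the "m<>" memory operand requires
 * them, so "lwz%U1%X1" can assemble as lwz/lwzu/lwzx/lwzux as needed.
 * With -mprefixed the compiler may pick a prefixed form with a 34-bit
 * displacement that these templates cannot print, hence the fallback to
 * a plain register-indirect access via the "b" (base register) constraint.
 */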

#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")
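
/*
 * For reference (illustrative expansion): ATOMIC_OPS(add, add, "c", I,
 * "xer") generates arch_atomic_add() whose loop body is roughly
 *
 *	1: lwarx  t,0,&v->counter
 *	   addic  t,t,a		// "add" + "%I2" ("i" for immediates) + "c"
 *	   stwcx. t,0,&v->counter
 *	   bne-   1b
 *
 * (addc t,t,a when 'a' does not fit the "I" signed-16-bit constraint).
 * The carry-setting "c"/"ic" forms write XER[CA], which is why these
 * invocations pass "xer" as an extra clobber.
 */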

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)
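
/*
 * Why the "." suffix on 'and' (explanatory note): andi. is the only
 * immediate form of AND on PowerPC and exists only in record form (it
 * always sets CR0), so the template uses suffix "." to get and./andi.;
 * ori and xori have plain immediate forms, hence the empty suffix for
 * or/xor.  CR0 is covered by the existing "cc" clobber.
 */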

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed  arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add%I2c	%0,%0,%2 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
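
/*
 * Usage sketch (illustrative, not part of this header's API surface;
 * 'my_ref' and 'my_ref_get' are hypothetical names): a refcount-style
 * "get unless already zero" built on the primitive above.
 *
 *	static atomic_t my_ref = ATOMIC_INIT(1);
 *
 *	static bool my_ref_get(void)
 *	{
 *		// Old value 0 means the object was already dead.
 *		return arch_atomic_fetch_add_unless(&my_ref, 1, 0) != 0;
 *	}
 */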

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
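
/*
 * Note on "=&b" above (explanatory): addi reads rA=0 as the literal
 * zero rather than as r0, so 't' must never be allocated to r0; the
 * "b" (base register) constraint class excludes r0, keeping the addi
 * a genuine decrement.
 */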

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
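
/*
 * Note on operand order (explanatory): subf rD,rA,rB computes rB - rA,
 * so the templates above place 'a' in the rA slot (e.g. subf %0,%2,%0
 * gives old_value - a) while sharing one template with add.  Unlike
 * the 32-bit paths these use plain add/subf (no carry, no immediate
 * forms), so they need no "xer" clobber.
 */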

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc
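
/*
 * Note on addic (explanatory): addi cannot be used in these loops
 * because it reads rA=0 as the literal zero, and "=&r" may allocate
 * 't' to r0.  addic treats every GPR uniformly, at the cost of
 * writing XER[CA], hence the "xer" clobber on the inc/dec variants.
 */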

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
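
/*
 * Usage sketch (illustrative; 'struct my_obj' and 'my_obj_tryget' are
 * hypothetical names): the classic "take a reference unless the object
 * is already on its way out" pattern.
 *
 *	struct my_obj { atomic64_t refs; };
 *
 *	static bool my_obj_tryget(struct my_obj *obj)
 *	{
 *		// Fails once the count has dropped to zero.
 *		return arch_atomic64_inc_not_zero(&obj->refs);
 *	}
 */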

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */