xref: /linux/arch/powerpc/include/asm/atomic.h (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_ATOMIC_H_
3 #define _ASM_POWERPC_ATOMIC_H_
4 
5 /*
6  * PowerPC atomic operations
7  */
8 
9 #ifdef __KERNEL__
10 #include <linux/types.h>
11 #include <asm/cmpxchg.h>
12 #include <asm/barrier.h>
13 #include <asm/asm-const.h>
14 #include <asm/asm-compat.h>
15 
16 /*
17  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
18  * a "bne-" instruction at the end, an isync is enough as an acquire
19  * barrier on platforms without lwsync.
20  */
21 #define __atomic_acquire_fence()					\
22 	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
23 
24 #define __atomic_release_fence()					\
25 	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
26 
27 static __inline__ int arch_atomic_read(const atomic_t *v)
28 {
29 	int t;
30 
31 	/* -mprefixed can generate offsets beyond range, fall back hack */
32 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
33 		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
34 	else
35 		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
36 
37 	return t;
38 }
39 
40 static __inline__ void arch_atomic_set(atomic_t *v, int i)
41 {
42 	/* -mprefixed can generate offsets beyond range, fall back hack */
43 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
44 		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
45 	else
46 		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
47 }
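/*
 * A hedged reading of the asm in the two accessors above: "%U1"/"%X1" are
 * powerpc operand modifiers that let GCC emit the update ("lwzu"/"stwu") or
 * indexed ("lwzx"/"stwx") forms when it chose such an addressing mode, and
 * the "m<>" constraint permits pre/post-modify addresses.  With
 * CONFIG_PPC_KERNEL_PREFIXED the compiler may pick a prefixed access whose
 * offset cannot be encoded by these plain mnemonics, hence the fallback to a
 * register-indirect 0(reg) access via the "b" constraint.
 */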
48 
49 #define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
50 static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
51 {									\
52 	int t;								\
53 									\
54 	__asm__ __volatile__(						\
55 "1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
56 	#asm_op "%I2" suffix " %0,%0,%2\n"				\
57 "	stwcx.	%0,0,%3 \n"						\
58 "	bne-	1b\n"							\
59 	: "=&r" (t), "+m" (v->counter)					\
60 	: "r"#sign (a), "r" (&v->counter)				\
61 	: "cc", ##__VA_ARGS__);						\
62 }									\
63 
64 #define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)		\
65 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)	\
66 {									\
67 	int t;								\
68 									\
69 	__asm__ __volatile__(						\
70 "1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
71 	#asm_op "%I2" suffix " %0,%0,%2\n"				\
72 "	stwcx.	%0,0,%3\n"						\
73 "	bne-	1b\n"							\
74 	: "=&r" (t), "+m" (v->counter)					\
75 	: "r"#sign (a), "r" (&v->counter)				\
76 	: "cc", ##__VA_ARGS__);						\
77 									\
78 	return t;							\
79 }
80 
81 #define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
82 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
83 {									\
84 	int res, t;							\
85 									\
86 	__asm__ __volatile__(						\
87 "1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
88 	#asm_op "%I3" suffix " %1,%0,%3\n"				\
89 "	stwcx.	%1,0,%4\n"						\
90 "	bne-	1b\n"							\
91 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
92 	: "r"#sign (a), "r" (&v->counter)				\
93 	: "cc", ##__VA_ARGS__);						\
94 									\
95 	return res;							\
96 }
97 
98 #define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
99 	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
100 	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
101 	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
102 
103 ATOMIC_OPS(add, add, "c", I, "xer")
104 ATOMIC_OPS(sub, sub, "c", I, "xer")
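/*
 * A hedged note on the parameters above: the "c" suffix plus the "%I2"
 * operand modifier make the generated instruction "addc"/"subc" for a
 * register operand, or "addic"/"subic" when GCC could use an immediate (the
 * "I" constraint accepts signed 16-bit constants).  Those carrying forms
 * update CA, which is why "xer" appears in the clobber list; the bitwise
 * ops further down use "K" (unsigned 16-bit) and need no XER clobber.
 */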
105 
106 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
107 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
108 
109 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
110 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
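/*
 * Illustrative sketch (hedged): the generic atomic layer composes the
 * acquire/release variants from the _relaxed ops above together with the
 * __atomic_acquire_fence()/__atomic_release_fence() macros defined near the
 * top of this file, roughly as follows.  The example_* helpers are
 * hypothetical names, not part of the kernel API.
 */
static inline int example_add_return_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_add_return_relaxed(i, v);

	__atomic_acquire_fence();	/* order the RMW before later accesses */
	return ret;
}

static inline int example_add_return_release(int i, atomic_t *v)
{
	__atomic_release_fence();	/* order earlier accesses before the RMW */
	return arch_atomic_add_return_relaxed(i, v);
}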
111 
112 #undef ATOMIC_OPS
113 #define ATOMIC_OPS(op, asm_op, suffix, sign)				\
114 	ATOMIC_OP(op, asm_op, suffix, sign)				\
115 	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
116 
117 ATOMIC_OPS(and, and, ".", K)
118 ATOMIC_OPS(or, or, "", K)
119 ATOMIC_OPS(xor, xor, "", K)
120 
121 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
122 #define arch_atomic_fetch_or_relaxed  arch_atomic_fetch_or_relaxed
123 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
124 
125 #undef ATOMIC_OPS
126 #undef ATOMIC_FETCH_OP_RELAXED
127 #undef ATOMIC_OP_RETURN_RELAXED
128 #undef ATOMIC_OP
129 
130 /**
131  * atomic_fetch_add_unless - add unless the number is a given value
132  * @v: pointer of type atomic_t
133  * @a: the amount to add to v...
134  * @u: ...unless v is equal to u.
135  *
136  * Atomically adds @a to @v, so long as it was not @u.
137  * Returns the old value of @v.
138  */
139 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
140 {
141 	int t;
142 
143 	__asm__ __volatile__ (
144 	PPC_ATOMIC_ENTRY_BARRIER
145 "1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
146 	cmpw	0,%0,%3 \n\
147 	beq	2f \n\
148 	add%I2c	%0,%0,%2 \n"
149 "	stwcx.	%0,0,%1 \n\
150 	bne-	1b \n"
151 	PPC_ATOMIC_EXIT_BARRIER
152 "	sub%I2c	%0,%0,%2 \n\
153 2:"
154 	: "=&r" (t)
155 	: "r" (&v->counter), "rI" (a), "r" (u)
156 	: "cc", "memory", "xer");
157 
158 	return t;
159 }
160 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
161 
162 /*
163  * Atomically test *v and decrement if it is greater than 0.
164  * The function returns the old value of *v minus 1, even if
165  * the atomic variable, v, was not decremented.
166  */
167 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
168 {
169 	int t;
170 
171 	__asm__ __volatile__(
172 	PPC_ATOMIC_ENTRY_BARRIER
173 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
174 	cmpwi	%0,1\n\
175 	addi	%0,%0,-1\n\
176 	blt-	2f\n"
177 "	stwcx.	%0,0,%1\n\
178 	bne-	1b"
179 	PPC_ATOMIC_EXIT_BARRIER
180 	"\n\
181 2:"	: "=&b" (t)
182 	: "r" (&v->counter)
183 	: "cc", "memory");
184 
185 	return t;
186 }
187 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
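/*
 * Illustrative sketch (hedged): because the old value minus one is returned
 * whether or not the store happened, a caller can treat a non-negative
 * result as having consumed one unit, e.g. a simple token or semaphore-style
 * "try down".  example_take_token() is a hypothetical helper; real code
 * would use the atomic_dec_if_positive() wrapper.
 */
static inline bool example_take_token(atomic_t *tokens)
{
	return arch_atomic_dec_if_positive(tokens) >= 0;
}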
188 
189 #ifdef __powerpc64__
190 
191 #define ATOMIC64_INIT(i)	{ (i) }
192 
193 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
194 {
195 	s64 t;
196 
197 	/* -mprefixed can generate offsets beyond range, fall back hack */
198 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
199 		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
200 	else
201 		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
202 
203 	return t;
204 }
205 
206 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
207 {
208 	/* -mprefixed can generate offsets beyond range, fall back hack */
209 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
210 		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
211 	else
212 		__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
213 }
214 
215 #define ATOMIC64_OP(op, asm_op)						\
216 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)		\
217 {									\
218 	s64 t;								\
219 									\
220 	__asm__ __volatile__(						\
221 "1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
222 	#asm_op " %0,%2,%0\n"						\
223 "	stdcx.	%0,0,%3 \n"						\
224 "	bne-	1b\n"							\
225 	: "=&r" (t), "+m" (v->counter)					\
226 	: "r" (a), "r" (&v->counter)					\
227 	: "cc");							\
228 }
229 
230 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
231 static inline s64							\
232 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
233 {									\
234 	s64 t;								\
235 									\
236 	__asm__ __volatile__(						\
237 "1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
238 	#asm_op " %0,%2,%0\n"						\
239 "	stdcx.	%0,0,%3\n"						\
240 "	bne-	1b\n"							\
241 	: "=&r" (t), "+m" (v->counter)					\
242 	: "r" (a), "r" (&v->counter)					\
243 	: "cc");							\
244 									\
245 	return t;							\
246 }
247 
248 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
249 static inline s64							\
250 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
251 {									\
252 	s64 res, t;							\
253 									\
254 	__asm__ __volatile__(						\
255 "1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
256 	#asm_op " %1,%3,%0\n"						\
257 "	stdcx.	%1,0,%4\n"						\
258 "	bne-	1b\n"							\
259 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
260 	: "r" (a), "r" (&v->counter)					\
261 	: "cc");							\
262 									\
263 	return res;							\
264 }
265 
266 #define ATOMIC64_OPS(op, asm_op)					\
267 	ATOMIC64_OP(op, asm_op)						\
268 	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
269 	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
270 
271 ATOMIC64_OPS(add, add)
272 ATOMIC64_OPS(sub, subf)
273 
274 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
275 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
276 
277 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
278 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
279 
280 #undef ATOMIC64_OPS
281 #define ATOMIC64_OPS(op, asm_op)					\
282 	ATOMIC64_OP(op, asm_op)						\
283 	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
284 
285 ATOMIC64_OPS(and, and)
286 ATOMIC64_OPS(or, or)
287 ATOMIC64_OPS(xor, xor)
288 
289 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
290 #define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
291 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
292 
293 #undef ATOMIC64_OPS
294 #undef ATOMIC64_FETCH_OP_RELAXED
295 #undef ATOMIC64_OP_RETURN_RELAXED
296 #undef ATOMIC64_OP
297 
298 static __inline__ void arch_atomic64_inc(atomic64_t *v)
299 {
300 	s64 t;
301 
302 	__asm__ __volatile__(
303 "1:	ldarx	%0,0,%2		# atomic64_inc\n\
304 	addic	%0,%0,1\n\
305 	stdcx.	%0,0,%2 \n\
306 	bne-	1b"
307 	: "=&r" (t), "+m" (v->counter)
308 	: "r" (&v->counter)
309 	: "cc", "xer");
310 }
311 #define arch_atomic64_inc arch_atomic64_inc
312 
313 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
314 {
315 	s64 t;
316 
317 	__asm__ __volatile__(
318 "1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
319 "	addic	%0,%0,1\n"
320 "	stdcx.	%0,0,%2\n"
321 "	bne-	1b"
322 	: "=&r" (t), "+m" (v->counter)
323 	: "r" (&v->counter)
324 	: "cc", "xer");
325 
326 	return t;
327 }
328 
329 static __inline__ void arch_atomic64_dec(atomic64_t *v)
330 {
331 	s64 t;
332 
333 	__asm__ __volatile__(
334 "1:	ldarx	%0,0,%2		# atomic64_dec\n\
335 	addic	%0,%0,-1\n\
336 	stdcx.	%0,0,%2\n\
337 	bne-	1b"
338 	: "=&r" (t), "+m" (v->counter)
339 	: "r" (&v->counter)
340 	: "cc", "xer");
341 }
342 #define arch_atomic64_dec arch_atomic64_dec
343 
344 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
345 {
346 	s64 t;
347 
348 	__asm__ __volatile__(
349 "1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
350 "	addic	%0,%0,-1\n"
351 "	stdcx.	%0,0,%2\n"
352 "	bne-	1b"
353 	: "=&r" (t), "+m" (v->counter)
354 	: "r" (&v->counter)
355 	: "cc", "xer");
356 
357 	return t;
358 }
359 
360 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
361 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
362 
363 /*
364  * Atomically test *v and decrement if it is greater than 0.
365  * The function returns the old value of *v minus 1.
366  */
367 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
368 {
369 	s64 t;
370 
371 	__asm__ __volatile__(
372 	PPC_ATOMIC_ENTRY_BARRIER
373 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
374 	addic.	%0,%0,-1\n\
375 	blt-	2f\n\
376 	stdcx.	%0,0,%1\n\
377 	bne-	1b"
378 	PPC_ATOMIC_EXIT_BARRIER
379 	"\n\
380 2:"	: "=&r" (t)
381 	: "r" (&v->counter)
382 	: "cc", "xer", "memory");
383 
384 	return t;
385 }
386 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
387 
388 /**
389  * atomic64_fetch_add_unless - add unless the number is a given value
390  * @v: pointer of type atomic64_t
391  * @a: the amount to add to v...
392  * @u: ...unless v is equal to u.
393  *
394  * Atomically adds @a to @v, so long as it was not @u.
395  * Returns the old value of @v.
396  */
397 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
398 {
399 	s64 t;
400 
401 	__asm__ __volatile__ (
402 	PPC_ATOMIC_ENTRY_BARRIER
403 "1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
404 	cmpd	0,%0,%3 \n\
405 	beq	2f \n\
406 	add	%0,%2,%0 \n"
407 "	stdcx.	%0,0,%1 \n\
408 	bne-	1b \n"
409 	PPC_ATOMIC_EXIT_BARRIER
410 "	subf	%0,%2,%0 \n\
411 2:"
412 	: "=&r" (t)
413 	: "r" (&v->counter), "r" (a), "r" (u)
414 	: "cc", "memory");
415 
416 	return t;
417 }
418 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
419 
420 /**
421  * atomic64_inc_not_zero - increment unless the number is zero
422  * @v: pointer of type atomic64_t
423  *
424  * Atomically increments @v by 1, so long as @v is non-zero.
425  * Returns non-zero if @v was non-zero, and zero otherwise.
426  */
427 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
428 {
429 	s64 t1, t2;
430 
431 	__asm__ __volatile__ (
432 	PPC_ATOMIC_ENTRY_BARRIER
433 "1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
434 	cmpdi	0,%0,0\n\
435 	beq-	2f\n\
436 	addic	%1,%0,1\n\
437 	stdcx.	%1,0,%2\n\
438 	bne-	1b\n"
439 	PPC_ATOMIC_EXIT_BARRIER
440 	"\n\
441 2:"
442 	: "=&r" (t1), "=&r" (t2)
443 	: "r" (&v->counter)
444 	: "cc", "xer", "memory");
445 
446 	return t1 != 0;
447 }
448 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
449 
450 #endif /* __powerpc64__ */
451 
452 #endif /* __KERNEL__ */
453 #endif /* _ASM_POWERPC_ATOMIC_H_ */
454