/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * The *_return_relaxed and {cmp}xchg_relaxed variants are implemented with
 * a "bne-" instruction at the end, so an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
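/*
 * Illustration only (not part of the original header): the generic layer in
 * include/linux/atomic.h is expected to build the _acquire and _release
 * variants by combining the _relaxed operations defined below with the
 * fences above, roughly along these lines:
 *
 *	ret = atomic_add_return_relaxed(i, v);
 *	__atomic_acquire_fence();
 *	return ret;
 */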

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
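/*
 * Each ATOMIC_OPS() expansion above generates three functions; for example
 * ATOMIC_OPS(add, add) provides atomic_add(), atomic_add_return_relaxed()
 * and atomic_fetch_add_relaxed(), each built around a lwarx/stwcx. retry
 * loop.
 */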

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * We don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants, so it
 * would be a surprise).
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_lock	\n"
"	cmpw	0,%0,%3							\n"
"	bne-	2f							\n"
"	stwcx.	%4,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
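/*
 * Usage sketch (illustrative only; 'lock', 'val' and _Q_LOCKED_VAL are
 * placeholders, not defined here): a lock fast path might try to install
 * the locked value only while the lock word is still zero:
 *
 *	int val = 0;
 *
 *	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
 *		return;		// got the lock, with acquire semantics
 *	// 'val' now holds the observed value; fall back to the slow path
 */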

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
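/*
 * Usage sketch (illustrative only; 'ctr' and BIAS are placeholders): take a
 * reference only while the counter has not been frozen at a sentinel value:
 *
 *	if (atomic_fetch_add_unless(&ctr, 1, BIAS) == BIAS)
 *		return false;	// counter was BIAS, nothing was added
 *	return true;
 */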

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
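/*
 * Usage sketch (illustrative only; 'obj' and 'refcount' are placeholders):
 * the usual "take a reference only if the object is still live" pattern:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// already dropped to zero, object is dying
 *	return obj;
 */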

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
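/*
 * Usage sketch (illustrative only; 'count' and the waiting logic are
 * placeholders): a semaphore-style down that never pushes the count below
 * zero.  A negative return means the count was already zero and nothing
 * was decremented:
 *
 *	if (atomic_dec_if_positive(&count) < 0)
 *		wait_for_resource();
 */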

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */