xref: /linux/arch/powerpc/include/asm/atomic.h (revision 396ab6ab284a9fff4154e9bd491372f43de8284c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_ATOMIC_H_
3 #define _ASM_POWERPC_ATOMIC_H_
4 
5 /*
6  * PowerPC atomic operations
7  */
8 
9 #ifdef __KERNEL__
10 #include <linux/types.h>
11 #include <asm/cmpxchg.h>
12 #include <asm/barrier.h>
13 #include <asm/asm-405.h>
14 
15 #define ATOMIC_INIT(i)		{ (i) }
16 
17 /*
18  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
19  * a "bne-" instruction at the end, an isync is enough as an acquire barrier
20  * on platforms without lwsync.
21  */
22 #define __atomic_op_acquire(op, args...)				\
23 ({									\
24 	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
25 	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
26 	__ret;								\
27 })
28 
29 #define __atomic_op_release(op, args...)				\
30 ({									\
31 	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
32 	op##_relaxed(args);						\
33 })
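/*
 * Illustrative expansion (a sketch, not part of the original header): the
 * generic wrappers in <linux/atomic.h> are expected to build the acquire
 * variants from the _relaxed primitives via the hook above, e.g.
 *
 *	atomic_add_return_acquire(a, v)
 *	  => __atomic_op_acquire(atomic_add_return, a, v)
 *	  => ({ int __ret = atomic_add_return_relaxed(a, v);
 *	        __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");
 *	        __ret; })
 */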
34 
35 static __inline__ int atomic_read(const atomic_t *v)
36 {
37 	int t;
38 
39 	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
40 
41 	return t;
42 }
43 
44 static __inline__ void atomic_set(atomic_t *v, int i)
45 {
46 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
47 }
48 
49 #define ATOMIC_OP(op, asm_op)						\
50 static __inline__ void atomic_##op(int a, atomic_t *v)			\
51 {									\
52 	int t;								\
53 									\
54 	__asm__ __volatile__(						\
55 "1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
56 	#asm_op " %0,%2,%0\n"						\
57 	PPC405_ERR77(0,%3)						\
58 "	stwcx.	%0,0,%3 \n"						\
59 "	bne-	1b\n"							\
60 	: "=&r" (t), "+m" (v->counter)					\
61 	: "r" (a), "r" (&v->counter)					\
62 	: "cc");							\
63 }
64 
65 #define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
66 static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
67 {									\
68 	int t;								\
69 									\
70 	__asm__ __volatile__(						\
71 "1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
72 	#asm_op " %0,%2,%0\n"						\
73 	PPC405_ERR77(0, %3)						\
74 "	stwcx.	%0,0,%3\n"						\
75 "	bne-	1b\n"							\
76 	: "=&r" (t), "+m" (v->counter)					\
77 	: "r" (a), "r" (&v->counter)					\
78 	: "cc");							\
79 									\
80 	return t;							\
81 }
82 
83 #define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
84 static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
85 {									\
86 	int res, t;							\
87 									\
88 	__asm__ __volatile__(						\
89 "1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
90 	#asm_op " %1,%3,%0\n"						\
91 	PPC405_ERR77(0, %4)						\
92 "	stwcx.	%1,0,%4\n"						\
93 "	bne-	1b\n"							\
94 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
95 	: "r" (a), "r" (&v->counter)					\
96 	: "cc");							\
97 									\
98 	return res;							\
99 }
100 
101 #define ATOMIC_OPS(op, asm_op)						\
102 	ATOMIC_OP(op, asm_op)						\
103 	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
104 	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
105 
106 ATOMIC_OPS(add, add)
107 ATOMIC_OPS(sub, subf)
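/*
 * For reference (derived from the macros above, no new definitions):
 * ATOMIC_OPS(add, add) expands to
 *
 *	static inline void atomic_add(int a, atomic_t *v);
 *	static inline int  atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int  atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * each built as a lwarx/stwcx. retry loop on v->counter; ATOMIC_OPS(sub,
 * subf) generates the matching atomic_sub*() family using "subf".
 */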
108 
109 #define atomic_add_return_relaxed atomic_add_return_relaxed
110 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
111 
112 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
113 #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
114 
115 #undef ATOMIC_OPS
116 #define ATOMIC_OPS(op, asm_op)						\
117 	ATOMIC_OP(op, asm_op)						\
118 	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
119 
120 ATOMIC_OPS(and, and)
121 ATOMIC_OPS(or, or)
122 ATOMIC_OPS(xor, xor)
123 
124 #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
125 #define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
126 #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
127 
128 #undef ATOMIC_OPS
129 #undef ATOMIC_FETCH_OP_RELAXED
130 #undef ATOMIC_OP_RETURN_RELAXED
131 #undef ATOMIC_OP
132 
133 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
134 
135 static __inline__ void atomic_inc(atomic_t *v)
136 {
137 	int t;
138 
139 	__asm__ __volatile__(
140 "1:	lwarx	%0,0,%2		# atomic_inc\n\
141 	addic	%0,%0,1\n"
142 	PPC405_ERR77(0,%2)
143 "	stwcx.	%0,0,%2 \n\
144 	bne-	1b"
145 	: "=&r" (t), "+m" (v->counter)
146 	: "r" (&v->counter)
147 	: "cc", "xer");
148 }
149 
150 static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
151 {
152 	int t;
153 
154 	__asm__ __volatile__(
155 "1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
156 "	addic	%0,%0,1\n"
157 	PPC405_ERR77(0, %2)
158 "	stwcx.	%0,0,%2\n"
159 "	bne-	1b"
160 	: "=&r" (t), "+m" (v->counter)
161 	: "r" (&v->counter)
162 	: "cc", "xer");
163 
164 	return t;
165 }
166 
167 /*
168  * atomic_inc_and_test - increment and test
169  * @v: pointer of type atomic_t
170  *
171  * Atomically increments @v by 1
172  * and returns true if the result is zero, or false for all
173  * other cases.
174  */
175 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
176 
177 static __inline__ void atomic_dec(atomic_t *v)
178 {
179 	int t;
180 
181 	__asm__ __volatile__(
182 "1:	lwarx	%0,0,%2		# atomic_dec\n\
183 	addic	%0,%0,-1\n"
184 	PPC405_ERR77(0,%2)
185 "	stwcx.	%0,0,%2\n\
186 	bne-	1b"
187 	: "=&r" (t), "+m" (v->counter)
188 	: "r" (&v->counter)
189 	: "cc", "xer");
190 }
191 
192 static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
193 {
194 	int t;
195 
196 	__asm__ __volatile__(
197 "1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
198 "	addic	%0,%0,-1\n"
199 	PPC405_ERR77(0, %2)
200 "	stwcx.	%0,0,%2\n"
201 "	bne-	1b"
202 	: "=&r" (t), "+m" (v->counter)
203 	: "r" (&v->counter)
204 	: "cc", "xer");
205 
206 	return t;
207 }
208 
209 #define atomic_inc_return_relaxed atomic_inc_return_relaxed
210 #define atomic_dec_return_relaxed atomic_dec_return_relaxed
211 
212 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
213 #define atomic_cmpxchg_relaxed(v, o, n) \
214 	cmpxchg_relaxed(&((v)->counter), (o), (n))
215 #define atomic_cmpxchg_acquire(v, o, n) \
216 	cmpxchg_acquire(&((v)->counter), (o), (n))
217 
218 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
219 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
220 
221 /**
222  * __atomic_add_unless - add unless the number is a given value
223  * @v: pointer of type atomic_t
224  * @a: the amount to add to v...
225  * @u: ...unless v is equal to u.
226  *
227  * Atomically adds @a to @v, so long as it was not @u.
228  * Returns the old value of @v.
229  */
230 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
231 {
232 	int t;
233 
234 	__asm__ __volatile__ (
235 	PPC_ATOMIC_ENTRY_BARRIER
236 "1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
237 	cmpw	0,%0,%3 \n\
238 	beq	2f \n\
239 	add	%0,%2,%0 \n"
240 	PPC405_ERR77(0,%1)
241 "	stwcx.	%0,0,%1 \n\
242 	bne-	1b \n"
243 	PPC_ATOMIC_EXIT_BARRIER
244 "	subf	%0,%2,%0 \n\
245 2:"
246 	: "=&r" (t)
247 	: "r" (&v->counter), "r" (a), "r" (u)
248 	: "cc", "memory");
249 
250 	return t;
251 }
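/*
 * Usage sketch (hypothetical caller; "obj" and "refcnt" are invented for
 * illustration): since the old value of *v is returned, a caller checks
 * it against @u to see whether the add happened, e.g.
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;
 *
 * A zero return here means the counter was already zero and nothing was
 * added.
 */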
252 
253 /**
254  * atomic_inc_not_zero - increment unless the number is zero
255  * @v: pointer of type atomic_t
256  *
257  * Atomically increments @v by 1, so long as @v is non-zero.
258  * Returns non-zero if @v was non-zero, and zero otherwise.
259  */
260 static __inline__ int atomic_inc_not_zero(atomic_t *v)
261 {
262 	int t1, t2;
263 
264 	__asm__ __volatile__ (
265 	PPC_ATOMIC_ENTRY_BARRIER
266 "1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
267 	cmpwi	0,%0,0\n\
268 	beq-	2f\n\
269 	addic	%1,%0,1\n"
270 	PPC405_ERR77(0,%2)
271 "	stwcx.	%1,0,%2\n\
272 	bne-	1b\n"
273 	PPC_ATOMIC_EXIT_BARRIER
274 	"\n\
275 2:"
276 	: "=&r" (t1), "=&r" (t2)
277 	: "r" (&v->counter)
278 	: "cc", "xer", "memory");
279 
280 	return t1;
281 }
282 #define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
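/*
 * Example (illustrative only; "entry" and its refcnt field are made-up
 * names): the usual pattern is to take a reference only while the object
 * is still live:
 *
 *	if (!atomic_inc_not_zero(&entry->refcnt))
 *		entry = NULL;
 *
 * i.e. a zero return means the count had already dropped to zero and the
 * object must not be used.
 */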
283 
284 #define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
285 #define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
286 
287 /*
288  * Atomically test *v and decrement if it is greater than 0.
289  * The function returns the old value of *v minus 1, even if
290  * the atomic variable, v, was not decremented.
291  */
292 static __inline__ int atomic_dec_if_positive(atomic_t *v)
293 {
294 	int t;
295 
296 	__asm__ __volatile__(
297 	PPC_ATOMIC_ENTRY_BARRIER
298 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
299 	cmpwi	%0,1\n\
300 	addi	%0,%0,-1\n\
301 	blt-	2f\n"
302 	PPC405_ERR77(0,%1)
303 "	stwcx.	%0,0,%1\n\
304 	bne-	1b"
305 	PPC_ATOMIC_EXIT_BARRIER
306 	"\n\
307 2:"	: "=&b" (t)
308 	: "r" (&v->counter)
309 	: "cc", "memory");
310 
311 	return t;
312 }
313 #define atomic_dec_if_positive atomic_dec_if_positive
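/*
 * Example (a sketch; "pool" and "tokens" are invented names): because the
 * decrement is only stored when the old value was positive, a negative
 * return means nothing was consumed:
 *
 *	if (atomic_dec_if_positive(&pool->tokens) < 0)
 *		return -EBUSY;
 *
 * and the counter is left unchanged in that case.
 */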
314 
315 #ifdef __powerpc64__
316 
317 #define ATOMIC64_INIT(i)	{ (i) }
318 
319 static __inline__ long atomic64_read(const atomic64_t *v)
320 {
321 	long t;
322 
323 	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
324 
325 	return t;
326 }
327 
328 static __inline__ void atomic64_set(atomic64_t *v, long i)
329 {
330 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
331 }
332 
333 #define ATOMIC64_OP(op, asm_op)						\
334 static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
335 {									\
336 	long t;								\
337 									\
338 	__asm__ __volatile__(						\
339 "1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
340 	#asm_op " %0,%2,%0\n"						\
341 "	stdcx.	%0,0,%3 \n"						\
342 "	bne-	1b\n"							\
343 	: "=&r" (t), "+m" (v->counter)					\
344 	: "r" (a), "r" (&v->counter)					\
345 	: "cc");							\
346 }
347 
348 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
349 static inline long							\
350 atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
351 {									\
352 	long t;								\
353 									\
354 	__asm__ __volatile__(						\
355 "1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
356 	#asm_op " %0,%2,%0\n"						\
357 "	stdcx.	%0,0,%3\n"						\
358 "	bne-	1b\n"							\
359 	: "=&r" (t), "+m" (v->counter)					\
360 	: "r" (a), "r" (&v->counter)					\
361 	: "cc");							\
362 									\
363 	return t;							\
364 }
365 
366 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
367 static inline long							\
368 atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
369 {									\
370 	long res, t;							\
371 									\
372 	__asm__ __volatile__(						\
373 "1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
374 	#asm_op " %1,%3,%0\n"						\
375 "	stdcx.	%1,0,%4\n"						\
376 "	bne-	1b\n"							\
377 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
378 	: "r" (a), "r" (&v->counter)					\
379 	: "cc");							\
380 									\
381 	return res;							\
382 }
383 
384 #define ATOMIC64_OPS(op, asm_op)					\
385 	ATOMIC64_OP(op, asm_op)						\
386 	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
387 	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
388 
389 ATOMIC64_OPS(add, add)
390 ATOMIC64_OPS(sub, subf)
391 
392 #define atomic64_add_return_relaxed atomic64_add_return_relaxed
393 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
394 
395 #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
396 #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
397 
398 #undef ATOMIC64_OPS
399 #define ATOMIC64_OPS(op, asm_op)					\
400 	ATOMIC64_OP(op, asm_op)						\
401 	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
402 
403 ATOMIC64_OPS(and, and)
404 ATOMIC64_OPS(or, or)
405 ATOMIC64_OPS(xor, xor)
406 
407 #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
408 #define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
409 #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
410 
411 #undef ATOMIC64_OPS
412 #undef ATOMIC64_FETCH_OP_RELAXED
413 #undef ATOMIC64_OP_RETURN_RELAXED
414 #undef ATOMIC64_OP
415 
416 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
417 
418 static __inline__ void atomic64_inc(atomic64_t *v)
419 {
420 	long t;
421 
422 	__asm__ __volatile__(
423 "1:	ldarx	%0,0,%2		# atomic64_inc\n\
424 	addic	%0,%0,1\n\
425 	stdcx.	%0,0,%2 \n\
426 	bne-	1b"
427 	: "=&r" (t), "+m" (v->counter)
428 	: "r" (&v->counter)
429 	: "cc", "xer");
430 }
431 
432 static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
433 {
434 	long t;
435 
436 	__asm__ __volatile__(
437 "1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
438 "	addic	%0,%0,1\n"
439 "	stdcx.	%0,0,%2\n"
440 "	bne-	1b"
441 	: "=&r" (t), "+m" (v->counter)
442 	: "r" (&v->counter)
443 	: "cc", "xer");
444 
445 	return t;
446 }
447 
448 /*
449  * atomic64_inc_and_test - increment and test
450  * @v: pointer of type atomic64_t
451  *
452  * Atomically increments @v by 1
453  * and returns true if the result is zero, or false for all
454  * other cases.
455  */
456 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
457 
458 static __inline__ void atomic64_dec(atomic64_t *v)
459 {
460 	long t;
461 
462 	__asm__ __volatile__(
463 "1:	ldarx	%0,0,%2		# atomic64_dec\n\
464 	addic	%0,%0,-1\n\
465 	stdcx.	%0,0,%2\n\
466 	bne-	1b"
467 	: "=&r" (t), "+m" (v->counter)
468 	: "r" (&v->counter)
469 	: "cc", "xer");
470 }
471 
472 static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
473 {
474 	long t;
475 
476 	__asm__ __volatile__(
477 "1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
478 "	addic	%0,%0,-1\n"
479 "	stdcx.	%0,0,%2\n"
480 "	bne-	1b"
481 	: "=&r" (t), "+m" (v->counter)
482 	: "r" (&v->counter)
483 	: "cc", "xer");
484 
485 	return t;
486 }
487 
488 #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
489 #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
490 
491 #define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
492 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
493 
494 /*
495  * Atomically test *v and decrement if it is greater than 0.
496  * The function returns the old value of *v minus 1.
497  */
498 static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
499 {
500 	long t;
501 
502 	__asm__ __volatile__(
503 	PPC_ATOMIC_ENTRY_BARRIER
504 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
505 	addic.	%0,%0,-1\n\
506 	blt-	2f\n\
507 	stdcx.	%0,0,%1\n\
508 	bne-	1b"
509 	PPC_ATOMIC_EXIT_BARRIER
510 	"\n\
511 2:"	: "=&r" (t)
512 	: "r" (&v->counter)
513 	: "cc", "xer", "memory");
514 
515 	return t;
516 }
517 
518 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
519 #define atomic64_cmpxchg_relaxed(v, o, n) \
520 	cmpxchg_relaxed(&((v)->counter), (o), (n))
521 #define atomic64_cmpxchg_acquire(v, o, n) \
522 	cmpxchg_acquire(&((v)->counter), (o), (n))
523 
524 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
525 #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
526 
527 /**
528  * atomic64_add_unless - add unless the number is a given value
529  * @v: pointer of type atomic64_t
530  * @a: the amount to add to v...
531  * @u: ...unless v is equal to u.
532  *
533  * Atomically adds @a to @v, so long as it was not @u.
534  * Returns non-zero if @v was not @u, and zero otherwise.
535  */
536 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
537 {
538 	long t;
539 
540 	__asm__ __volatile__ (
541 	PPC_ATOMIC_ENTRY_BARRIER
542 "1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
543 	cmpd	0,%0,%3 \n\
544 	beq	2f \n\
545 	add	%0,%2,%0 \n"
546 "	stdcx.	%0,0,%1 \n\
547 	bne-	1b \n"
548 	PPC_ATOMIC_EXIT_BARRIER
549 "	subf	%0,%2,%0 \n\
550 2:"
551 	: "=&r" (t)
552 	: "r" (&v->counter), "r" (a), "r" (u)
553 	: "cc", "memory");
554 
555 	return t != u;
556 }
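/*
 * Usage sketch (hypothetical caller; "inflight" and LIMIT are invented):
 * unlike __atomic_add_unless() above, this helper already returns a
 * boolean-style result, so callers test it directly:
 *
 *	if (!atomic64_add_unless(&stats->inflight, 1, LIMIT))
 *		return -EBUSY;
 *
 * A zero return means the counter was already at LIMIT and was not bumped.
 */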
557 
558 /**
559  * atomic64_inc_not_zero - increment unless the number is zero
560  * @v: pointer of type atomic64_t
561  *
562  * Atomically increments @v by 1, so long as @v is non-zero.
563  * Returns non-zero if @v was non-zero, and zero otherwise.
564  */
565 static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
566 {
567 	long t1, t2;
568 
569 	__asm__ __volatile__ (
570 	PPC_ATOMIC_ENTRY_BARRIER
571 "1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
572 	cmpdi	0,%0,0\n\
573 	beq-	2f\n\
574 	addic	%1,%0,1\n\
575 	stdcx.	%1,0,%2\n\
576 	bne-	1b\n"
577 	PPC_ATOMIC_EXIT_BARRIER
578 	"\n\
579 2:"
580 	: "=&r" (t1), "=&r" (t2)
581 	: "r" (&v->counter)
582 	: "cc", "xer", "memory");
583 
584 	return t1 != 0;
585 }
586 
587 #endif /* __powerpc64__ */
588 
589 #endif /* __KERNEL__ */
590 #endif /* _ASM_POWERPC_ATOMIC_H_ */
591