/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
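
/*
 * Illustrative sketch (not part of this header): the generic layer in
 * include/linux/atomic.h consumes the two fences above to build the
 * _acquire and _release variants of each operation from its _relaxed
 * form, roughly:
 *
 *	_acquire:	ret = op_relaxed(...); __atomic_acquire_fence();
 *	_release:	__atomic_release_fence(); ret = op_relaxed(...);
 */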

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
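
/*
 * Illustrative note: the two invocations above expand into atomic_add(),
 * atomic_sub(), atomic_add_return_relaxed(), atomic_sub_return_relaxed(),
 * atomic_fetch_add_relaxed() and atomic_fetch_sub_relaxed().  The ordered
 * (acquire/release/fully-ordered) forms are then derived from the
 * _relaxed ones by the generic code, using the fences defined above.
 */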

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
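
/*
 * Example usage (illustrative only; 'v' is any atomic_t): a typical
 * compare-and-swap loop built on atomic_cmpxchg(), here doubling a
 * counter:
 *
 *	int old = atomic_read(v), prev;
 *
 *	for (;;) {
 *		prev = atomic_cmpxchg(v, old, old * 2);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */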

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
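
/*
 * Illustrative note: the generic atomic_add_unless() helper is built on
 * top of this primitive, roughly as
 *
 *	atomic_add_unless(v, a, u) == (atomic_fetch_add_unless(v, a, u) != u)
 *
 * i.e. it returns true only if the add was actually performed.
 */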

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
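
/*
 * Example usage (illustrative only; 'obj' and its 'refs' field are
 * hypothetical): take a reference only while the object is still live,
 * as in a typical lookup pattern:
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;
 *	... use obj, drop the reference later ...
 */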

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
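
/*
 * Illustrative examples of the return value:
 *
 *	*v == 2:  *v becomes 1, returns 1
 *	*v == 1:  *v becomes 0, returns 0
 *	*v == 0:  *v unchanged, returns -1
 *
 * so callers can test atomic_dec_if_positive(v) >= 0 to learn whether
 * the decrement actually happened.
 */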

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
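
/*
 * Illustrative note: as in the 32-bit case above, these two invocations
 * generate atomic64_add()/atomic64_sub() together with their
 * _return_relaxed and fetch_..._relaxed forms; the only difference is
 * the use of ldarx/stdcx. on the full 64-bit counter.
 */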

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
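
/*
 * Example usage (illustrative only; 'credits' is a hypothetical
 * atomic64_t budget): consume one unit without going negative:
 *
 *	if (atomic64_dec_if_positive(&credits) < 0)
 *		return -EBUSY;
 */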

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
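
/*
 * As with the 32-bit variant above, the generic atomic64_add_unless()
 * helper is expected to be built on this primitive, returning true only
 * if the add was performed (illustrative, not defined here):
 *
 *	atomic64_add_unless(v, a, u) == (atomic64_fetch_add_unless(v, a, u) != u)
 */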

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
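
/*
 * Illustrative note: unlike the 32-bit atomic_inc_not_zero() above,
 * which returns the raw old value, this variant normalizes the result
 * to 0/1 so it fits the int return type; e.g. (with a hypothetical
 * 'ctx->users' counter):
 *
 *	if (!atomic64_inc_not_zero(&ctx->users))
 *		return -ENOENT;
 */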

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */