#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */
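/*
 * The read-modify-write operations below are built from
 * load-reserve/store-conditional (lwarx/stwcx., and ldarx/stdcx. on
 * 64-bit) retry loops: the update is retried whenever the
 * store-conditional fails because the reservation was lost.
 * Operations that return a value are bracketed by
 * PPC_ATOMIC_ENTRY_BARRIER/PPC_ATOMIC_EXIT_BARRIER and so act as full
 * memory barriers; the void variants guarantee atomicity only.
 * PPC405_ERR77() emits the workaround for CPU erratum #77 on the
 * PPC405 ahead of each stwcx.
 */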

#include <linux/types.h>

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }

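/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */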
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

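/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */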
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

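/**
 * atomic_add - add integer to atomic variable
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v.  No memory barrier is implied.
 */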
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

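/**
 * atomic_add_return - add integer to atomic variable and return result
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns the new value.  Acts as a full
 * memory barrier.
 */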
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

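/**
 * atomic_add_negative - add and test if negative
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns true if the result is negative,
 * or false otherwise.
 */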
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

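/**
 * atomic_sub - subtract integer from atomic variable
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v.  No memory barrier is implied.
 */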
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

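/**
 * atomic_sub_return - subtract integer from atomic variable and return result
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v and returns the new value.  Acts as
 * a full memory barrier.
 */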
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

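/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  No memory barrier is implied.
 */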
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

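/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.  Acts as a
 * full memory barrier.
 */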
static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

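/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  No memory barrier is implied.
 */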
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

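/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.  Acts as a
 * full memory barrier.
 */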
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

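/*
 * atomic_cmpxchg() and atomic_xchg() are thin wrappers around the
 * generic cmpxchg()/xchg() operations on the counter field.
 */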
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}


#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

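/*
 * A minimal usage sketch (hypothetical code, not part of this header):
 * atomic_dec_and_test() is the usual way to drop a reference count and
 * detect the final put.  "struct foo" and foo_put() are made-up names.
 *
 *	struct foo {
 *		atomic_t refcount;
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);
 *	}
 */
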
/*
 * Atomically test *v and decrement it if it is greater than 0.
 * Returns the old value of *v minus 1, even when the atomic
 * variable was not decremented (the result is then negative).
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

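/**
 * atomic64_read - read 64-bit atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */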
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

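/**
 * atomic64_set - set 64-bit atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */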
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

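/**
 * atomic64_add - add long to 64-bit atomic variable
 * @a: long value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @a to @v.  No memory barrier is implied.
 */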
static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

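/**
 * atomic64_add_return - add long to 64-bit atomic variable and return result
 * @a: long value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @a to @v and returns the new value.  Acts as a full
 * memory barrier.
 */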
static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

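/**
 * atomic64_add_negative - add and test if negative
 * @a: long value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @a to @v and returns true if the result is negative,
 * or false otherwise.
 */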
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

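/**
 * atomic64_sub - subtract long from 64-bit atomic variable
 * @a: long value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @a from @v.  No memory barrier is implied.
 */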
static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

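/**
 * atomic64_sub_return - subtract long from 64-bit atomic variable and return result
 * @a: long value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @a from @v and returns the new value.  Acts as
 * a full memory barrier.
 */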
static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

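/**
 * atomic64_inc - increment 64-bit atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.  No memory barrier is implied.
 */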
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

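/**
 * atomic64_inc_return - increment 64-bit atomic variable and return result
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1 and returns the new value.  Acts as a
 * full memory barrier.
 */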
static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

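/**
 * atomic64_dec - decrement 64-bit atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.  No memory barrier is implied.
 */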
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

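/**
 * atomic64_dec_return - decrement 64-bit atomic variable and return result
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and returns the new value.  Acts as a
 * full memory barrier.
 */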
static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement it if it is greater than 0.
 * Returns the old value of *v minus 1, even when the atomic
 * variable was not decremented (the result is then negative).
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

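/*
 * atomic64_cmpxchg() and atomic64_xchg() are thin wrappers around the
 * generic cmpxchg()/xchg() operations on the counter field.
 */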
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

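/**
 * atomic64_inc_not_zero - increment unless the value is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v was non-zero.
 * Returns non-zero if the increment happened, and zero otherwise.
 */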
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */