#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
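
/*
 * Illustrative usage of the basic interface (a sketch only, not part of
 * this header; "pending" and do_something() are hypothetical):
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_add(1, &pending);
 *	if (atomic_read(&pending) > 0)
 *		do_something();
 *
 * Note that atomic_read() and atomic_set() are plain accesses; only the
 * arithmetic operations below use LL/SC sequences.
 */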

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
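
/*
 * Ignoring memory ordering, the LL/SC sequence above behaves like the
 * following compare-and-swap retry loop (a sketch for illustration only,
 * using atomic_cmpxchg() defined further down):
 *
 *	int old;
 *
 *	do {
 *		old = atomic_read(v);
 *	} while (atomic_cmpxchg(v, old, old + i) != old);
 *
 * except that a failed stl_c simply branches to the out-of-line stub in
 * ".subsection 2" and retries from label 1.
 */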

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
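
/*
 * Unlike atomic_add() above, the *_return variants are bracketed by
 * smp_mb() and are therefore fully ordered.  Typical use, as a sketch
 * only ("next_ticket" is a hypothetical atomic_t):
 *
 *	int ticket = atomic_add_return(1, &next_ticket);
 */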

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

/*
 * Atomic exchange routines.
 */

#define __ASM__MB
#define ____xchg(type, args...)		__xchg ## type ## _local(args)
#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
#include <asm/xchg.h>

#define xchg_local(ptr,x)						\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
				       sizeof(*(ptr)));			\
  })

#define cmpxchg_local(ptr, o, n)					\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
					  (unsigned long)_n_,		\
					  sizeof(*(ptr)));		\
  })

#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })

#ifdef CONFIG_SMP
#undef __ASM__MB
#define __ASM__MB	"\tmb\n"
#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...)		__xchg ##type(args)
#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
#include <asm/xchg.h>

#define xchg(ptr,x)							\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_,		\
				 sizeof(*(ptr)));			\
  })

#define cmpxchg(ptr, o, n)						\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		\
				    (unsigned long)_n_,	sizeof(*(ptr)));\
  })

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
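
/*
 * Typical cmpxchg() use is an optimistic update loop, sketched here for
 * illustration only ("flags" and SOME_FLAG are hypothetical):
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = flags;
 *		new = old | SOME_FLAG;
 *	} while (cmpxchg(&flags, old, new) != old);
 *
 * The _local variants above perform the same operation but without the
 * SMP memory barrier.
 */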

#undef __ASM__MB
#undef ____xchg
#undef ____cmpxchg

#define __HAVE_ARCH_CMPXCHG 1

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
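
/*
 * A common use is taking a reference only while the object is still
 * live, e.g. (a sketch only; "obj" and its "refcount" are hypothetical):
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;
 *
 * i.e. the reference is taken only if the count was not already zero.
 */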


/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u (i.e. the add was performed),
 * zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
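
/*
 * The classic put-side pattern built from these helpers, as a sketch
 * only ("obj", "refcount" and free_obj() are hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_obj(obj);
 *
 * atomic_dec_and_test() is fully ordered because it is built on
 * atomic_sub_return().
 */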

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
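
/*
 * The plain (non-value-returning) atomics above carry no implied
 * barriers, so these really are full smp_mb()s.  Sketch of use,
 * illustrative only ("obj", "ready" and "pending" are hypothetical):
 *
 *	obj->ready = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */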

#endif /* _ALPHA_ATOMIC_H */