#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)  { (i) }

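/*
 * __CS_LOOP is a compare-and-swap retry loop: load the counter, apply
 * op_string (e.g. "ar" for add) to a copy, then use CS to store the
 * result only if the counter still holds the value that was loaded.
 * If another CPU changed it in the meantime, CS sets a non-zero
 * condition code and "jl 0b" retries with the freshly loaded value.
 */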
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

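/*
 * Aligned int loads and stores are atomic on s390, so atomic_read()
 * and atomic_set() only need barrier() to keep the compiler from
 * reordering or caching the access.
 */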
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
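
/*
 * Illustrative usage only (consumer code, not part of this header):
 * detect the transition to zero with the *_and_test variants.
 *
 *	static atomic_t users = ATOMIC_INIT(-1);
 *
 *	if (atomic_inc_and_test(&users))	(true when the counter hits 0)
 *		init_first_user();		(init_first_user is hypothetical)
 */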

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

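/*
 * atomic_clear_mask() ANDs the complement of mask into the counter
 * ("nr" = AND register), atomic_set_mask() ORs mask in ("or" = OR
 * register); both reuse the __CS_LOOP retry scheme.
 */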
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

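/*
 * atomic_cmpxchg() is a single CS: if the counter equals old it is
 * replaced by new; either way the value the counter held before the
 * instruction is returned (on failure CS loads it into "old").
 */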
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

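/*
 * Add a to the counter unless it holds u: the usual cmpxchg loop,
 * re-reading the counter after every failed attempt and bailing out
 * once the forbidden value u is seen.  Returns non-zero iff the add
 * was performed.
 */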
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

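/*
 * The 64-bit variant of __CS_LOOP above, built on LG/LGR/CSG instead
 * of L/LR/CS.
 */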
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

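/*
 * On 31-bit machines a 64-bit counter does not fit into a single
 * register, so it lives in an even/odd register pair (register_pair)
 * and is updated atomically with CDS (compare double and swap).
 */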
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

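/*
 * atomic64_xchg() retries CDS until the double word is swapped without
 * interference; atomic64_cmpxchg() is a single CDS with the usual
 * compare-and-exchange semantics.
 */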
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

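/*
 * The remaining 31-bit operations are built on atomic64_cmpxchg():
 * read, compute the new value, retry until no concurrent update got
 * in between.
 */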
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* clear the mask bits, matching the 64-bit version */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

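/*
 * Same technique as atomic_add_unless() above; works on top of either
 * atomic64_cmpxchg() implementation.
 */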
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

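/*
 * CS/CSG/CDS serialize the CPU, so the atomic ops above already act
 * as full memory barriers; defining these hooks as smp_mb() is the
 * conservative choice.
 */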
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */