xref: /linux/arch/s390/include/asm/atomic.h (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in SMP environments.
 */

typedef struct {
	int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }
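
/*
 * Usage sketch (illustrative, not part of the original header): a
 * statically initialized counter.  The name foo_count is hypothetical.
 *
 *	static atomic_t foo_count = ATOMIC_INIT(0);
 *
 * All cross-CPU updates must go through the atomic_* operations below;
 * an open-coded read-modify-write of foo_count.counter is not SMP-safe.
 */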

#ifdef __KERNEL__

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})
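
/*
 * For illustration (not from the original source): __CS_LOOP expands to
 * a compare-and-swap retry loop.  CS stores new_val into ptr->counter
 * only if the counter still equals old_val; on a mismatch it reloads
 * the current value into old_val and "jl 0b" retries.  Roughly:
 *
 *	old_val = ptr->counter;
 *	do {
 *		new_val = old_val <op> op_val;
 *	} while (!cs(&ptr->counter, &old_val, new_val));
 *	return new_val;
 */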

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}
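
/*
 * Illustrative note (not from the original source): barrier() above is
 * only a compiler barrier, so atomic_read()/atomic_set() constrain the
 * optimizer, not other CPUs.  Code that needs cross-CPU ordering must
 * still use explicit memory barriers, e.g.:
 *
 *	atomic_set(&ready, 1);
 *	smp_mb();	order the store against later accesses
 */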

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
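
/*
 * Usage sketch (illustrative, not from the original source): the return
 * value of atomic_add_return() is the counter value after the update,
 * which allows bounded counting.  nr_requests and MAX_REQUESTS are
 * hypothetical.
 *
 *	if (atomic_add_return(1, &nr_requests) > MAX_REQUESTS) {
 *		atomic_dec(&nr_requests);
 *		return -EBUSY;
 *	}
 */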

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
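
/*
 * Usage sketch (illustrative, not from the original source): the
 * classic reference-count release pattern.  struct foo and foo_put()
 * are hypothetical.
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (atomic_dec_and_test(&p->refcnt))
 *			kfree(p);
 *	}
 */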

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}
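
/*
 * Usage sketch (illustrative, not from the original source): atomically
 * setting and clearing flag bits via OR and AND (NR).  The flag name
 * and the flags field are hypothetical.
 *
 *	#define FOO_RUNNING	0x01
 *
 *	atomic_set_mask(FOO_RUNNING, &dev->flags);
 *	atomic_clear_mask(FOO_RUNNING, &dev->flags);
 */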

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
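
/*
 * Usage sketch (illustrative, not from the original source): a lock-free
 * read-modify-write built on atomic_cmpxchg(), which returns the value
 * the counter held before the attempt.  CAP is a hypothetical limit.
 *
 *	int old, prev;
 *
 *	old = atomic_read(&v);
 *	for (;;) {
 *		prev = atomic_cmpxchg(&v, old, min(old + 1, CAP));
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */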

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
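
/*
 * Usage sketch (illustrative, not from the original source):
 * atomic_inc_not_zero() only takes a reference if the object is still
 * live, the usual pattern for lockless lookups.  foo_lookup() and
 * foo_find() are hypothetical.
 *
 *	struct foo *foo_lookup(int id)
 *	{
 *		struct foo *p = foo_find(id);
 *
 *		if (p && !atomic_inc_not_zero(&p->refcnt))
 *			p = NULL;	refcount already dropped to zero
 *		return p;
 *	}
 */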

#undef __CS_LOOP

#ifdef __s390x__
typedef struct {
	long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)  { (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic64_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic64_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static __inline__ long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
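
/*
 * Usage sketch (illustrative, not from the original source): callers
 * pair these with atomic_inc()/atomic_dec() when earlier stores must be
 * visible before the counter change is observed, e.g.:
 *
 *	obj->status = DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 *
 * Here they are defined as full smp_mb() barriers.
 */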

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */