xref: /linux/arch/arm64/include/asm/atomic.h (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
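/*
 * Illustrative sketch of a typical caller (hypothetical names, not part of
 * the kernel), using the ops defined in this header:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void get_user_ref(void)          { atomic_inc(&nr_users); }
 *	int put_user_ref_and_test(void)  { return atomic_dec_and_test(&nr_users); }
 *
 * Each op below retries its exclusive load/store sequence whenever the
 * store-exclusive reports failure, i.e. another observer intervened.
 */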
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, [%3]\n"
"	add	%w0, %w0, %w4\n"
"	stxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

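/*
 * The value-returning variants below use ldaxr/stlxr (load-acquire /
 * store-release) instead of the plain ldxr/stxr used by atomic_add() and
 * atomic_sub(), which is how this version provides the ordering expected of
 * atomic_*_return() and atomic_cmpxchg().
 */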
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldaxr	%w0, [%3]\n"
"	add	%w0, %w0, %w4\n"
"	stlxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, [%3]\n"
"	sub	%w0, %w0, %w4\n"
"	stxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldaxr	%w0, [%3]\n"
"	sub	%w0, %w0, %w4\n"
"	stlxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

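/*
 * atomic_cmpxchg() returns the value that was observed in *ptr; the new value
 * was stored iff that observed value equals 'old'. A typical retry loop built
 * on top of it (illustrative sketch, 'v' is a hypothetical atomic_t):
 *
 *	int cur, seen;
 *
 *	cur = atomic_read(&v);
 *	for (;;) {
 *		seen = atomic_cmpxchg(&v, cur, cur + 1);
 *		if (seen == cur)
 *			break;
 *		cur = seen;
 *	}
 *
 * __atomic_add_unless() below uses the same pattern.
 */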
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1:	ldaxr	%w1, [%3]\n"
"	cmp	%w1, %w4\n"
"	b.ne	2f\n"
"	stlxr	%w0, %w5, [%3]\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}

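/*
 * atomic_clear_mask() atomically clears the bits set in 'mask' from the
 * 64-bit word at 'addr'. Unlike the ops above it works on a plain
 * unsigned long rather than an atomic_t, hence the full-width (non-'w')
 * register operands.
 */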
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	asm volatile("// atomic_clear_mask\n"
"1:	ldxr	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

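/*
 * __atomic_add_unless() adds 'a' to v unless v currently equals 'u', built as
 * an atomic_cmpxchg() retry loop. It returns the value observed before any
 * update, so callers compare the result against 'u' to see whether the add
 * took place.
 */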
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

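/*
 * atomic_add()/atomic_sub() (and therefore atomic_inc()/atomic_dec()) use
 * plain ldxr/stxr and imply no memory ordering, so callers that need ordering
 * around them combine them with the barriers below.
 */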
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

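/*
 * The atomic64_* ops below mirror their 32-bit counterparts above, operating
 * on atomic64_t counters with the full 64-bit (non-'w') register forms of
 * ldxr/stxr and ldaxr/stlxr.
 */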
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldaxr	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldaxr	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldaxr	%1, [%3]\n"
"	cmp	%1, %4\n"
"	b.ne	2f\n"
"	stlxr	%w0, %5, [%3]\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

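/*
 * atomic64_dec_if_positive() decrements v only when the result would not be
 * negative and returns the decremented value. If v was already zero or
 * negative, nothing is stored and the (negative) would-be result is returned,
 * so a negative return means the decrement did not happen.
 */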
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldaxr	%0, [%3]\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter)
	: "cc");

	return result;
}

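/*
 * Unlike __atomic_add_unless() above, atomic64_add_unless() returns whether
 * the add was performed (non-zero when v was not 'u') rather than the old
 * value. atomic64_inc_not_zero() below is defined in terms of it.
 */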
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif