/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }
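/*
 * Typical usage (illustrative only; the variable name is made up):
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&example_count);
 *	if (atomic_dec_and_test(&example_count))
 *		do_cleanup();
 */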

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
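/*
 * Conceptually (an illustrative sketch only; load_exclusive() and
 * store_exclusive() are made-up names, not real helpers), each
 * ldrex/strex sequence below behaves like:
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = op(old, i);
 *	} while (store_exclusive(&v->counter, new) != 0);
 *
 * store_exclusive() fails, forcing a retry, if another CPU or an
 * intervening context switch may have touched the location since the
 * matching load_exclusive().
 */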
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

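/*
 * The *_return variants are bracketed with smp_mb() so that, as the
 * generic atomic API expects, they behave as full memory barriers.
 * The void atomic_add()/atomic_sub() variants above provide no
 * ordering guarantees of their own.
 */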
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

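/*
 * atomic_cmpxchg() returns the value that was in ptr->counter; the new
 * value is stored only if that value equalled 'old'.  When the comparison
 * fails, strexeq is skipped, 'res' stays 0 and the loop exits without
 * writing.  A typical (illustrative) caller pattern:
 *
 *	for (;;) {
 *		cur = atomic_read(v);
 *		if (atomic_cmpxchg(v, cur, cur + 1) == cur)
 *			break;
 *	}
 */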
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

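/*
 * atomic_clear_mask() atomically clears the bits set in 'mask' from the
 * word at '*addr'.  It operates on a plain unsigned long rather than an
 * atomic_t; as with the operations above, mixing it with plain writes to
 * the same word breaks atomicity.
 */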
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
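/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so these fallbacks get their atomicity
 * by disabling local interrupts around a plain read-modify-write.  That is
 * sufficient only because SMP is ruled out above: with a single CPU, an
 * interrupt is the only thing that could race with the update.
 */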

#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

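/*
 * atomic_add_unless() adds 'a' to the counter unless it currently holds
 * 'u', and returns non-zero iff the add was performed.  A common
 * (illustrative, made-up structure) use of atomic_inc_not_zero() is taking
 * a reference only while an object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// object already on its way to being freed
 */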
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

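/*
 * atomic_inc() and atomic_dec() carry no ordering of their own here, so
 * the smp_mb__{before,after}_atomic_{dec,inc}() hooks below must be real
 * barriers rather than no-ops.
 */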
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */