xref: /linux/arch/x86/include/asm/atomic.h (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }
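
/*
 * Typical usage (illustrative only):
 *
 *	static atomic_t refcount = ATOMIC_INIT(0);
 */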

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}
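
/*
 * The volatile cast forces the compiler to emit a real load on every
 * call rather than caching v->counter in a register; a plain aligned
 * 32-bit load is already atomic on x86, so no LOCK prefix is needed.
 */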

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
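
/*
 * As with atomic_read(), an aligned 32-bit store is atomic on x86, so a
 * plain assignment suffices; no LOCK-prefixed instruction is required.
 */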

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
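
/*
 * LOCK_PREFIX expands to the "lock" prefix on SMP builds (and to nothing
 * on UP builds, via asm/alternative.h), making the read-modify-write
 * cycle atomic with respect to other processors.  The "+m" constraint
 * marks v->counter as both read and written; "ir" allows @i to be an
 * immediate or a register.
 */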

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
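
/*
 * GEN_BINARY_RMWcc and GEN_UNARY_RMWcc (from asm/rmwcc.h) emit the
 * locked instruction and return a boolean derived from the condition
 * code named by their last argument ("e" here, i.e. ZF set).  With asm
 * goto support they branch on the flags directly; otherwise they fall
 * back to a setcc sequence.  The macros expand to a return statement,
 * which is why none appears in the function bodies that use them.
 */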

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value (@i + @v).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
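
/*
 * xadd (from asm/cmpxchg.h) is a locked "xadd" instruction: it adds @i
 * to memory while storing the old value of v->counter back into the
 * source operand.  Adding @i to that old value yields the new value.
 */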

/**
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value (@v - @i).
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))

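/**
 * atomic_cmpxchg - compare and exchange an atomic variable
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it currently equals @old.
 * Returns the value @v held before the operation.
 */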
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

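/**
 * atomic_xchg - exchange the value of an atomic variable
 * @v: pointer of type atomic_t
 * @new: value to store
 *
 * Atomically stores @new in @v and returns the old value.
 */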
static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
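
/*
 * This is the classic cmpxchg retry loop: the counter is reread whenever
 * another CPU changed it between the read and the cmpxchg.  The generic
 * atomic_inc_not_zero(), for instance, is built on top of this as
 * roughly __atomic_add_unless(v, 1, 0) != 0.
 */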

/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer to type short int
 *
 * Atomically adds 1 to @v
 * Returns the new value of @v
 */
static inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
}

#ifdef CONFIG_X86_64
/**
 * atomic_or_long - OR of two long integers
 * @v1: pointer to type unsigned long
 * @v2: value to OR into *@v1
 *
 * Atomically ORs @v2 into *@v1.
 */
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}
#endif

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "andl %0,%1"			\
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "orl %0,%1"			\
		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
		     : "memory")
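
/*
 * atomic_clear_mask() clears the bits in @mask at *@addr;
 * atomic_set_mask() sets them.  The "memory" clobber tells the compiler
 * the asm writes memory, since *(addr) appears only as an input above.
 */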

/*
 * Atomic operations are already serializing on x86: every LOCK-prefixed
 * instruction implies a full memory barrier, so these only need to be
 * compiler barriers.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */