xref: /linux/arch/x86/include/asm/atomic.h (revision cea0f76a483d1270ac6f6513964e3e75193dda48)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting, etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * since it's a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}
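
/*
 * Usage sketch (illustration only, not part of this header): ATOMIC_INIT(),
 * arch_atomic_read() and arch_atomic_set() give atomicity for the plain
 * load/store but no memory ordering; callers normally use the generic
 * atomic_read()/atomic_set() wrappers and supply any barriers themselves.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) == 0)
 *		cleanup();		// cleanup() is hypothetical
 */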

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}
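
/*
 * Note on the asm above (and the similar ops below): LOCK_PREFIX comes from
 * <asm/alternative.h> and expands to the "lock" instruction prefix on
 * CONFIG_SMP builds (and to nothing on UP builds); "+m" makes v->counter a
 * read-modify-write memory operand, "ir" lets @i be an immediate or a
 * register, and the "memory" clobber stops the compiler from reordering
 * other memory accesses across the atomic operation.
 */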

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
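
/*
 * GEN_BINARY_RMWcc() (see <asm/rmwcc.h>) emits the LOCK'd instruction and
 * hands back the requested condition code ("e", i.e. ZF, above), so no
 * separate compare or test is needed.  Defining the function name to itself,
 * here and for the other *_and_test/inc/dec helpers, tells the generic
 * <linux/atomic.h> layer that the architecture provides its own optimized
 * version and no fallback needs to be generated.
 */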

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
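
/*
 * Usage sketch (illustration only): the classic "free on the last put"
 * pattern built on atomic_dec_and_test(), the generic wrapper around the
 * helper above.  struct foo and foo_put() are hypothetical.
 *
 *	struct foo {
 *		atomic_t refcount;
 *	};
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);
 *	}
 */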

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
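
/*
 * Here the RMWcc condition is "s" (the sign flag), so the result of the
 * LOCK'd addl is reported as negative/non-negative without a separate test.
 */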

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return
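
/*
 * xadd() (a LOCK'd xadd instruction, see <asm/cmpxchg.h>) returns the value
 * of v->counter from before the addition, so the new value is simply
 * old + i:
 *
 *	old = xadd(&v->counter, i);	// v->counter now holds old + i
 *	return i + old;			// == the new value
 */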

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
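
/*
 * try_cmpxchg() returns true when the compare-and-exchange succeeds; on
 * failure it returns false and updates *old with the value actually found
 * in memory.  That makes the canonical retry loop compact, e.g. a sketch of
 * a saturating increment using the generic wrapper (given atomic_t *v):
 *
 *	int old = atomic_read(v);
 *
 *	do {
 *		if (old == INT_MAX)
 *			break;
 *	} while (!atomic_try_cmpxchg(v, &old, old + 1));
 */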

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
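
/*
 * arch_xchg() needs no LOCK prefix: the x86 xchg instruction with a memory
 * operand is implicitly locked.
 */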

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and
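
/*
 * Unlike add/sub (which can use xadd), x86 has no single instruction that
 * performs a locked and/or/xor and also returns the old value, so
 * fetch_and above and fetch_or/fetch_xor below are built as try_cmpxchg()
 * retry loops; val ends up holding the value that was in v->counter just
 * before the successful exchange.
 */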

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif
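
/*
 * The atomic64_t operations live in separate headers: 32-bit kernels get a
 * cmpxchg8b-based implementation from <asm/atomic64_32.h>, while 64-bit
 * kernels use native 64-bit instructions via <asm/atomic64_64.h>.
 * ARCH_ATOMIC below signals to the generic <linux/atomic.h> machinery that
 * this architecture provides arch_atomic_*() operations, which are then
 * wrapped by the instrumented atomic_*() API.
 */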

#define ARCH_ATOMIC

#endif /* _ASM_X86_ATOMIC_H */