/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */
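/*
 * The explicit __aligned(8) restores natural alignment: the i386 ABI only
 * aligns 64-bit types to 4 bytes, and the counter must not straddle a cache
 * line for the CMPXCHG8B-based operations below.
 */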

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }

/*
 * Read an atomic64_t non-atomically.
 *
 * This is intended to be used in cases where a subsequent atomic operation
 * will handle the torn value, and can be used to prime the first iteration
 * of unconditional try_cmpxchg() loops, e.g.:
 *
 * 	s64 val = arch_atomic64_read_nonatomic(v);
 * 	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 *
 * This is NOT safe to use where the value is not always checked by a
 * subsequent atomic operation, such as in conditional try_cmpxchg() loops
 * that can break before the atomic operation, e.g.:
 *
 * 	s64 val = arch_atomic64_read_nonatomic(v);
 * 	do {
 * 		if (condition(val))
 * 			break;
 * 	} while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 */
static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
{
	/* See comment in arch_atomic_read(). */
	return __READ_ONCE(v->counter);
}

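/*
 * Declarations for the out-of-line helpers implemented in assembly in
 * arch/x86/lib/atomic64_cx8_32.S and atomic64_386_32.S.  They use a
 * register-based calling convention (see the asm constraints at each call
 * site below), hence the deliberately vague "..." prototype.
 */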
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif

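/*
 * When the kernel is built for CPUs that are guaranteed to have CMPXCHG8B
 * (CONFIG_X86_CMPXCHG64), call the _cx8 helper directly.  Otherwise use
 * alternative_call() so the call target is patched at boot: the _386
 * fallback on CPUs without X86_FEATURE_CX8, the _cx8 helper everywhere else.
 */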
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %c[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

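/*
 * cmpxchg and try_cmpxchg map directly onto the generic 64-bit cmpxchg
 * primitives, which use CMPXCHG8B when it is available.
 */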
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

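/*
 * The asm constraints below describe the helpers' register calling
 * convention: "A" is a 64-bit value in the %edx:%eax pair, "S" is the
 * atomic64_t pointer in %esi (the read helper takes it in %ecx instead),
 * and "b"/"c"/"D" pass the remaining 32-bit halves in %ebx/%ecx/%edi.
 */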
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

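/*
 * The _cx8 helper library has no plain add/sub/inc/dec entry points, so the
 * non-value-returning operations call the *_return_cx8 helpers (the second
 * argument to __alternative_atomic64()) and ignore the result; the _386
 * library provides dedicated helpers (the first argument).
 */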
static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64

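/*
 * The bitwise operations and the fetch_* variants are built from an
 * unconditional try_cmpxchg() loop, primed with the non-atomic read
 * described at the top of this file.
 */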
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */