/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }
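
/*
 * Example (illustrative; "packet_count" is a made-up name): a statically
 * initialized 64-bit counter.
 *
 *	static atomic64_t packet_count = ATOMIC64_INIT(0);
 */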

/*
 * Read an atomic64_t non-atomically.
 *
 * This is intended to be used in cases where a subsequent atomic operation
 * will handle the torn value, and can be used to prime the first iteration
 * of unconditional try_cmpxchg() loops, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 *
 * This is NOT safe to use where the value is not always checked by a
 * subsequent atomic operation, such as in conditional try_cmpxchg() loops
 * that can break before the atomic operation, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do {
 *		if (condition(val))
 *			break;
 *	} while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 */
static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
{
	/* See comment in arch_atomic_read(). */
	return __READ_ONCE(v->counter);
}

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif
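
/*
 * Illustrative expansion (assuming ATOMIC64_EXPORT is not defined):
 *
 *	ATOMIC64_DECL_ONE(add_386);
 *
 * becomes
 *
 *	void atomic64_add_386(atomic64_t *, ...);
 *
 * i.e. a prototype for an out-of-line helper whose real arguments are
 * passed in registers, per the constraint lists below.
 */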

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %c[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
		       ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
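
/*
 * Note on the macros above (CONFIG_X86_CMPXCHG64 branch): since "in..."
 * is variadic, a trailing ": <clobbers>" supplied by the caller rides
 * along after the inputs and becomes the asm clobber list. For example,
 *
 *	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 *
 * expands roughly to
 *
 *	asm volatile("call %c[func]"
 *		     : "=&A" (r)
 *		     : [func] "i" (atomic64_read_cx8), "c" (v)
 *		     : "memory");
 */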

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);
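
/*
 * Note (summarizing the constraint lists below): the out-of-line helpers
 * declared above use a register-based calling convention rather than the
 * C ABI. The atomic64_t pointer is passed in %esi ("S") or %ecx ("c"),
 * a 64-bit operand in %ebx/%ecx (low/high), and a 64-bit result is
 * returned in %edx:%eax ("=&A"). That is why the prototypes take
 * "(atomic64_t *, ...)": they are never called with a C argument list.
 */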

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
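
/*
 * Illustrative usage (not from the upstream header): cmpxchg() returns
 * the old value, while try_cmpxchg() returns success and, on failure,
 * updates *old with the current value; that is what makes the loops
 * further below so compact:
 *
 *	s64 val = arch_atomic64_read(v);
 *	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + 1));
 */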

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}
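
/*
 * Note (illustrative, based on the _cx8 naming above): with
 * CONFIG_X86_CMPXCHG64 this compiles to a call to atomic64_read_cx8(),
 * which uses cmpxchg8b, since a pair of 32-bit loads cannot read a
 * 64-bit value atomically; "=&A" returns the value in %edx:%eax.
 */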
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
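
/*
 * Note on the non-return variants below: the two names passed to
 * __alternative_atomic64() differ on purpose. The 386 fallback has
 * dedicated atomic64_{add,sub,inc,dec}_386 helpers (declared above),
 * while the CX8 path reuses the *_return_cx8 routines and simply
 * ignores the returned value.
 */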
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec
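
/*
 * Note (conventional kernel semantics, not spelled out in this file):
 * arch_atomic64_add_unless() adds @a to @v unless @v == @u, and returns
 * non-zero iff the add was performed. The inc_not_zero operation below
 * is conceptually the (v, 1, 0) case of the same idea.
 */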
static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
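
/*
 * Note (conventional kernel semantics, not spelled out in this file):
 * dec_if_positive decrements @v only if the result would be non-negative
 * and returns the old value minus one either way, so a negative return
 * value means no decrement took place.
 */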
static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64
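
/*
 * The bitwise and fetch_* operations below have no dedicated out-of-line
 * helpers; they are unconditional try_cmpxchg() loops primed with the
 * non-atomic read, exactly the pattern documented at the top of this
 * file. A torn initial read is harmless: the first try_cmpxchg() will
 * fail and reload @val atomically.
 */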
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))
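
/*
 * Note: subtracting @i is just adding -@i, so fetch_sub can reuse the
 * fetch_add loop above instead of needing one of its own.
 */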

#endif /* _ASM_X86_ATOMIC64_32_H */