//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/CPP/type_traits/has_unique_object_representations.h"
#include "src/__support/macros/attributes.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE_DECL {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a clang extension, see the clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};
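// Illustrative sketch (not part of the interface): both enums are passed
// straight through to the compiler builtins, so a scoped load lowers to
// something like
//   __scoped_atomic_load(ptr, &result, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
// on compilers that provide the scoped builtins, and to the plain __atomic_*
// form otherwise.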

namespace impl {
LIBC_INLINE constexpr int order(MemoryOrder mem_ord) {
  return static_cast<int>(mem_ord);
}

LIBC_INLINE constexpr int scope(MemoryScope mem_scope) {
  return static_cast<int>(mem_scope);
}

template <class T> LIBC_INLINE T *addressof(T &ref) {
  return __builtin_addressof(ref);
}

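// The failure ordering of a compare-exchange may not contain a release
// component, so RELEASE is downgraded to RELAXED and ACQ_REL to ACQUIRE.
// This mirrors how std::atomic derives the failure order when only a single
// order is supplied.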
LIBC_INLINE constexpr int infer_failure_order(MemoryOrder mem_ord) {
  if (mem_ord == MemoryOrder::RELEASE)
    return order(MemoryOrder::RELAXED);
  if (mem_ord == MemoryOrder::ACQ_REL)
    return order(MemoryOrder::ACQUIRE);
  return order(mem_ord);
}
} // namespace impl

template <typename T> struct Atomic {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "atomic<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "atomic<T> in libc only supports types whose values have "
                "unique object representations.");

private:
  // type conversion helper to avoid long c++ style casts

  // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to
  // at least their size to be potentially used lock-free.
  LIBC_INLINE_VAR static constexpr size_t MIN_ALIGNMENT =
      (sizeof(T) & (sizeof(T) - 1)) || (sizeof(T) > 16) ? 0 : sizeof(T);

  LIBC_INLINE_VAR static constexpr size_t ALIGNMENT = alignof(T) > MIN_ALIGNMENT
                                                          ? alignof(T)
                                                          : MIN_ALIGNMENT;

public:
  using value_type = T;

  // We keep the internal value public so that it is addressable.
  // This is useful in places like the Linux futex operations where
  // we need pointers to the memory of the atomic values. Load and store
  // operations should, however, be performed using the atomic methods.
  alignas(ALIGNMENT) value_type val;
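  // Illustrative sketch (hypothetical caller, not part of this header): a
  // futex-style user may take the address of `val` directly, e.g.
  //   cpp::Atomic<uint32_t> futex_word(0);
  //   syscall(SYS_futex, &futex_word.val, FUTEX_WAIT, 0, nullptr);
  // while ordinary reads and writes still go through load()/store().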

  LIBC_INLINE constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}

  LIBC_INLINE Atomic(const Atomic &) = delete;
  LIBC_INLINE Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  LIBC_INLINE operator T() { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(impl::addressof(val), impl::addressof(res),
                         impl::order(mem_ord), impl::scope(mem_scope));
#else
    __atomic_load(impl::addressof(val), impl::addressof(res),
                  impl::order(mem_ord));
#endif
    return res;
  }

  // Atomic store.
  LIBC_INLINE T operator=(T rhs) {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(impl::addressof(val), impl::addressof(rhs),
                          impl::order(mem_ord), impl::scope(mem_scope));
#else
    __atomic_store(impl::addressof(val), impl::addressof(rhs),
                   impl::order(mem_ord));
#endif
  }
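
  // Illustrative sketch (usage only): a release store paired with an acquire
  // load forms a simple ready flag, e.g.
  //   cpp::Atomic<int> ready(0);
  //   ready.store(1, cpp::MemoryOrder::RELEASE);              // producer
  //   while (ready.load(cpp::MemoryOrder::ACQUIRE) == 0) {}   // consumer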

  // Atomic compare exchange
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), false, impl::order(mem_ord),
        impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (separate success and failure memory orders)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), false, impl::order(success_order),
        impl::order(failure_order));
  }

  // Atomic compare exchange (weak version)
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), true, impl::order(mem_ord),
        impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (weak version with separate success and failure
  // memory orders)
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), true, impl::order(success_order),
        impl::order(failure_order));
  }
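
  // Illustrative sketch: the weak form is intended for retry loops; on failure
  // `expected` is refreshed with the observed value, e.g. for some
  // cpp::Atomic<int> a:
  //   int expected = a.load(cpp::MemoryOrder::RELAXED);
  //   while (!a.compare_exchange_weak(expected, expected + 1))
  //     ; // retry with the updated `expected`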

  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(impl::addressof(val), impl::addressof(desired),
                             impl::addressof(ret), impl::order(mem_ord),
                             impl::scope(mem_scope));
#else
    __atomic_exchange(impl::addressof(val), impl::addressof(desired),
                      impl::addressof(ret), impl::order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T
  fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(impl::addressof(val), increment,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_add(impl::addressof(val), increment,
                              impl::order(mem_ord));
#endif
  }
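
  // Illustrative sketch: as with std::atomic, the fetch_* operations return
  // the value held immediately before the modification, e.g.
  //   cpp::Atomic<int> counter(0);
  //   int old = counter.fetch_add(1); // old == 0, counter now holds 1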

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(impl::addressof(val), mask,
                                    impl::order(mem_ord),
                                    impl::scope(mem_scope));
#else
    return __atomic_fetch_or(impl::addressof(val), mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(impl::addressof(val), mask,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_and(impl::addressof(val), mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(impl::addressof(val), decrement,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_sub(impl::addressof(val), decrement,
                              impl::order(mem_ord));
#endif
  }

  // Set the value without using an atomic operation. This is useful
  // for initializing atomic values without a constructor.
  LIBC_INLINE void set(T rhs) { val = rhs; }
};

template <typename T> struct AtomicRef {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "AtomicRef<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "AtomicRef<T> only supports types with unique object "
                "representations.");

private:
  T *ptr;

public:
  // Constructor from a T reference.
  LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}

  // Non-standard implicit conversion from T*.
  LIBC_INLINE constexpr AtomicRef(T *obj) : ptr(obj) {}

  LIBC_INLINE AtomicRef(const AtomicRef &) = default;
  LIBC_INLINE AtomicRef &operator=(const AtomicRef &) = default;

  // Atomic load.
  LIBC_INLINE operator T() const { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(ptr, &res, impl::order(mem_ord),
                         impl::scope(mem_scope));
#else
    __atomic_load(ptr, &res, impl::order(mem_ord));
#endif
    return res;
  }

  // Atomic store.
  LIBC_INLINE T operator=(T rhs) const {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(ptr, &rhs, impl::order(mem_ord),
                          impl::scope(mem_scope));
#else
    __atomic_store(ptr, &rhs, impl::order(mem_ord));
#endif
  }

  // Atomic compare exchange (strong).
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     impl::order(mem_ord),
                                     impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (strong, separate success/failure memory orders).
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     impl::order(success_order),
                                     impl::order(failure_order));
  }

  // Atomic exchange.
  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord),
                             impl::scope(mem_scope));
#else
    __atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T fetch_add(
      T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(ptr, increment, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_add(ptr, increment, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(ptr, mask, impl::order(mem_ord),
                                    impl::scope(mem_scope));
#else
    return __atomic_fetch_or(ptr, mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_and(
      T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(ptr, mask, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_and(ptr, mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_sub(
      T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(ptr, decrement, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_sub(ptr, decrement, impl::order(mem_ord));
#endif
  }
};

// Permit CTAD when generating an atomic reference.
template <typename T> AtomicRef(T &) -> AtomicRef<T>;
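
// Illustrative sketch: with the deduction guide, the template argument can be
// omitted at the construction site, e.g.
//   int shared = 0;
//   cpp::AtomicRef ref(shared); // deduced as cpp::AtomicRef<int>
//   ref.store(1);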

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(
    MemoryOrder mem_ord,
    [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_thread_fence)
  __scoped_atomic_thread_fence(static_cast<int>(mem_ord),
                               static_cast<int>(mem_scope));
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}

// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by order, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as order instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use inline asm as a full compiler
  // barrier.
  asm volatile("" ::: "memory");
#endif
}
} // namespace cpp
} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H