// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//                        Kokkos v. 4.0
//       Copyright (2022) National Technology & Engineering
//               Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
//===---------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
#define _LIBCPP___ATOMIC_ATOMIC_REF_H

#include <__assert>
#include <__atomic/atomic_sync.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/to_gcc_order.h>
#include <__concepts/arithmetic.h>
#include <__concepts/same_as.h>
#include <__config>
#include <__memory/addressof.h>
#include <__type_traits/has_unique_object_representation.h>
#include <__type_traits/is_trivially_copyable.h>
#include <cstddef>
#include <cstdint>
#include <cstring>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_PUSH_MACROS
#include <__undef_macros>

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20

// These types are required to make __atomic_is_always_lock_free work across GCC and Clang.
// The purpose of this trick is to make sure that we provide an object with the correct alignment
// to __atomic_is_always_lock_free, since that answer depends on the alignment.
template <size_t _Alignment>
struct __alignment_checker_type {
  alignas(_Alignment) char __data;
};

template <size_t _Alignment>
struct __get_aligner_instance {
  static constexpr __alignment_checker_type<_Alignment> __instance{};
};

template <class _Tp>
struct __atomic_ref_base {
private:
  _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept {
    _Tp* __ptr = std::addressof(__val);
#  if __has_builtin(__builtin_clear_padding)
    __builtin_clear_padding(__ptr);
#  endif
    return __ptr;
  }

  _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange(
      _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept {
    if constexpr (
#  if __has_builtin(__builtin_clear_padding)
        has_unique_object_representations_v<_Tp> || floating_point<_Tp>
#  else
        true // NOLINT(readability-simplify-boolean-expr)
#  endif
    ) {
      return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure);
    } else { // _Tp has padding bits and __builtin_clear_padding is available
      __clear_padding(*__desired);
      _Tp __copy = *__expected;
      __clear_padding(__copy);
      // The algorithm here is to keep calling `__atomic_compare_exchange` on the values until it has
      // either succeeded, or failed because the value representations of the objects involved differ.
      // This is why we loop around __atomic_compare_exchange: we retry until a failure is caused by
      // the value representations of the objects being different, not merely their object
      // representations (which may additionally differ in padding bits).
      while (true) {
        _Tp __prev = __copy;
        if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) {
          return true;
        }
        _Tp __curr = __copy;
        if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) {
          // The value representations (ignoring padding bits) do not compare equal ->
          // write the current contents of *__ptr into *__expected.
          std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp));
          return false;
        }
      }
    }
  }

  friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>;

  // Require types that are 1, 2, 4, 8, or 16 bytes in size to be aligned to at least their size so that they can
  // potentially be used lock-free.
  static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp);
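  // For example, a 12-byte trivially copyable type has a size that is not a power of two, so
  // __min_alignment is 0 and required_alignment below falls back to alignof(_Tp); for an 8-byte
  // type, __min_alignment is 8 even when alignof(_Tp) is smaller.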

public:
  using value_type = _Tp;

  static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? alignof(_Tp) : __min_alignment;

  // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided,
  // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed
  // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition
  // of atomic_ref's constructor.
  static constexpr bool is_always_lock_free =
      __atomic_always_lock_free(sizeof(_Tp), &__get_aligner_instance<required_alignment>::__instance);

  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }

  _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic store operation is invalid");
    __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
    store(__desired);
    return __desired;
  }

  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic load operation is invalid");
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }

  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic wait operation is invalid");
    std::__atomic_wait(*this, __old, __order);
  }
  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }

protected:
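  // Accessing the object through a pointer type declared with required_alignment lets the
  // __atomic_* builtins rely on the alignment checked in the atomic_ref constructors, which is
  // what makes the lock-free code paths available where the target supports them.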
  typedef _Tp _Aligned_Tp __attribute__((aligned(required_alignment)));
  _Aligned_Tp* __ptr_;

  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
};

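// This specialization plugs __atomic_ref_base into the generic __atomic_wait/__atomic_notify_*
// machinery used by wait(), notify_one() and notify_all() above: it tells that machinery how to
// load the referenced value and which address threads block on.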
template <class _Tp>
struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
    return __a.load(__order);
  }
  static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
    return __a.__ptr_;
  }
};

template <class _Tp>
struct atomic_ref : public __atomic_ref_base<_Tp> {
  static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");

  using __base = __atomic_ref_base<_Tp>;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;
};

template <class _Tp>
  requires(std::integral<_Tp> && !std::same_as<bool, _Tp>)
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base = __atomic_ref_base<_Tp>;

  using difference_type = typename __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
};

template <class _Tp>
  requires std::floating_point<_Tp>
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base = __atomic_ref_base<_Tp>;

  using difference_type = typename __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

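  // Floating-point fetch_add/fetch_sub are emulated with a compare-exchange loop rather than an
  // __atomic_fetch_* builtin: on each failed attempt, compare_exchange_weak reloads the current
  // value into __old, and the addition is redone before retrying.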
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    _Tp __old = this->load(memory_order_relaxed);
    _Tp __new = __old + __arg;
    while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
      __new = __old + __arg;
    }
    return __old;
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    _Tp __old = this->load(memory_order_relaxed);
    _Tp __new = __old - __arg;
    while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
      __new = __old - __arg;
    }
    return __old;
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};

template <class _Tp>
struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
  using __base = __atomic_ref_base<_Tp*>;

  using difference_type = ptrdiff_t;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}

  _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

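  // The __atomic_fetch_add/__atomic_fetch_sub builtins treat the operand as a raw byte offset when
  // applied to pointers, so the element count is scaled by sizeof(_Tp) to get the usual pointer
  // arithmetic.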
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};

_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);
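
// Example usage (illustrative sketch; the names below are user code, not part of this header):
//
//   int counter = 0;
//   std::atomic_ref<int> ref(counter);            // while ref (or any other atomic_ref to counter)
//                                                 // exists, access counter only through atomic_refs
//   ref.fetch_add(1, std::memory_order_relaxed);  // atomic read-modify-write through the reference
//   int observed = ref.load();                    // atomic load; implicit conversion also works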

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

_LIBCPP_POP_MACROS

#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H