//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <__config>
#ifndef _LIBCPP_HAS_NO_THREADS

#include <__thread/timed_backoff_policy.h>
#include <atomic>
#include <climits>
#include <functional>
#include <thread>

#include "include/apple_availability.h"

#ifdef __linux__

#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>

// libc++ uses SYS_futex as a universal syscall name. However, on 32-bit architectures
// with a 64-bit time_t, we need to specify SYS_futex_time64.
#if !defined(SYS_futex) && defined(SYS_futex_time64)
# define SYS_futex SYS_futex_time64
#endif

#elif defined(__FreeBSD__)

#include <sys/types.h>
#include <sys/umtx.h>

#else // <- Add other operating systems here

// Baseline needs no new headers

#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#ifdef __linux__

static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              __cxx_contention_t __val)
{
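    // The timeout is a backstop rather than a contract: if a wake is ever
    // missed, the waiter still unblocks after 2 seconds, and the caller's
    // retry loop re-checks the value, so a spurious timeout is harmless.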
    static constexpr timespec __timeout = { 2, 0 };
    syscall(SYS_futex, __ptr, FUTEX_WAIT_PRIVATE, __val, &__timeout, 0, 0);
}

static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              bool __notify_one)
{
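    // Wake a single waiter for notify_one; INT_MAX effectively wakes them all.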
    syscall(SYS_futex, __ptr, FUTEX_WAKE_PRIVATE, __notify_one ? 1 : INT_MAX, 0, 0, 0);
}

#elif defined(__APPLE__) && defined(_LIBCPP_USE_ULOCK)

extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value,
                            uint32_t timeout); /* timeout is specified in microseconds */
extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);

#define UL_COMPARE_AND_WAIT 1
#define ULF_WAKE_ALL        0x00000100

static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              __cxx_contention_t __val)
{
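    // A timeout of 0 means wait indefinitely. Like a futex, UL_COMPARE_AND_WAIT
    // only blocks if the location still holds __val when the kernel re-checks it.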
    __ulock_wait(UL_COMPARE_AND_WAIT,
                 const_cast<__cxx_atomic_contention_t*>(__ptr), __val, 0);
}

static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              bool __notify_one)
{
    __ulock_wake(UL_COMPARE_AND_WAIT | (__notify_one ? 0 : ULF_WAKE_ALL),
                 const_cast<__cxx_atomic_contention_t*>(__ptr), 0);
}

#elif defined(__FreeBSD__) && __SIZEOF_LONG__ == 8
/*
 * Since __cxx_contention_t is int64_t even on 32-bit FreeBSD
 * platforms, we have to use umtx ops that work on the long type, and
 * limit its use to architectures where long and int64_t are synonyms.
 */

static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              __cxx_contention_t __val)
{
    _umtx_op(const_cast<__cxx_atomic_contention_t*>(__ptr),
             UMTX_OP_WAIT, __val, NULL, NULL);
}

static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              bool __notify_one)
{
    _umtx_op(const_cast<__cxx_atomic_contention_t*>(__ptr),
             UMTX_OP_WAKE, __notify_one ? 1 : INT_MAX, NULL, NULL);
}

#else // <- Add other operating systems here

// Baseline is just a timed backoff

static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr,
                                              __cxx_contention_t __val)
{
    __libcpp_thread_poll_with_backoff([=]() -> bool {
        return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__ptr, memory_order_relaxed), __val);
    }, __libcpp_timed_backoff_policy());
}

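// The polling baseline above never blocks in the kernel, so there is nobody to wake.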
static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile*, bool) { }

#endif // __linux__

static constexpr size_t __libcpp_contention_table_size = (1 << 8);  /* < there's no magic in this number */
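// The size must stay a power of two: __libcpp_contention_state below indexes the
// table by masking the hash with (__libcpp_contention_table_size - 1).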

struct alignas(64) /* aim to avoid false sharing */ __libcpp_contention_table_entry
{
    __cxx_atomic_contention_t __contention_state;
    __cxx_atomic_contention_t __platform_state;
    inline constexpr __libcpp_contention_table_entry() :
        __contention_state(0), __platform_state(0) { }
};

static __libcpp_contention_table_entry __libcpp_contention_table[ __libcpp_contention_table_size ];

static hash<void const volatile*> __libcpp_contention_hasher;

static __libcpp_contention_table_entry* __libcpp_contention_state(void const volatile * p)
{
    return &__libcpp_contention_table[__libcpp_contention_hasher(p) & (__libcpp_contention_table_size - 1)];
}
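/* Distinct atomics can hash to the same table entry. A collision only means that
   unrelated waiters share a slot and may be woken spuriously; since every waiter
   re-checks its own value, wakes are never lost, merely occasionally wasted. */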

/* Given an atomic to track contention and an atomic to actually wait on, which may be
   the same atomic, we try to detect contention to avoid spurious calls into the platform wake. */

static void __libcpp_contention_notify(__cxx_atomic_contention_t volatile* __contention_state,
                                       __cxx_atomic_contention_t const volatile* __platform_state,
                                       bool __notify_one)
{
    if(0 != __cxx_atomic_load(__contention_state, memory_order_seq_cst))
        // We only call 'wake' if we consumed a contention bit here.
        __libcpp_platform_wake_by_address(__platform_state, __notify_one);
}
static __cxx_contention_t __libcpp_contention_monitor_for_wait(__cxx_atomic_contention_t volatile* __contention_state,
                                                               __cxx_atomic_contention_t const volatile* __platform_state)
{
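    // __contention_state is unused here; the parameter presumably keeps the
    // signature symmetric with __libcpp_contention_wait and __libcpp_contention_notify.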
    // We will monitor this value.
    return __cxx_atomic_load(__platform_state, memory_order_acquire);
}
static void __libcpp_contention_wait(__cxx_atomic_contention_t volatile* __contention_state,
                                     __cxx_atomic_contention_t const volatile* __platform_state,
                                     __cxx_contention_t __old_value)
{
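    // Announce this waiter before sleeping: the seq_cst increment here pairs with
    // the seq_cst load in __libcpp_contention_notify, so either the notifier sees
    // a non-zero count and issues the platform wake, or this thread's platform
    // wait observes the already-updated value and returns without blocking.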
    __cxx_atomic_fetch_add(__contention_state, __cxx_contention_t(1), memory_order_seq_cst);
    // We sleep as long as the monitored value hasn't changed.
    __libcpp_platform_wait_on_address(__platform_state, __old_value);
    __cxx_atomic_fetch_sub(__contention_state, __cxx_contention_t(1), memory_order_release);
}

/* When the incoming atomic is the wrong size for the platform wait size, we need to
   launder the value sequence through an atomic from our table. */
168 
169 static void __libcpp_atomic_notify(void const volatile* __location)
170 {
171     auto const __entry = __libcpp_contention_state(__location);
172     // The value sequence laundering happens on the next line below.
173     __cxx_atomic_fetch_add(&__entry->__platform_state, __cxx_contention_t(1), memory_order_release);
174     __libcpp_contention_notify(&__entry->__contention_state,
175                                &__entry->__platform_state,
176                                false /* when laundering, we can't handle notify_one */);
177 }
_LIBCPP_EXPORTED_FROM_ABI
void __cxx_atomic_notify_one(void const volatile* __location)
    { __libcpp_atomic_notify(__location); }
_LIBCPP_EXPORTED_FROM_ABI
void __cxx_atomic_notify_all(void const volatile* __location)
    { __libcpp_atomic_notify(__location); }
_LIBCPP_EXPORTED_FROM_ABI
__cxx_contention_t __libcpp_atomic_monitor(void const volatile* __location)
{
    auto const __entry = __libcpp_contention_state(__location);
    return __libcpp_contention_monitor_for_wait(&__entry->__contention_state, &__entry->__platform_state);
}
_LIBCPP_EXPORTED_FROM_ABI
void __libcpp_atomic_wait(void const volatile* __location, __cxx_contention_t __old_value)
{
    auto const __entry = __libcpp_contention_state(__location);
    __libcpp_contention_wait(&__entry->__contention_state, &__entry->__platform_state, __old_value);
}

/* When the incoming atomic happens to be the platform wait size, we still need to use the
   table for contention detection, but we can use the atomic directly for the wait. */

_LIBCPP_EXPORTED_FROM_ABI
void __cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile* __location)
{
    __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, true);
}
_LIBCPP_EXPORTED_FROM_ABI
void __cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile* __location)
{
    __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, false);
}
_LIBCPP_EXPORTED_FROM_ABI
__cxx_contention_t __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile* __location)
{
    return __libcpp_contention_monitor_for_wait(&__libcpp_contention_state(__location)->__contention_state, __location);
}
_LIBCPP_EXPORTED_FROM_ABI
void __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile* __location, __cxx_contention_t __old_value)
{
    __libcpp_contention_wait(&__libcpp_contention_state(__location)->__contention_state, __location, __old_value);
}

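/* A sketch (not part of this file) of how the header-side wait algorithm is
   expected to drive the two-step protocol above; __test_fn stands in for a
   hypothetical predicate that becomes true once the awaited change happened:

       while (!__test_fn()) {
           __cxx_contention_t __monitor = __libcpp_atomic_monitor(__location);
           if (__test_fn())
               break;                                   // raced with a notifier
           __libcpp_atomic_wait(__location, __monitor); // may return spuriously
       }

   Loading the monitor before the re-check means a notification that lands after
   the re-check necessarily changes the monitored value, so the wait cannot
   sleep through it. */
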
_LIBCPP_END_NAMESPACE_STD

#endif //_LIBCPP_HAS_NO_THREADS