//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  atomic.c defines a set of functions for performing atomic accesses on
//  arbitrary-sized memory locations.  This design uses locks that should
//  be fast in the uncontended case, for two reasons:
//
//  1) This code must work with C programs that do not link to anything
//     (including pthreads) and so it should not depend on any pthread
//     functions.
//  2) Atomic operations, rather than explicit mutexes, are most commonly used
//     on code where contended operations are rare.
//
//  To avoid needing a per-object lock, this code allocates an array of
//  locks and hashes the object pointers to find the one that it should use.
//  For operations that must be atomic on two locations, the lower lock is
//  always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
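
// As a sketch of the two-location rule mentioned above (no function in this
// file currently takes two locks, but the rule would apply to one that did):
// given la = lock_for_pointer(a) and lb = lock_for_pointer(b), the lock at
// the lower address is acquired first, e.g.
//
//   if (la < lb) { lock(la); lock(lb); }
//   else if (lb < la) { lock(lb); lock(la); }
//   else { lock(la); } // both pointers hash to the same lock; take it once
//
// so two threads locking the same pair can never each hold one lock while
// waiting for the other.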

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(              \
    __atomic_compare_exchange)

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

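// A quick size check of the default (a sketch; the exact Lock type is
// platform-dependent, see below): SPINLOCK_COUNT = 1 << 10 = 1024 locks, so
// a 4-byte lock on a 32-bit platform takes 1024 * 4 = 4096 bytes (one 4 KiB
// page), and an 8-byte lock on a 64-bit platform takes 1024 * 8 = 8192 bytes
// (two pages), matching the comment above.
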
////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};
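// The designated initializer above sets each struct _usem to, in field
// order, { _has_waiters = 0, _count = 1, _flags = 0 }: no waiters and a
// count of 1, i.e. every lock starts out available.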

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}
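
// A worked example of the hash, for a hypothetical pointer value 0x12345678
// with the default SPINLOCK_MASK of 0x3ff:
//
//   hash = 0x12345678 >> 4;        // 0x01234567
//   low  = hash & 0x3ff;           // 0x167
//   hash >>= 16;                   // 0x0123
//   hash ^= low;                   // 0x0044
//   return locks + (hash & 0x3ff); // lock index 68
//
// Pointers within the same 16-byte granule take the same path and therefore
// share a lock.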

/// Macros for determining whether a size is lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)

/// 32-bit MIPS and PowerPC don't support 8-byte lock-free atomics.
#if defined(__mips__) || (!defined(__powerpc64__) && defined(__powerpc__))
#define IS_LOCK_FREE_8 0
#else
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#endif

/// Clang cannot yet codegen __atomic_is_lock_free(16), so for now we assume
/// 16-byte values are not lock free.
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES()                                                      \
  do {                                                                         \
    switch (size) {                                                            \
    case 1:                                                                    \
      if (IS_LOCK_FREE_1) {                                                    \
        LOCK_FREE_ACTION(uint8_t);                                             \
      }                                                                        \
      break;                                                                   \
    case 2:                                                                    \
      if (IS_LOCK_FREE_2) {                                                    \
        LOCK_FREE_ACTION(uint16_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 4:                                                                    \
      if (IS_LOCK_FREE_4) {                                                    \
        LOCK_FREE_ACTION(uint32_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 8:                                                                    \
      if (IS_LOCK_FREE_8) {                                                    \
        LOCK_FREE_ACTION(uint64_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 16:                                                                   \
      if (IS_LOCK_FREE_16) {                                                   \
        /* FIXME: __uint128_t isn't available on 32 bit platforms.             \
        LOCK_FREE_ACTION(__uint128_t);*/                                       \
      }                                                                        \
      break;                                                                   \
    }                                                                          \
  } while (0)

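// Each caller below defines LOCK_FREE_ACTION(type) before expanding
// LOCK_FREE_CASES() and undefines it afterwards.  As a sketch, for the load
// function with size == 4 on a target where 4-byte atomics are lock free,
// the macro expands to roughly:
//
//   case 4:
//     if (__c11_atomic_is_lock_free(4)) {
//       *((uint32_t *)dest) = __c11_atomic_load((_Atomic(uint32_t) *)src,
//                                               model);
//       return;
//     }
//     break;
//
// If no case matches (or the size isn't lock free), control falls through to
// the lock-based path that follows the macro expansion.
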
/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
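
// The compiler emits calls to this function (under the public name
// __atomic_load, via the #pragma redefine_extname above) for atomic loads it
// cannot lower to an instruction.  A minimal sketch of such a caller, using
// <stdatomic.h> and a hypothetical 32-byte type:
//
//   typedef struct { uint64_t v[4]; } Big;
//   _Atomic Big shared;
//
//   Big read_shared(void) {
//     return atomic_load(&shared); // lowers to a call like
//                                  // __atomic_load(32, &shared, &result,
//                                  //               __ATOMIC_SEQ_CST)
//   }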

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to
/// *ptr.  If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                 \
  return __c11_atomic_compare_exchange_strong(                                 \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
      failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
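
// The locked path mirrors how C11 specifies atomic_compare_exchange: the
// effect is defined in terms of memcmp and memcpy, so padding bits
// participate in the comparison, and two values that compare equal as C
// objects may still fail to match here.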

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *(type *)old =                                                               \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_load_##n(type *src, int model) {                               \
    if (lockfree)                                                              \
      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
    Lock *l = lock_for_pointer(src);                                           \
    lock(l);                                                                   \
    type val = *src;                                                           \
    unlock(l);                                                                 \
    return val;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE
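
// As a sketch, the OPTIMISED_CASES expansion above emits one function per
// size; the 4-byte instance is roughly:
//
//   uint32_t __atomic_load_4(uint32_t *src, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_load((_Atomic(uint32_t) *)src, model);
//     Lock *l = lock_for_pointer(src);
//     lock(l);
//     uint32_t val = *src;
//     unlock(l);
//     return val;
//   }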

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  void __atomic_store_##n(type *dest, type val, int model) {                   \
    if (lockfree) {                                                            \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
      return;                                                                  \
    }                                                                          \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return;                                                                    \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_exchange_##n(type *dest, type val, int model) {                \
    if (lockfree)                                                              \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    type tmp = *dest;                                                          \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
                                     int success, int failure) {               \
    if (lockfree)                                                              \
      return __c11_atomic_compare_exchange_strong(                             \
          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    if (*ptr == *expected) {                                                   \
      *ptr = desired;                                                          \
      unlock(l);                                                               \
      return true;                                                             \
    }                                                                          \
    *expected = *ptr;                                                          \
    unlock(l);                                                                 \
    return false;                                                              \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE
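
// Unlike the generic __atomic_compare_exchange_c above, the sized variants
// here compare with == rather than memcmp; for the unsigned integer types
// used in OPTIMISED_CASES the two are equivalent, since exact-width unsigned
// types have no padding bits.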

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
    if (lockfree)                                                              \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = tmp op val;                                                         \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
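
// A sketch of what the RMW expansion yields and how it is reached: for
// ATOMIC_RMW(4, IS_LOCK_FREE_4, uint32_t, add, +) the generated function is
// __atomic_fetch_add_4, which the compiler calls for code such as:
//
//   _Atomic uint32_t counter;
//   uint32_t prev = atomic_fetch_add(&counter, 1);
//
// On targets where 4-byte atomics are not lock free, this lowers to a call
// like __atomic_fetch_add_4(&counter, 1, __ATOMIC_SEQ_CST) and returns the
// value the counter held before the addition.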