//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// atomic.c defines a set of functions for performing atomic accesses on
// arbitrary-sized memory locations. This design uses locks that should
// be fast in the uncontended case, for two reasons:
//
// 1) This code must work with C programs that do not link to anything
//    (including pthreads) and so it should not depend on any pthread
//    functions.
// 2) Atomic operations, rather than explicit mutexes, are most commonly used
//    in code where contended operations are rare.
//
// To avoid needing a per-object lock, this code allocates an array of
// locks and hashes the object pointers to find the one that it should use.
// For operations that must be atomic on two locations, the lower lock is
// always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//

#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
    __atomic_compare_exchange)

/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation. Falls back to spinlocks if none is
// defined. Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// Locks for atomic operations.
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
/// Locks for atomic operations.
// Initialized to OS_SPINLOCK_INIT, which is 0.
static Lock locks[SPINLOCK_COUNT];

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock. This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// Locks for atomic operations.
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits. We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}

/// Macros for determining whether a size is lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)

/// 32-bit PowerPC doesn't support 8-byte lock-free atomics.
#if !defined(__powerpc64__) && defined(__powerpc__)
#define IS_LOCK_FREE_8 0
#else
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#endif

/// Clang cannot yet codegen __atomic_is_lock_free(16), so for now we assume
/// 16-byte values are not lock free.
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES() \
  do { \
    switch (size) { \
    case 1: \
      if (IS_LOCK_FREE_1) { \
        LOCK_FREE_ACTION(uint8_t); \
      } \
      break; \
    case 2: \
      if (IS_LOCK_FREE_2) { \
        LOCK_FREE_ACTION(uint16_t); \
      } \
      break; \
    case 4: \
      if (IS_LOCK_FREE_4) { \
        LOCK_FREE_ACTION(uint32_t); \
      } \
      break; \
    case 8: \
      if (IS_LOCK_FREE_8) { \
        LOCK_FREE_ACTION(uint64_t); \
      } \
      break; \
    case 16: \
      if (IS_LOCK_FREE_16) { \
        /* FIXME: __uint128_t isn't available on 32-bit platforms. \
        LOCK_FREE_ACTION(__uint128_t);*/ \
      } \
      break; \
    } \
  } while (0)

/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
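
// Illustrative usage sketch (not part of the original source; the helper, the
// struct type, and the #if 0 guard below are hypothetical). For an object that
// is too large to match any lock-free case above, an atomic access is lowered
// to a call to the generic entry point, which takes the locked memcpy path.
// Within this file the function is visible under its internal name
// __atomic_load_c; externally it is renamed via the #pragma redefine_extname
// directives near the top of the file.
#if 0
typedef struct {
  uint64_t words[4]; // 32 bytes: larger than any lock-free case above.
} example_large_t;

static example_large_t example_large_load(example_large_t *src) {
  example_large_t result;
  // 5 is the value of __ATOMIC_SEQ_CST.
  __atomic_load_c(sizeof(example_large_t), src, &result, 5);
  return result;
}
#endif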

/// An atomic store operation. This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation. If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong( \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
      failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}

/// Performs an atomic exchange operation between two pointers. This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
  *(type *)old = \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
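
// Illustrative sketch (hypothetical helper, for exposition only): when the
// size is known and supported, a sized call such as the one below may be used
// instead of the generic __atomic_load, avoiding the size switch entirely.
// The sized entry points are generated by the OPTIMISED_CASE macros that
// follow.
#if 0
static uint32_t example_sized_load(uint32_t *p) {
  // 5 is the value of __ATOMIC_SEQ_CST.
  return __atomic_load_4(p, 5);
}
#endif
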
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type) \
  type __atomic_load_##n(type *src, int model) { \
    if (lockfree) \
      return __c11_atomic_load((_Atomic(type) *)src, model); \
    Lock *l = lock_for_pointer(src); \
    lock(l); \
    type val = *src; \
    unlock(l); \
    return val; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  void __atomic_store_##n(type *dest, type val, int model) { \
    if (lockfree) { \
      __c11_atomic_store((_Atomic(type) *)dest, val, model); \
      return; \
    } \
    Lock *l = lock_for_pointer(dest); \
    lock(l); \
    *dest = val; \
    unlock(l); \
    return; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  type __atomic_exchange_##n(type *dest, type val, int model) { \
    if (lockfree) \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
    Lock *l = lock_for_pointer(dest); \
    lock(l); \
    type tmp = *dest; \
    *dest = val; \
    unlock(l); \
    return tmp; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type) \
  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
                                    int success, int failure) { \
    if (lockfree) \
      return __c11_atomic_compare_exchange_strong( \
          (_Atomic(type) *)ptr, expected, desired, success, failure); \
    Lock *l = lock_for_pointer(ptr); \
    lock(l); \
    if (*ptr == *expected) { \
      *ptr = desired; \
      unlock(l); \
      return 1; \
    } \
    *expected = *ptr; \
    unlock(l); \
    return 0; \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
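
// For reference, a hand-expanded sketch (not part of the original source) of
// what the ATOMIC_RMW macro below produces for the 4-byte add case, i.e. for
// OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) with opname=add and op=+. The
// real definitions are generated by the macros that follow.
#if 0
uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
  if (IS_LOCK_FREE_4)
    return __c11_atomic_fetch_add((_Atomic(uint32_t) *)ptr, val, model);
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  uint32_t tmp = *ptr;
  *ptr = tmp + val;
  unlock(l);
  return tmp;
}
#endif
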
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
    if (lockfree) \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
    Lock *l = lock_for_pointer(ptr); \
    lock(l); \
    type tmp = *ptr; \
    *ptr = tmp op val; \
    unlock(l); \
    return tmp; \
  }

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
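
// Minimal self-test sketch (hypothetical, not part of the runtime; the guard
// and function name are made up for illustration). It exercises the locked
// fallback of the generic compare-and-exchange on an object with no lock-free
// path. Memory orders are passed as the numeric value of __ATOMIC_SEQ_CST (5).
#if 0
#include <assert.h>
static void example_self_test(void) {
  struct {
    char bytes[24];
  } obj = {{0}}, expected = {{0}}, desired = {{1}};
  // Values match, so *desired is stored into obj and 1 is returned.
  assert(__atomic_compare_exchange_c(sizeof(obj), &obj, &expected, &desired,
                                     5, 5));
  // Values no longer match, so obj is copied back into expected and 0 is
  // returned.
  assert(!__atomic_compare_exchange_c(sizeof(obj), &obj, &expected, &desired,
                                      5, 5));
}
#endif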