//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background, see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
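//
// For illustration only (assuming the usual clang TSan instrumentation): an
// acquire load of a std::atomic<int> is roughly lowered to a call like
//   __tsan_atomic32_load(addr, __ATOMIC_ACQUIRE);
// and the entry points defined in this file model the resulting
// happens-before edges for the race detector.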

#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
         mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
         mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template <typename T>
T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not provide a full barrier (it is only an
  // acquire barrier), so issue one explicitly.
  __sync_synchronize();
  return res;
}

template <typename T>
T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template <typename T>
T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template <typename T>
T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template <typename T>
T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template <typename T>
T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template <typename T>
T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
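  // Emulate it with a compare-and-swap loop that returns the previous value.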
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template <typename T>
T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \
    __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
    case mo_relaxed:
      return memory_order_relaxed;
    case mo_consume:
      return memory_order_consume;
    case mo_acquire:
      return memory_order_acquire;
    case mo_release:
      return memory_order_release;
    case mo_acq_rel:
      return memory_order_acq_rel;
    case mo_seq_cst:
      return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

namespace {

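// Synchronization scheme for read-modify-write operations (a sketch; see the
// code for the authoritative details): the memory access is reported first;
// a relaxed RMW takes the fast path with no clock work; stronger orders lock
// the per-address SyncVar and release and/or acquire the thread's vector
// clock depending on the memory order. For example, a hypothetical caller
// doing
//   __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_ACQ_REL);
// releases this thread's clock into x's SyncVar and acquires the SyncVar
// clock back, all under the SyncVar mutex.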
template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}

struct OpLoad {
  template <typename T>
  static T NoTsanAtomic(morder mo, const volatile T *a) {
    return atomic_load(to_atomic(a), to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static a128 NoTsanAtomic(morder mo, const volatile a128 *a) {
    SpinMutexLock lock(&mutex128);
    return *a;
  }
#endif

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
    DCHECK(IsLoadOrder(mo));
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    if (!IsAcquireOrder(mo)) {
      MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                   kAccessRead | kAccessAtomic);
      return NoTsanAtomic(mo, a);
    }
    // Don't create sync object if it does not exist yet. For example, an atomic
    // pointer is initialized to nullptr and then periodically acquire-loaded.
    T v = NoTsanAtomic(mo, a);
    SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
    if (s) {
      SlotLocker locker(thr);
      ReadLock lock(&s->mtx);
      thr->clock.Acquire(s->clock);
      // Re-read under sync mutex because we need a consistent snapshot
      // of the value and the clock we acquire.
      v = NoTsanAtomic(mo, a);
    }
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return v;
  }
};

struct OpStore {
  template <typename T>
  static void NoTsanAtomic(morder mo, volatile T *a, T v) {
    atomic_store(to_atomic(a), v, to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) {
    SpinMutexLock lock(&mutex128);
    *a = v;
  }
#endif

  template <typename T>
  static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    DCHECK(IsStoreOrder(mo));
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    // Strictly speaking, even a relaxed store cuts off the release sequence,
    // so we must reset the clock.
    if (!IsReleaseOrder(mo)) {
      NoTsanAtomic(mo, a, v);
      return;
    }
    SlotLocker locker(thr);
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      Lock lock(&s->mtx);
      thr->clock.ReleaseStore(&s->clock);
      NoTsanAtomic(mo, a, v);
    }
    IncrementEpoch(thr);
  }
};

struct OpExchange {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xchg(a, v);
  }
  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
  }
};

struct OpFetchAdd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_add(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
  }
};

struct OpFetchSub {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_sub(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
  }
};

struct OpFetchAnd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_and(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
  }
};

struct OpFetchOr {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_or(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
  }
};

struct OpFetchXor {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xor(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
  }
};

struct OpFetchNand {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_nand(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
  }
};

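// CAS is modeled after compare_exchange_strong (a sketch; see the code for
// details): on success the success order mo drives the release/acquire on
// the SyncVar clock; on failure the expected value *c is updated with the
// current value and only the failure order fmo (a load order) is applied.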
struct OpCAS {
  template <typename T>
  static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) {
    return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
  }

#if __TSAN_HAS_INT128
  static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c,
                           a128 v) {
    a128 old = *c;
    a128 cur = func_cas(a, old, v);
    if (cur == old)
      return true;
    *c = cur;
    return false;
  }
#endif

  template <typename T>
  static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) {
    NoTsanAtomic(mo, fmo, a, &c, v);
    return c;
  }

  template <typename T>
  static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                     volatile T *a, T *c, T v) {
    // 31.7.2.18: "The failure argument shall not be memory_order_release
    // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
    // (mo_relaxed) when those are used.
    DCHECK(IsLoadOrder(fmo));

    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
      T cc = *c;
      T pr = func_cas(a, cc, v);
      if (pr == cc)
        return true;
      *c = pr;
      return false;
    }
    SlotLocker locker(thr);
    bool release = IsReleaseOrder(mo);
    bool success;
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      RWLock lock(&s->mtx, release);
      T cc = *c;
      T pr = func_cas(a, cc, v);
      success = pr == cc;
      if (!success) {
        *c = pr;
        mo = fmo;
      }
      if (success && IsAcqRelOrder(mo))
        thr->clock.ReleaseAcquire(&s->clock);
      else if (success && IsReleaseOrder(mo))
        thr->clock.Release(&s->clock);
      else if (IsAcquireOrder(mo))
        thr->clock.Acquire(s->clock);
    }
    if (success && release)
      IncrementEpoch(thr);
    return success;
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                  volatile T *a, T c, T v) {
    Atomic(thr, pc, mo, fmo, a, &c, v);
    return c;
  }
};

#if !SANITIZER_GO
struct OpFence {
  static void NoTsanAtomic(morder mo) { __sync_synchronize(); }

  static void Atomic(ThreadState *thr, uptr pc, morder mo) {
    // FIXME(dvyukov): not implemented.
    __sync_synchronize();
  }
};
#endif

} // namespace

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

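// If the force_seq_cst_atomics runtime flag is set (e.g. via
// TSAN_OPTIONS=force_seq_cst_atomics=1), every atomic operation is treated
// as seq_cst regardless of the memory order the program requested.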
static morder convert_morder(morder mo) {
  return flags()->force_seq_cst_atomics ? mo_seq_cst : mo;
}

static morder to_morder(int mo) {
  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
  morder res = static_cast<morder>(static_cast<u8>(mo));
  DCHECK_LE(res, mo_seq_cst);
  return res;
}
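// Example (assuming GCC's encoding of the HLE bits):
// to_morder(__ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE) truncates 2 | (1 << 16)
// to the low byte and yields mo_acquire, i.e. the elision hint is dropped.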

template <class Op, class... Types>
ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
  ThreadState *const thr = cur_thread();
  ProcessPendingSignals(thr);
  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
    return Op::NoTsanAtomic(mo, args...);
  return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}
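// All __tsan_atomicN_* entry points below are thin wrappers around
// AtomicImpl, e.g. __tsan_atomic32_load(a, mo) forwards to
// AtomicImpl<OpLoad>(to_morder(mo), a), which captures the caller PC and
// checks the ignore flags.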

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
                                           int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                            int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                            int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                            int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
                                       int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(int mo) {
  return AtomicImpl<OpFence>(to_morder(mo));
}

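// A signal fence only constrains reordering between a thread and its own
// signal handlers; there is no inter-thread synchronization to model, so the
// body below is intentionally empty.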
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(int mo) {}
} // extern "C"

#else // #if !SANITIZER_GO

// Go

template <class Op, class... Types>
void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    (void)Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    (void)Op::Atomic(thr, pc, args...);
    FuncExit(thr);
  }
}

template <class Op, class... Types>
auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    return Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    auto ret = Op::Atomic(thr, pc, args...);
    FuncExit(thr);
    return ret;
  }
}

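// Go calling convention used by the entry points below: the Go runtime
// passes a single packed argument frame `a`; the address of the atomic
// variable is at offset 0, the operand value(s) follow, and the result is
// written back into the frame after the operands (see the exact offsets in
// each function).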
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a32 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a64 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a32 cmp = *(a32 *)(a + 8);
  a32 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a,
                               cmp, *(a32 *)(a + 12));
  *(bool *)(a + 16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a64 cmp = *(a64 *)(a + 8);
  a64 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a,
                               cmp, *(a64 *)(a + 16));
  *(bool *)(a + 24) = (cur == cmp);
}
} // extern "C"
#endif // #if !SANITIZER_GO