168d75effSDimitry Andric //===-- tsan_rtl.cpp ------------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector.
1068d75effSDimitry Andric //
1168d75effSDimitry Andric // Main file (entry points) for the TSan run-time.
1268d75effSDimitry Andric //===----------------------------------------------------------------------===//
1368d75effSDimitry Andric
14fe6060f1SDimitry Andric #include "tsan_rtl.h"
15fe6060f1SDimitry Andric
1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_atomic.h"
1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h"
1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_file.h"
1981ad6265SDimitry Andric #include "sanitizer_common/sanitizer_interface_internal.h"
2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_libc.h"
2168d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
22fe6060f1SDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
2368d75effSDimitry Andric #include "sanitizer_common/sanitizer_symbolizer.h"
2468d75effSDimitry Andric #include "tsan_defs.h"
25fe6060f1SDimitry Andric #include "tsan_interface.h"
2668d75effSDimitry Andric #include "tsan_mman.h"
27fe6060f1SDimitry Andric #include "tsan_platform.h"
2868d75effSDimitry Andric #include "tsan_suppressions.h"
2968d75effSDimitry Andric #include "tsan_symbolize.h"
3068d75effSDimitry Andric #include "ubsan/ubsan_init.h"
3168d75effSDimitry Andric
// Set to 1 by __tsan_resume(); presumably polled elsewhere in the runtime
// to let a run that was started in "stopped" mode proceed — the consumer
// is outside this chunk, confirm against the rest of the runtime.
volatile int __tsan_resumed = 0;

// C entry point for external tools/debuggers to flip the flag above.
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
3768d75effSDimitry Andric
#if SANITIZER_APPLE
// Test-only hook; weak so tests can override it. Default does nothing.
// NOTE(review): judging by the name it is called from fork handling on
// Apple platforms — confirm against the actual caller.
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
#endif
424824e7fdSDimitry Andric
4368d75effSDimitry Andric namespace __tsan {
4468d75effSDimitry Andric
#if !SANITIZER_GO
// Optional callbacks a front-end can install; consulted by the weak
// OnInitialize()/OnFinalize() default implementations below.
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif
49349cc55cSDimitry Andric
#if !SANITIZER_GO && !SANITIZER_APPLE
// Backing storage for the current thread's ThreadState: cache-line
// aligned, initial-exec TLS to keep accesses cheap.
alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
#endif
// Backing storage for the global Context; the placement construction
// site is outside this chunk — presumably in Initialize(), confirm.
alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
Context *ctx;
5668d75effSDimitry Andric
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  // Give an installed front-end callback the final say over the
  // "failed" status; otherwise pass it through unchanged.
# if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
# endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
# if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
# endif
}
#endif
7968d75effSDimitry Andric
// Returns a trace part for the calling thread: recycled from
// ctx->trace_part_recycle when the thread has reached its per-trace
// budget (kMinParts + history_size) or when other traces hold more
// parts than their budget (trace_part_finished_excess); otherwise
// freshly mmap'ed.
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        // The recycled part still belongs to another trace: detach it
        // and record that the owning trace now holds fewer parts than
        // it had allocated.
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      // Our allocation compensates one unit of the global excess, if any.
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    // Bookkeeping: count brand-new parts; a recycled part may come off
    // the "finished" recycle accounting.
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  // Mmap outside of slot_mtx to keep the critical section short.
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}
11768d75effSDimitry Andric
// Detaches the part from its trace and pushes it onto the global recycle
// list. Caller must hold ctx->slot_mtx (enforced by the annotation).
static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}
1230eae32dcSDimitry Andric
// Test-only: unmaps every recycled trace part (detaching each from its
// owning trace first) and zeroes the global trace-part accounting.
void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    // A recycled part may still be linked into its trace's list;
    // the recycle list and the trace must agree on the front element.
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}
1350eae32dcSDimitry Andric
// Performs the actual global reset: frees all trace parts (except the
// current part of still-attached threads), marks every slot free again,
// re-zeroes the shadow region and resets metamap clocks. Called from
// DoReset with all slot mutexes held; additionally takes the thread
// registry lock and slot_mtx here.
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tid's can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      // Once we reach local_head, the remaining parts must not be on the
      // recycle list (checked below); earlier parts are removed from it.
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      // Detached thread: clear its position so it starts a fresh part.
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  // Drain and rebuild the slot queue: every slot becomes free again.
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  // Go maps shadow incrementally; only reset the mapped portion.
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

  // Re-mapping over the old shadow is how it gets zeroed.
#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
# if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
# endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}
2300eae32dcSDimitry Andric
// Locks every slot mutex in order, then performs the global reset.
// epoch == 0 means "reset at whatever the current global epoch is";
// the value is latched after the first slot is locked. If the global
// epoch has already moved past the requested one, another thread has
// performed the reset we wanted, so we bail out.
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}
2490eae32dcSDimitry Andric
// Full state reset; epoch 0 = "reset at the current global epoch".
void FlushShadowMemory() { DoReset(nullptr, 0); }
2510eae32dcSDimitry Andric
// Finds a usable slot for the thread and returns it with its mutex held.
// Loops until it gets a slot whose epoch has not overflowed, triggering
// a global reset (DoReset) whenever the free queue runs dry.
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          // Usable slot: push it back so it stays available to others.
          ctx->slot_queue.PushBack(slot);
          break;
        }
        // Epoch-exhausted slots are dropped from the queue here.
      }
    }
    if (!slot) {
      // All slots are exhausted: reset the world and retry.
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      // Preempt the thread currently attached to this slot, saving its
      // latest epoch into the slot.
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    // The slot may have hit kEpochLast between the queue check and the
    // lock; if so, go around again.
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}
2950eae32dcSDimitry Andric
// Attaches the thread to a free slot (whose mutex is held on return),
// advances the slot epoch, and resets the thread's vector clock if a
// global reset happened since the thread last held a slot.
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    // First attach after a global reset: old clock contents are stale.
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  // Record tid->epoch so the journal can later map slot events to threads.
  slot->journal.PushBack({thr->tid, epoch});
}
3190eae32dcSDimitry Andric
// Detaches the thread from its slot. If the thread was already preempted
// (slot->thr != thr), only the thread's own trace state is cleaned up;
// the trace part is released entirely if a global reset happened since
// the thread last attached.
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in an unlikely event
        // the thread is preempted right after it acquired the slot
        // in ThreadStart and did not trace any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      // Free outside of trace->mtx: TracePartFree needs slot_mtx.
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  // Normal detach: either the thread is exiting, or it consumed the
  // slot's entire epoch range.
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}
3510eae32dcSDimitry Andric
// Detaches an exiting thread from its slot (takes the slot mutex).
void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}
3560eae32dcSDimitry Andric
// Locks the thread's slot mutex. If the thread lost the slot in the
// meantime (preempted) or its epoch range is exhausted, detaches and
// re-attaches to a fresh slot (which is returned locked).
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  // Fast path: we still own the slot and have epochs left.
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}
3760eae32dcSDimitry Andric
// Releases the slot mutex taken by SlotLock/SlotAttachAndLock.
void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}
38268d75effSDimitry Andric
// Global context constructor: initializes the mutexes, the thread
// registry (ThreadContexts are placement-allocated via Alloc), assigns
// each slot its Sid and queues all slots as free. global_epoch starts
// at 1 so that 0 can serve as the "unset" value (see DoReset).
Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}
40368d75effSDimitry Andric
// Slots only need their mutex initialized; sid is assigned by Context.
TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
4050eae32dcSDimitry Andric
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  // The placeholder that backs this object must be cache-line aligned.
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}
42968d75effSDimitry Andric
43068d75effSDimitry Andric #if !SANITIZER_GO
// Writes one memory-profile record (at the given uptime) to the fd set
// up by InitializeMemoryProfiler; no-op when profiling is disabled.
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}
438349cc55cSDimitry Andric
// Opens the memory-profile destination named by flags()->profile_memory
// ("stdout", "stderr", or a "<name>.<pid>" file) and emits the initial
// record. Returns true iff profiling is active.
static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    // Suffix with pid so concurrent processes don't clobber each other.
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}
46168d75effSDimitry Andric
// Background maintenance loop: polls every 100ms until told to stop,
// performing (as configured by flags) periodic memory flushes,
// RSS-limit-triggered flushes, memory profiling, and symbolizer cache
// flushes.
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      // Flush when current RSS exceeds the midpoint of the limit and the
      // previous RSS (2*rss > limit + last_rss).
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}
51968d75effSDimitry Andric
// Spawns the background maintenance thread (BackgroundThread above).
static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}
52368d75effSDimitry Andric
#ifndef __mips__
// Signals the background thread to stop and joins it.
// NOTE(review): compiled out on MIPS — the reason is not visible in this
// file; confirm against the platform-specific shutdown path.
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
53168d75effSDimitry Andric #endif
53268d75effSDimitry Andric
// Returns the shadow pages backing the app range [addr, addr+size)
// to the OS.
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}
53768d75effSDimitry Andric
53868d75effSDimitry Andric #if !SANITIZER_GO
5394824e7fdSDimitry Andric // We call UnmapShadow before the actual munmap, at that point we don't yet
5404824e7fdSDimitry Andric // know if the provided address/size are sane. We can't call UnmapShadow
5414824e7fdSDimitry Andric // after the actual munmap becuase at that point the memory range can
5424824e7fdSDimitry Andric // already be reused for something else, so we can't rely on the munmap
5434824e7fdSDimitry Andric // return value to understand is the values are sane.
5444824e7fdSDimitry Andric // While calling munmap with insane values (non-canonical address, negative
5454824e7fdSDimitry Andric // size, etc) is an error, the kernel won't crash. We must also try to not
5464824e7fdSDimitry Andric // crash as the failure mode is very confusing (paging fault inside of the
5474824e7fdSDimitry Andric // runtime on some derived shadow address).
IsValidMmapRange(uptr addr,uptr size)5484824e7fdSDimitry Andric static bool IsValidMmapRange(uptr addr, uptr size) {
5494824e7fdSDimitry Andric if (size == 0)
5504824e7fdSDimitry Andric return true;
5514824e7fdSDimitry Andric if (static_cast<sptr>(size) < 0)
5524824e7fdSDimitry Andric return false;
5534824e7fdSDimitry Andric if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
5544824e7fdSDimitry Andric return false;
5554824e7fdSDimitry Andric // Check that if the start of the region belongs to one of app ranges,
5564824e7fdSDimitry Andric // end of the region belongs to the same region.
5574824e7fdSDimitry Andric const uptr ranges[][2] = {
5584824e7fdSDimitry Andric {LoAppMemBeg(), LoAppMemEnd()},
5594824e7fdSDimitry Andric {MidAppMemBeg(), MidAppMemEnd()},
5604824e7fdSDimitry Andric {HiAppMemBeg(), HiAppMemEnd()},
5614824e7fdSDimitry Andric };
5624824e7fdSDimitry Andric for (auto range : ranges) {
5634824e7fdSDimitry Andric if (addr >= range[0] && addr < range[1])
5644824e7fdSDimitry Andric return addr + size <= range[1];
5654824e7fdSDimitry Andric }
5664824e7fdSDimitry Andric return false;
5674824e7fdSDimitry Andric }
5684824e7fdSDimitry Andric
// Removes shadow and meta-shadow state for a region the application is about
// to munmap. Must be called *before* the actual munmap (see the comment above
// IsValidMmapRange); silently ignores empty or implausible ranges.
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  // Release the shadow pages back to the OS first, then reset the metadata
  // for the range under the global processor and slot locks.
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
57768d75effSDimitry Andric #endif
57868d75effSDimitry Andric
// Maps shadow and meta-shadow memory for the application range
// [addr, addr + size). The first call covers data+bss; subsequent calls
// extend the mapping for the heap. Growth state is kept in static/ctx
// variables, so calls must be serialized (see the lock below).
void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  // Go: track the already-mapped shadow range in ctx and only map the
  // not-yet-covered part, using 64K granularity (Windows requirement).
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
       ctx->mapped_shadow_begin = shadow_begin;
    // Only map the tail that extends past the current mapping.
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}
65168d75effSDimitry Andric
65268d75effSDimitry Andric #if !SANITIZER_GO
// Stack-unwind callback passed to HandleDeadlySignal: unwinds from the
// signal's pc/bp/context, honoring the fast_unwind_on_fatal flag.
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}
65868d75effSDimitry Andric
// Deadly-signal handler installed by InstallDeadlySignalHandlers (see
// Initialize); delegates to sanitizer_common with our unwind callback.
static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
66268d75effSDimitry Andric #endif
66368d75effSDimitry Andric
// Prints the current stack when a CHECK fails inside the runtime.
// Registered via SetCheckUnwindCallback in Initialize.
void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  // Relax thread state so that the unwinder/symbolizer can run:
  // allow malloc and suppress further instrumentation and signal handling.
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
678fe6060f1SDimitry Andric
679349cc55cSDimitry Andric bool is_initialized;
680349cc55cSDimitry Andric
// One-time runtime initialization: flags, allocator, interceptors, shadow
// memory, suppressions, and the main thread. The relative order of these
// steps is load-bearing; do not reorder without care.
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    // Busy-wait until a debugger (or the user) calls __tsan_resume().
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
75368d75effSDimitry Andric
// Starts the background thread (see StartBackgroundThread) at most once per
// process; subsequent calls are no-ops.
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  // The load + exchange pair ensures exactly one caller spawns the thread.
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}
76768d75effSDimitry Andric
// Runtime shutdown: flushes pending reports, prints summaries/statistics,
// and returns the process exit code (common_flags()->exitcode if any races
// were reported or OnFinalize reports failure, 0 otherwise).
int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  // Optionally linger so that still-running threads can produce reports.
  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  // Give the embedder a chance to override the failure status.
  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}
80668d75effSDimitry Andric
80768d75effSDimitry Andric #if !SANITIZER_GO
// Pre-fork hook: acquires all runtime locks so the child inherits them in a
// consistent state. ForkAfter releases them in the reverse order; keep the
// two functions in sync.
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

# if SANITIZER_APPLE
  __tsan_test_only_on_fork();
# endif
}
83668d75effSDimitry Andric
// Post-fork hook shared by parent and child: undoes ForkBefore by releasing
// the locks in reverse acquisition order, then re-attaches the thread to a
// slot. `child` is forwarded to the allocator, which distinguishes the two
// sides.
static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}
85168d75effSDimitry Andric
ForkParentAfter(ThreadState * thr,uptr pc)852*0fca6ea1SDimitry Andric void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, false); }
85368d75effSDimitry Andric
// Child-side post-fork hook: releases locks, prunes the thread registry down
// to the forking thread, and either restarts the background thread (if the
// parent was single-threaded) or enters survive-until-exec mode.
void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr, true);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
87568d75effSDimitry Andric #endif
87668d75effSDimitry Andric
87768d75effSDimitry Andric #if SANITIZER_GO
87868d75effSDimitry Andric NOINLINE
GrowShadowStack(ThreadState * thr)87968d75effSDimitry Andric void GrowShadowStack(ThreadState *thr) {
88068d75effSDimitry Andric const int sz = thr->shadow_stack_end - thr->shadow_stack;
88168d75effSDimitry Andric const int newsz = 2 * sz;
882349cc55cSDimitry Andric auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
88368d75effSDimitry Andric internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
884349cc55cSDimitry Andric Free(thr->shadow_stack);
88568d75effSDimitry Andric thr->shadow_stack = newstack;
88668d75effSDimitry Andric thr->shadow_stack_pos = newstack + sz;
88768d75effSDimitry Andric thr->shadow_stack_end = newstack + newsz;
88868d75effSDimitry Andric }
88968d75effSDimitry Andric #endif
89068d75effSDimitry Andric
// Returns the stack depot ID of the current shadow stack. If pc != 0 it is
// temporarily pushed on top of the stack so the returned trace includes the
// caller's pc as the innermost frame.
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    // The Go runtime grows the shadow stack on demand.
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  // Pop the temporarily pushed pc.
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
91268d75effSDimitry Andric
// Called when TraceAcquire fails. If there is still usable space in the
// current trace part (TraceAcquire's fast-path check has false positives in
// the middle of a part), fills the alignment gap with NopEvent's and returns
// true so the caller can retry in the same part. Returns false when the part
// is genuinely exhausted (or there is no part yet) and a new part is needed.
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}
9420eae32dcSDimitry Andric
// Slow path taken when TraceAcquire fails: either recovers remaining space in
// the current trace part, or switches to a new part via TraceSwitchPartImpl.
NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    // Rewind to the beginning of the existing part instead of allocating a
    // new one -- traces are not expected to be useful at this point.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}
9600eae32dcSDimitry Andric
TraceSwitchPartImpl(ThreadState * thr)9610eae32dcSDimitry Andric void TraceSwitchPartImpl(ThreadState* thr) {
9620eae32dcSDimitry Andric SlotLocker locker(thr, true);
9630eae32dcSDimitry Andric Trace* trace = &thr->tctx->trace;
9640eae32dcSDimitry Andric TracePart* part = TracePartAlloc(thr);
965349cc55cSDimitry Andric part->trace = trace;
966349cc55cSDimitry Andric thr->trace_prev_pc = 0;
9670eae32dcSDimitry Andric TracePart* recycle = nullptr;
9680eae32dcSDimitry Andric // Keep roughly half of parts local to the thread
9690eae32dcSDimitry Andric // (not queued into the recycle queue).
9700eae32dcSDimitry Andric uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
971349cc55cSDimitry Andric {
972349cc55cSDimitry Andric Lock lock(&trace->mtx);
9730eae32dcSDimitry Andric if (trace->parts.Empty())
9740eae32dcSDimitry Andric trace->local_head = part;
9750eae32dcSDimitry Andric if (trace->parts.Size() >= local_parts) {
9760eae32dcSDimitry Andric recycle = trace->local_head;
9770eae32dcSDimitry Andric trace->local_head = trace->parts.Next(recycle);
9780eae32dcSDimitry Andric }
979349cc55cSDimitry Andric trace->parts.PushBack(part);
980349cc55cSDimitry Andric atomic_store_relaxed(&thr->trace_pos,
981349cc55cSDimitry Andric reinterpret_cast<uptr>(&part->events[0]));
982349cc55cSDimitry Andric }
983349cc55cSDimitry Andric // Make this part self-sufficient by restoring the current stack
984349cc55cSDimitry Andric // and mutex set in the beginning of the trace.
985349cc55cSDimitry Andric TraceTime(thr);
9860eae32dcSDimitry Andric {
9870eae32dcSDimitry Andric // Pathologically large stacks may not fit into the part.
9880eae32dcSDimitry Andric // In these cases we log only fixed number of top frames.
9890eae32dcSDimitry Andric const uptr kMaxFrames = 1000;
99081ad6265SDimitry Andric // Check that kMaxFrames won't consume the whole part.
9910eae32dcSDimitry Andric static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
9920eae32dcSDimitry Andric uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
9930eae32dcSDimitry Andric for (; pos < thr->shadow_stack_pos; pos++) {
9940eae32dcSDimitry Andric if (TryTraceFunc(thr, *pos))
9950eae32dcSDimitry Andric continue;
9960eae32dcSDimitry Andric CHECK(TraceSkipGap(thr));
997349cc55cSDimitry Andric CHECK(TryTraceFunc(thr, *pos));
9980eae32dcSDimitry Andric }
9990eae32dcSDimitry Andric }
1000349cc55cSDimitry Andric for (uptr i = 0; i < thr->mset.Size(); i++) {
1001349cc55cSDimitry Andric MutexSet::Desc d = thr->mset.Get(i);
10020eae32dcSDimitry Andric for (uptr i = 0; i < d.count; i++)
1003349cc55cSDimitry Andric TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
1004349cc55cSDimitry Andric d.addr, d.stack_id);
1005349cc55cSDimitry Andric }
1006fcaf7f86SDimitry Andric // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
1007fcaf7f86SDimitry Andric // after the call. It's possible that TryTraceFunc/TraceMutexLock above
1008fcaf7f86SDimitry Andric // filled the trace part exactly up to the TracePart::kAlignment gap
1009fcaf7f86SDimitry Andric // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
1010fcaf7f86SDimitry Andric EventFunc *ev;
1011fcaf7f86SDimitry Andric if (!TraceAcquire(thr, &ev)) {
1012fcaf7f86SDimitry Andric CHECK(TraceSkipGap(thr));
1013fcaf7f86SDimitry Andric CHECK(TraceAcquire(thr, &ev));
1014fcaf7f86SDimitry Andric }
10150eae32dcSDimitry Andric {
10160eae32dcSDimitry Andric Lock lock(&ctx->slot_mtx);
10170eae32dcSDimitry Andric // There is a small chance that the slot may be not queued at this point.
10180eae32dcSDimitry Andric // This can happen if the slot has kEpochLast epoch and another thread
10190eae32dcSDimitry Andric // in FindSlotAndLock discovered that it's exhausted and removed it from
10200eae32dcSDimitry Andric // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
10210eae32dcSDimitry Andric // was called with the slot locked and epoch already at kEpochLast,
10220eae32dcSDimitry Andric // or (2) if we've acquired a new slot in SlotLock in the beginning
10230eae32dcSDimitry Andric // of the function and the slot was at kEpochLast - 1, so after increment
10240eae32dcSDimitry Andric // in SlotAttachAndLock it become kEpochLast.
10250eae32dcSDimitry Andric if (ctx->slot_queue.Queued(thr->slot)) {
10260eae32dcSDimitry Andric ctx->slot_queue.Remove(thr->slot);
10270eae32dcSDimitry Andric ctx->slot_queue.PushBack(thr->slot);
1028349cc55cSDimitry Andric }
10290eae32dcSDimitry Andric if (recycle)
10300eae32dcSDimitry Andric ctx->trace_part_recycle.PushBack(recycle);
103168d75effSDimitry Andric }
10320eae32dcSDimitry Andric DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
10330eae32dcSDimitry Andric trace->parts.Front(), trace->parts.Back(),
10340eae32dcSDimitry Andric atomic_load_relaxed(&thr->trace_pos));
103568d75effSDimitry Andric }
103668d75effSDimitry Andric
// Starts ignoring all memory accesses on this thread (nestable; paired with
// ThreadIgnoreEnd).
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  // Record the stack of the enabling site; skipped after a multi-threaded
  // fork (see ForkChildAfter).
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
104768d75effSDimitry Andric
// Ends one level of memory-access ignoring; clears the ignore bit and the
// recorded enabling stacks only when the nesting count drops to zero.
void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}
105968d75effSDimitry Andric
106068d75effSDimitry Andric #if !SANITIZER_GO
// Test-only helper: returns the number of frames currently on the calling
// thread's shadow stack.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
106668d75effSDimitry Andric #endif
106768d75effSDimitry Andric
// Starts ignoring synchronization operations on this thread (nestable;
// paired with ThreadIgnoreSyncEnd).
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  // Record the stack of the enabling site; skipped after a multi-threaded
  // fork (see ForkChildAfter).
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
107768d75effSDimitry Andric
// Ends one level of sync-operation ignoring; clears the recorded enabling
// stacks when the nesting count drops to zero.
void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}
108768d75effSDimitry Andric
operator ==(const MD5Hash & other) const108868d75effSDimitry Andric bool MD5Hash::operator==(const MD5Hash &other) const {
108968d75effSDimitry Andric return hash[0] == other.hash[0] && hash[1] == other.hash[1];
109068d75effSDimitry Andric }
109168d75effSDimitry Andric
// Build-flavor marker symbols; presumably referenced from a header so the
// linker flags a mix of debug and release objects -- TODO confirm where
// they are used.
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
109768d75effSDimitry Andric } // namespace __tsan
109868d75effSDimitry Andric
1099fe6060f1SDimitry Andric #if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
// Per-mutex-type metadata for the runtime's internal deadlock checking
// (only compiled under SANITIZER_CHECK_DEADLOCKS): each entry names a mutex
// type and lists related mutex types / attributes (MutexLeaf, MutexMulti).
// NOTE(review): the precise meaning of the edge lists (allowed to acquire
// while held vs. lock-order edges) is defined by the MutexMeta consumer in
// sanitizer_common -- confirm there before editing.
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

// Symbolizes and prints the frame for a single pc.
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
1127fe6060f1SDimitry Andric #endif
1128