//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

#if SANITIZER_APPLE
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
#endif

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
#endif
alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#  if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#  endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#  if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#  endif
}
#endif

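// Allocates a trace part for the given thread. Once the thread has reached
// its limit of allocated parts (or there is an excess of finished parts
// globally), a part is reused from the recycle list; otherwise a fresh part
// is mmap'ed.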
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

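// The global reset: frees finished trace parts of all registered threads,
// returns all slots to the initial epoch and re-queues them, re-zeroes the
// shadow memory and resets the metamap clocks. Called with all slot mutexes
// held (see DoReset below).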
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tids can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call TraceSwitchPart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

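// Finds a slot that still has free epochs, locks it and returns it.
// If every slot in the queue is exhausted, triggers a global reset
// (DoReset) and retries.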
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

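// Attaches the thread to a free slot: bumps the slot epoch, points the
// thread's fast state at the slot, and resets the thread's vector clock
// if a global reset happened since the thread last held a slot.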
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

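// Detaches the thread from its slot. If another thread has already preempted
// the slot, only stale thread-local trace state needs to be dropped;
// otherwise the slot records the thread's final epoch and is released.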
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread is preempted right after it acquired the slot
        // in ThreadStart and has not traced any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

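// Locks the thread's current slot. If the slot was preempted by another
// thread or its epoch is exhausted, detaches from it and attaches to a
// fresh slot via SlotAttachAndLock.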
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
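// Dumps a memory usage snapshot into the profile file opened by
// InitializeMemoryProfiler (flags()->profile_memory).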
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to tell us whether the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try not to
// crash as the failure mode is very confusing (a page fault inside the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  // UnmapShadow follows the semantics of mmap/munmap, so we
  // should clear the whole shadow range, including the tail shadow,
  // when (addr + size) % kShadowCell != 0.
  uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr;
  DontNeedShadowFor(addr, rounded_size_shadow);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
  ctx->metamap.ResetRange(thr->proc(), addr, rounded_size_meta, true);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Although named MapShadow, this function's semantics are unrelated to
  // UnmapShadow. It is currently only used for Go's lazy allocation of
  // shadow, whose targets are program sections (e.g., bss, data, etc.).
  // Therefore, we can guarantee that addr and size are aligned to kShadowCell
  // and kMetaShadowCell by the following assertions.
  DCHECK_EQ(addr % kShadowCell, 0);
  DCHECK_EQ(size % kShadowCell, 0);
  DCHECK_EQ(addr % kMetaShadowCell, 0);
  DCHECK_EQ(size % kMetaShadowCell, 0);

  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
       ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  // Windows wants 64K alignment.
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStack(StackTrace::GetCurrentPc(),
                    common_flags()->fast_unwind_on_fatal);
}

bool is_initialized;

// Symbolization indirectly calls dl_iterate_phdr. If a CHECK() fails early on
// (prior to the dl_iterate_phdr interceptor setup), resulting in an attempted
// symbolization, it will segfault.
// dl_iterate_phdr is not intercepted for Android.
bool ready_to_symbolize = SANITIZER_ANDROID;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

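// Computes the process exit status: common_flags()->exitcode if any races
// were reported (unless OnFinalize overrides the verdict), 0 otherwise.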
int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
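// Called before fork(): detaches from the slot and acquires all runtime
// mutexes so the child does not inherit them in a locked state; reports and
// interceptors stay suppressed until ForkParentAfter/ForkChildAfter.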
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory writes in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

#  if SANITIZER_APPLE
  __tsan_test_only_on_fork();
#  endif
}

static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, false); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr, true);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

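// Returns the stack depot id of the current shadow stack, temporarily
// pushing pc (if non-zero) as the topmost frame.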
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

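// Handles the case when TraceAcquire fails in the middle of a trace part:
// fills the alignment gap with NopEvents and returns true if usable space
// remains in the part, or pads the tail and returns false if the part is
// genuinely full (or absent).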
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvents.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

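// Switches the thread to a freshly allocated trace part and replays the
// current shadow stack and mutex set into it, so that the part can be
// decoded without access to the preceding parts.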
void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock at the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it becomes kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif