//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif

static ThreadContextBase *CreateThreadContext(Tid tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx)\n", hdr_end,
             hdr_end + unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  return New<ThreadContext>(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
                      kMaxTidReuse),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses a fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr *>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses a malloc-allocated shadow stack with dynamic size.
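  // The stack starts small and is doubled by GrowShadowStack() whenever it
  // fills up (see CurrentStackId() below).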
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

void InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return;
    }
  }
  MemoryProfiler(0);
  MaybeSpawnBackgroundThread();
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
       i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
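    // Taking both the report mutex and the error report lock ensures the
    // cache is not flushed in the middle of symbolizing a report.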
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
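    // Only extend the meta mapping past what previous calls already mapped;
    // mapped_meta_end tracks the high-water mark.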
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
           addr, size);
    Die();
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  InitializeMemoryProfiler();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry.Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc,
                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
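  // Also enabled in ForkBefore.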
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();

  uptr nthread = 0;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
             " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

namespace v3 {

NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvent's.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure we setup trace so that the next TraceAcquire
      // won't detect trace part end.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
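    // Reuse the current part and overwrite it from the beginning rather than
    // allocating a new one.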
    CHECK(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                   d.addr, d.stack_id);
  }
}

}  // namespace v3

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif