//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
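  // Only TraceParts() of the kTraceParts trace-part headers are actually used,
  // so the tail of the Trace object past the last used header (rounded up to a
  // page boundary) is returned to the OS below.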
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized()
    , report_mtx(MutexTypeReport, StatMtxReport)
    , nreported()
    , nmissed_expected()
    , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
        CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
    , racy_mtx(MutexTypeRacy, StatMtxRacy)
    , racy_stacks()
    , racy_addresses()
    , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
    , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // , ignore_reads_and_writes()
    // , ignore_interceptors()
    , clock(tid, reuse_count)
#if !SANITIZER_GO
    , jmp_bufs()
#endif
    , tid(tid)
    , unique_id(unique_id)
    , stk_addr(stk_addr)
    , stk_size(stk_size)
    , tls_addr(tls_addr)
    , tls_size(tls_size)
#if !SANITIZER_GO
    , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
               &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();
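
    // Periodic housekeeping, each piece controlled by its flag:
    //  - flush_memory_ms: flush shadow memory every N ms;
    //  - memory_limit_mb: flush once RSS crosses the midpoint between the RSS
    //    recorded after the previous flush and the limit
    //    (2 * rss > limit + last_rss  <=>  rss > (limit + last_rss) / 2);
    //  - profile_memory: append a memory profile snapshot to mprof_fd;
    //  - flush_symbolizer_ms: drop symbolizer caches if no symbolization
    //    happened for N ms.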
    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
          addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
           addr, size);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}
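
// Fork support. ForkBefore() takes the thread registry and report locks so
// that no other thread holds them while the address space is duplicated;
// ForkParentAfter()/ForkChildAfter() release them again on both sides of the
// fork. A child forked from a multi-threaded parent cannot operate reliably,
// so it switches to ignore-everything mode (see ForkChildAfter()).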
#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
                                         newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}
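
// Shadow cells are read and written with relaxed atomics: several threads may
// update the same cell concurrently, and the algorithm tolerates observing a
// mix of old and new values. StoreIfNotYetStored() writes the current access
// into a free slot and zeroes the caller's copy of the value, which serves as
// the "already stored" flag for the remaining slots.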
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
                       int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
                       u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
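  // Each #include below expands to one iteration of the scan over the four
  // shadow slots (tsan_update_shadow_word_inl.h): a slot is classified as
  // 'same' (the current access is already covered), 'replace' (the old access
  // can be overwritten by the current one), 'candidate' (kept as is), or
  // 'race', in which case control jumps to the RACE label below.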
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(stored))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                           int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
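
// ContainsSameAccess*() implement the fast path of MemoryAccess(): if one of
// the four shadow slots already holds an access to the same bytes by the same
// thread, with the same atomicity, at least as strong as the current one (a
// recorded write covers a later read) and with an epoch newer than the
// thread's last synchronization, then the current access adds no information
// and can be skipped. The SSE3 variant checks all four slots in parallel.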
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]        = access[32:63]
  // addr0[32:63]       = access[32:63]
  // addr0[64:95]       = access[32:63]
  // addr0[96:127]      = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]    = shadow0[32:63]
  // addr_vect[32:63]   = shadow0[96:127]
  // addr_vect[64:95]   = shadow1[32:63]
  // addr_vect[96:127]  = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]        = sync_epoch[0:31]
  // epoch[32:63]       = sync_epoch[0:31]
  // epoch[64:95]       = sync_epoch[0:31]
  // epoch[96:127]      = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                  int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
           " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
           (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
           (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
           (uptr)shadow_mem[0], (uptr)shadow_mem[1],
           (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
                                thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }
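
  // Slower path: the access is not a repeat of a recent one by this thread.
  // Record it in the trace (when history collection is enabled) and run the
  // full shadow-word update.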
  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
                    shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
                      int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
                      u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
                                thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
                    shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
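
// MemoryRangeFreed() first touches the (truncated) range as a write, so that
// accesses racing with the free are reported, and then fills the shadow of the
// range with a value marked as freed, so that later accesses to this memory
// can be reported against the free.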
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif
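
// ThreadIgnoreSyncBegin()/ThreadIgnoreSyncEnd() mirror ThreadIgnoreBegin/End
// above, but suppress synchronization (happens-before) tracking rather than
// memory-access tracking; both kinds of ignores nest via their counters.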
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif