//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry =
        new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.
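//
// Lifecycle summary (as implemented below): Create() mmaps the AsanThread
// object and registers it with asanThreadRegistry(); the new thread then runs
// Init() and ThreadStart(); Destroy() -- normally reached from the TSD
// destructor -- commits malloc stats, unpoisons the stack/TLS shadow and
// unmaps the object. AsanThreadContext objects, by contrast, are never reused
// or freed (see asanThreadRegistry() above).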

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  if (was_running)
    DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is about to die; destroy its
  // fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

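// StartSwitchFiber() and FinishSwitchFiber() back the public fiber-switching
// API (__sanitizer_start_switch_fiber / __sanitizer_finish_switch_fiber,
// defined at the bottom of this file). A rough usage sketch from user code;
// the names and the ucontext-based switching are illustrative only:
//
//   void SwitchTo(ucontext_t *from, ucontext_t *to,
//                 const void *to_stack_bottom, size_t to_stack_size) {
//     void *fake_stack_save = nullptr;
//     // Announce the destination stack before leaving the current one.
//     __sanitizer_start_switch_fiber(&fake_stack_save, to_stack_bottom,
//                                    to_stack_size);
//     swapcontext(from, to);
//     // Control is back on this fiber: restore its fake stack and bounds.
//     __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
//   }
//
// A fiber's entry function must likewise call __sanitizer_finish_switch_fiber
// when it first starts running, and a fiber that is about to be destroyed
// passes nullptr for fake_stack_save so that its fake stack is destroyed
// (see StartSwitchFiber above).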

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_top_/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on first use, but not before the
// stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls it from the parent thread. To support that
    // approach, the tid() check above skips this call when Init runs on the
    // parent; the new thread will initialize the fake stack when it first
    // attempts to access it.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

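// ThreadStart() below is normally reached from the platform's thread-creation
// interceptor: the parent calls AsanThread::Create() and spawns the child with
// a small trampoline that makes the thread current and then runs ThreadStart().
// A simplified sketch (the real trampoline lives in asan_interceptors.cpp and
// differs in details):
//
//   static thread_return_t asan_thread_start(void *arg) {
//     AsanThread *t = static_cast<AsanThread *>(arg);
//     SetCurrentThread(t);
//     return t->ThreadStart(GetTid());
//   }
//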
// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

thread_return_t AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy()
  // before the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
                                        tls_end_ - tls_begin_aligned,
                                        tls_end_aligned - tls_end_, 0);
  }
}

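// Given an address inside this thread's stack (or fake stack), find the
// enclosing function frame by scanning the shadow backwards to the frame's
// left redzone, and report the frame's PC and description string. The frame
// header layout assumed here is: word [0] = kCurrentStackFrameMagic,
// word [1] = frame description, word [2] = frame PC.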
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans
      // up TSD. Try to figure out if this is still the main thread by the
      // stack address. We are not entirely sure that we have correct main
      // thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

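// Scans every registered thread's stack and fake stack for |addr|; the caller
// must already hold the thread registry lock (hence CheckLocked()).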
AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;
  fake_stack->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack,
                       (uptr *)bottom_old,
                       (uptr *)size_old);
}
}