//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned addresses.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry =
        new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  if (was_running)
    DTLS_Destroy();
}

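// Fiber support. StartSwitchFiber records the destination stack bounds and
// detaches the current fake stack; FinishSwitchFiber (below) restores the
// saved fake stack and commits the new bounds once execution has resumed on
// the destination stack.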
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is dying; destroy its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; the procedure also has to be async-signal
// safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks whether the state is 0 and, if so, changes it to 1;
  // on success, the winner initializes the pointer.
  if (atomic_compare_exchange_strong(
      reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
      memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
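  // If the CAS fails, another thread (or a signal handler interrupting this
  // one) is already initializing the fake stack; fall through and let the
  // caller proceed without a fake stack for this access.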
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its parent.
    // Most platforms call AsanThread::Init on the newly-spawned thread, but
    // Fuchsia calls this function from the parent thread.  To support that
    // approach, we avoid calling AsyncSignalSafeLazyInitFakeStack here; it will
    // be called by the new thread when it first attempts to access the fake
    // stack.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

thread_return_t AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

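  // An instrumented stack frame starts with three words: the frame magic,
  // a pointer to the frame description string, and the function PC; the
  // reads of ptr[0]..ptr[2] below rely on this layout.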
  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init and
      // cleans up TSD. Try to figure out whether this is still the main
      // thread by the stack address. We are not entirely sure that we have
      // the correct main thread limits, so only do this magic on Android,
      // and only if the found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockThreadRegistry() { __asan::asanThreadRegistry().Lock(); }

void UnlockThreadRegistry() { __asan::asanThreadRegistry().Unlock(); }

static ThreadRegistry *GetAsanThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;

  fake_stack->ForEachFakeFrame(
      [](uptr begin, uptr end, void *arg) {
        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
            {begin, end});
      },
      ranges);
}

void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        GetThreadExtraStackRangesLocked(
            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
      },
      ranges);
}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *ptrs) {
        // Look for the arg pointer of threads that have been created or are
        // running. This is necessary to prevent false positive leaks due to
        // the AsanThread holding the only live reference to a heap object.
        // This can happen because the `pthread_create()` interceptor doesn't
        // wait for the child thread to start before returning, thus losing
        // the only live reference to the heap object on the stack.

        __asan::AsanThreadContext *atctx =
            static_cast<__asan::AsanThreadContext *>(tctx);

        // Note that ThreadStatusRunning is required because there is a small
        // window where the thread status switches to `ThreadStatusRunning`
        // but the `arg` pointer isn't on the child's stack yet.
        if (atctx->status != ThreadStatusCreated &&
            atctx->status != ThreadStatusRunning)
          return;

        uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
        if (!thread_arg)
          return;

        auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
        ptrsVec->push_back(thread_arg);
      },
      ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning)
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
      },
      threads);
}

void FinishThreadLocked(u32 tid) {
  GetAsanThreadRegistryLocked()->FinishThread(tid);
}

} // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

585 extern "C" {
586 SANITIZER_INTERFACE_ATTRIBUTE
587 void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
588                                     uptr size) {
589   AsanThread *t = GetCurrentThread();
590   if (!t) {
591     VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
592     return;
593   }
594   t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
595 }
596 
597 SANITIZER_INTERFACE_ATTRIBUTE
598 void __sanitizer_finish_switch_fiber(void* fakestack,
599                                      const void **bottom_old,
600                                      uptr *size_old) {
601   AsanThread *t = GetCurrentThread();
602   if (!t) {
603     VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
604     return;
605   }
606   t->FinishSwitchFiber((FakeStack*)fakestack,
607                        (uptr*)bottom_old,
608                        (uptr*)size_old);
609 }
610 }
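
// Usage sketch for the fiber-switching interface above. The context-switch
// primitive (swapcontext here) and the stack variables are illustrative
// assumptions, not part of this file:
//
//   void *fake_stack_save = nullptr;
//   // Before leaving the current fiber, announce the destination stack.
//   __sanitizer_start_switch_fiber(&fake_stack_save, next_bottom, next_size);
//   swapcontext(&this_ctx, &next_ctx);
//   // After control returns to this fiber, restore its fake stack and
//   // optionally retrieve the bounds of the stack we switched away from.
//   const void *old_bottom;
//   uptr old_size;
//   __sanitizer_finish_switch_fiber(fake_stack_save, &old_bottom, &old_size);
//
// Passing nullptr as the first argument of __sanitizer_start_switch_fiber
// when switching out of a dying fiber destroys its fake stack (see
// StartSwitchFiber above).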
611