1 //===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #ifndef SCUDO_TSD_EXCLUSIVE_H_ 10 #define SCUDO_TSD_EXCLUSIVE_H_ 11 12 #include "tsd.h" 13 14 #include <pthread.h> 15 16 namespace scudo { 17 18 enum class ThreadState : u8 { 19 NotInitialized = 0, 20 Initialized, 21 TornDown, 22 }; 23 24 template <class Allocator> void teardownThread(void *Ptr); 25 26 template <class Allocator> struct TSDRegistryExT { 27 void initLinkerInitialized(Allocator *Instance) { 28 Instance->initLinkerInitialized(); 29 CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0); 30 FallbackTSD = reinterpret_cast<TSD<Allocator> *>( 31 map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd")); 32 FallbackTSD->initLinkerInitialized(Instance); 33 Initialized = true; 34 } 35 void init(Allocator *Instance) { 36 memset(this, 0, sizeof(*this)); 37 initLinkerInitialized(Instance); 38 } 39 40 void unmapTestOnly() { 41 unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>)); 42 } 43 44 ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) { 45 if (LIKELY(State != ThreadState::NotInitialized)) 46 return; 47 initThread(Instance, MinimalInit); 48 } 49 50 ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) { 51 if (LIKELY(State == ThreadState::Initialized)) { 52 *UnlockRequired = false; 53 return &ThreadTSD; 54 } 55 DCHECK(FallbackTSD); 56 FallbackTSD->lock(); 57 *UnlockRequired = true; 58 return FallbackTSD; 59 } 60 61 private: 62 void initOnceMaybe(Allocator *Instance) { 63 ScopedLock L(Mutex); 64 if (Initialized) 65 return; 66 initLinkerInitialized(Instance); // Sets Initialized. 67 } 68 69 // Using minimal initialization allows for global initialization while keeping 70 // the thread specific structure untouched. The fallback structure will be 71 // used instead. 72 NOINLINE void initThread(Allocator *Instance, bool MinimalInit) { 73 initOnceMaybe(Instance); 74 if (MinimalInit) 75 return; 76 CHECK_EQ( 77 pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0); 78 ThreadTSD.initLinkerInitialized(Instance); 79 State = ThreadState::Initialized; 80 } 81 82 pthread_key_t PThreadKey; 83 bool Initialized; 84 TSD<Allocator> *FallbackTSD; 85 HybridMutex Mutex; 86 static THREADLOCAL ThreadState State; 87 static THREADLOCAL TSD<Allocator> ThreadTSD; 88 89 friend void teardownThread<Allocator>(void *Ptr); 90 }; 91 92 template <class Allocator> 93 THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD; 94 template <class Allocator> 95 THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State; 96 97 template <class Allocator> void teardownThread(void *Ptr) { 98 typedef TSDRegistryExT<Allocator> TSDRegistryT; 99 Allocator *Instance = reinterpret_cast<Allocator *>(Ptr); 100 // The glibc POSIX thread-local-storage deallocation routine calls user 101 // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. 102 // We want to be called last since other destructors might call free and the 103 // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the 104 // quarantine and swallowing the cache. 105 if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) { 106 TSDRegistryT::ThreadTSD.DestructorIterations--; 107 // If pthread_setspecific fails, we will go ahead with the teardown. 108 if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey, 109 Ptr) == 0)) 110 return; 111 } 112 TSDRegistryT::ThreadTSD.commitBack(Instance); 113 TSDRegistryT::State = ThreadState::TornDown; 114 } 115 116 } // namespace scudo 117 118 #endif // SCUDO_TSD_EXCLUSIVE_H_ 119