//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

// Lifetime state of the calling thread's exclusive TSD.
enum class ThreadState : u8 {
  NotInitialized = 0, // initThread() has not run for this thread yet.
  Initialized,        // ThreadTSD is set up; the fast path can use it.
  TornDown,           // The thread exited and its TSD was committed back.
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.initLinkerInitialized(Instance);
    Initialized = true;
  }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void unmapTestOnly() {}

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    // Fast path: this thread's TSD is initialized and the registry is not
    // disabled, so the exclusive TSD can be used without locking.
    if (LIKELY(State == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    // Slow path: fall back to the shared TSD, which has to be locked.
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local
  // one (see the usage sketch following enable()).
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
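
  // A minimal usage sketch (hypothetical caller code, not part of this
  // header): the disable()/enable() pair brackets a critical section during
  // which no thread can commit to a TSD, e.g. around fork(). `Registry` is
  // an illustrative name for the allocator's TSDRegistryExT instance:
  //
  //   Registry.disable();         // Locks Mutex and FallbackTSD, and sets
  //                               // Disabled; all threads now funnel into
  //                               // the held FallbackTSD and block.
  //   const pid_t Child = fork(); // Other threads cannot acquire a TSD here.
  //   Registry.enable();          // Clears Disabled, unlocks in reverse.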

private:
  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead (see the sketch below).
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.initLinkerInitialized(Instance);
    State = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }
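
  // Illustrative call sequences (for exposition only; `Registry` and
  // `Instance` are placeholder names, not code in this file):
  //
  //   // Minimal init: global state is set up once via initOnceMaybe(), but
  //   // State stays NotInitialized, so getTSDAndLock() keeps returning the
  //   // locked FallbackTSD for this thread.
  //   Registry.initThreadMaybe(Instance, /*MinimalInit=*/true);
  //
  //   // Full init: registers the pthread destructor, initializes ThreadTSD,
  //   // and flips State to Initialized, enabling the lock-free fast path.
  //   Registry.initThreadMaybe(Instance, /*MinimalInit=*/false);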

  pthread_key_t PThreadKey;
  bool Initialized;
  atomic_u8 Disabled;
  TSD<Allocator> FallbackTSD; // Shared, lock-protected TSD used as fallback.
  HybridMutex Mutex;
  static THREADLOCAL ThreadState State;
  static THREADLOCAL TSD<Allocator> ThreadTSD; // This thread's exclusive TSD.

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to
  // PTHREAD_DESTRUCTOR_ITERATIONS passes. We want to be called last, since
  // other destructors might call free and the like, so we wait until the
  // final iteration before draining the quarantine and releasing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we proceed with the teardown right away.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State = ThreadState::TornDown;
}
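
// Sketch of the resulting teardown timeline (illustrative; assumes
// DestructorIterations starts at PTHREAD_DESTRUCTOR_ITERATIONS, which is 4 on
// glibc, the POSIX minimum). teardownThread re-arms the key on every pass but
// the last, so commitBack() runs as late as possible:
//
//   Pass 1: DestructorIterations 4 -> 3, pthread_setspecific() re-registers.
//   Pass 2: DestructorIterations 3 -> 2, re-registers again.
//   Pass 3: DestructorIterations 2 -> 1, re-registers again.
//   Pass 4: DestructorIterations == 1, commitBack() drains the quarantine and
//           releases the local cache; State becomes TornDown.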

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_