//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include "string_utils.h"

namespace scudo {

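// Per-thread state. Tracks whether this thread's exclusive TSD has been
// initialized (or torn down at thread exit), and whether memory
// initialization has been disabled for this thread via
// Option::ThreadDisableMemInit.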
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

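// Thread-exit hook, registered as the pthread key destructor in
// TSDRegistryExT::init() and defined at the bottom of this file.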
template <class Allocator> void teardownThread(void *Ptr);

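// Registry that hands each thread its own exclusive TSD, stored in
// `thread_local` variables, so the fast path needs no locking. Threads that
// are not fully initialized, and all threads while the registry is disabled,
// fall back to a single shared `FallbackTSD` protected by its own lock.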
template <class Allocator> struct TSDRegistryExT {
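  // One-time setup: initializes the allocator instance, registers the
  // thread-exit destructor and prepares the fallback TSD. Callers must hold
  // `Mutex` (see initOnceMaybe()).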
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

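  // Test-only teardown: commits the per-thread and fallback TSDs back to the
  // allocator, deletes the pthread key, and resets the registry to its
  // uninitialized state.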
  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s, so we
    // simply drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }

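  // Cheap check meant for hot paths: returns immediately once the calling
  // thread's state is initialized; otherwise defers to the out-of-line
  // initThread() below.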
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // TODO(chiahungduan): Consider removing the `UnlockRequired` argument by
  // embedding the logic into TSD, or by always locking the TSD. That would
  // let us add proper thread annotations here, as well as runtime assertions
  // in the member functions of TSD, e.g., asserting that the lock is held
  // before calling TSD::commitBack().
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

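  // A typical caller pattern, as a sketch (the real call sites live in the
  // combined allocator, not in this file):
  //   bool UnlockRequired;
  //   auto *TSD = Registry->getTSDAndLock(&UnlockRequired);
  //   // ... use the TSD's local cache ...
  //   if (UnlockRequired)
  //     TSD->unlock();
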
  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  // and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

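  // `ThreadDisableMemInit` is tracked per thread in `State`. `MaxTSDsCount`
  // does not apply to the exclusive registry (there is no TSD pool), so it is
  // reported as unsupported.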
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s.
    // Printing only the current thread's `ThreadTSD` could be misleading, so
    // we just skip it.
    Str->append("Exclusive TSD doesn't support iterating each TSD\n");
  }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

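// Out-of-line definitions for the static `thread_local` members declared
// above.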
template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

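// pthread key destructor. It keeps re-registering itself so that it runs
// after other TLS destructors that might still use the allocator, and only
// then commits the thread's TSD back.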
template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
  // rounds. We want to be called last, since other destructors might call free
  // and the like, so we re-register ourselves until the final iteration before
  // draining the quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_