//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {
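// Per-thread state of the exclusive TSD: whether memory initialization is
// disabled for this thread, and whether the thread-local TSD has been
// initialized or already torn down. Packed into bit-fields to keep the
// structure small.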
struct ThreadState {
  bool DisableMemInit : 1;
  // Give the enum a fixed unsigned underlying type so the 2-bit bit-field
  // can portably hold all three enumerators.
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

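// Registry handing out "exclusive" TSDs: each thread gets its own TSD through
// a thread_local variable, so the fast path needs no locking. A single shared
// FallbackTSD serves threads that are not (or only minimally) initialized,
// and all threads while the registry is disabled.
//
// A minimal usage sketch, assuming a hypothetical `MyAllocator` that
// satisfies the interface this registry expects (illustrative only, not part
// of this header):
//
//   MyAllocator Instance;
//   scudo::TSDRegistryExT<MyAllocator> Registry;
//   Registry.initThreadMaybe(&Instance, /*MinimalInit=*/false);
//   bool UnlockRequired;
//   auto *TSD = Registry.getTSDAndLock(&UnlockRequired);
//   // ... allocate through the TSD's local cache ...
//   if (UnlockRequired)
//     TSD->unlock();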
template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    Initialized = false;
  }

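  // Checked on the allocation fast path: initializes the calling thread's
  // TSD on first use. With MinimalInit set, only the global initialization
  // is carried out and the thread keeps using the fallback TSD.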
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

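  // Returns the TSD for the calling thread. On the fast path (thread fully
  // initialized, registry not disabled) this is the lock-free thread-local
  // TSD and *UnlockRequired is set to false. Otherwise the shared fallback
  // TSD is returned locked, and the caller must unlock it when done.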
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local
  // one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

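  // ThreadDisableMemInit toggles per-thread memory initialization;
  // MaxTSDsCount does not apply to an exclusive registry (there is exactly
  // one TSD per thread), so it is reported as unsupported.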
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

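  // The pthread key is only used to register teardownThread() as this
  // thread's destructor; its stored value is the Allocator instance, while
  // the actual TSD lives in the thread_local ThreadTSD below.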
  pthread_key_t PThreadKey = {};
  bool Initialized = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

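// Thread-specific-data destructor, registered via pthread_key_create() in
// init(). Ptr is the Allocator instance stored with pthread_setspecific().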
template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and
  // the like, so we re-register ourselves for later iterations and only
  // drain the quarantine and release the cache on the last one.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we proceed with the teardown anyway.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_