//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric
90b57cec5SDimitry Andric #ifndef SCUDO_TSD_EXCLUSIVE_H_
100b57cec5SDimitry Andric #define SCUDO_TSD_EXCLUSIVE_H_
110b57cec5SDimitry Andric
120b57cec5SDimitry Andric #include "tsd.h"
130b57cec5SDimitry Andric
1406c3fb27SDimitry Andric #include "string_utils.h"
1506c3fb27SDimitry Andric
160b57cec5SDimitry Andric namespace scudo {
170b57cec5SDimitry Andric
// Per-thread state for the exclusive TSD registry, packed into bitfields so
// the whole struct fits in a single byte of thread-local storage.
struct ThreadState {
  // When set, memory returned to this thread skips the usual zero/pattern
  // initialization (toggled via Option::ThreadDisableMemInit).
  bool DisableMemInit : 1;
  // Lifecycle of this thread's TSD: it moves NotInitialized -> Initialized
  // on first use, and Initialized -> TornDown from the pthread key
  // destructor. Once TornDown, the thread falls back to the shared TSD.
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
260b57cec5SDimitry Andric
270b57cec5SDimitry Andric template <class Allocator> void teardownThread(void *Ptr);
280b57cec5SDimitry Andric
// TSD registry with one exclusive TSD per thread (held in `thread_local`
// storage), plus a single mutex-protected fallback TSD shared by threads
// whose local TSD is unavailable (not yet initialized, torn down, or the
// registry is disabled). The fast path is lock-free: a thread uses its own
// TSD without any synchronization.
template <class Allocator> struct TSDRegistryExT {
  using ThisT = TSDRegistryExT<Allocator>;

  // RAII accessor: acquires a TSD in the constructor and, if the fallback
  // TSD was handed out, releases its lock in the destructor. The thread's
  // exclusive TSD requires no unlock.
  struct ScopedTSD {
    ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      DCHECK_NE(CurrentTSD, nullptr);
    }

    ~ScopedTSD() {
      // UnlockRequired is only set on the slow path (fallback TSD).
      if (UNLIKELY(UnlockRequired))
        CurrentTSD->unlock();
    }

    TSD<Allocator> &operator*() { return *CurrentTSD; }

    TSD<Allocator> *operator->() {
      // The exclusive TSD is never locked, so bypass the lock assertion in
      // that case (!UnlockRequired means we hold the thread-local TSD).
      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
      return CurrentTSD;
    }

  private:
    TSD<Allocator> *CurrentTSD;
    bool UnlockRequired;
  };

  // One-time registry initialization: registers the pthread key whose
  // destructor drains a thread's TSD at thread exit, and sets up the shared
  // fallback TSD. Caller must hold Mutex (see initOnceMaybe).
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  // Thread-safe wrapper around init(): cheap early-out once initialized.
  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  // Test-only teardown: commits back the current thread's TSD (if it was
  // registered with the pthread key) and the fallback TSD, then resets all
  // registry state so init() can run again.
  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    // Mutex only guards Initialized here; take it just before the write.
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate all thread local `ThreadTSD`s. Simply
    // drain the `ThreadTSD` of current thread and `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }

  // Fast-path guard: initializes the calling thread's TSD on first use.
  // Inlined so the common (already-initialized) case is a single check.
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  // and force all threads to attempt to use it instead of their local one.
  // Note: disable() leaves Mutex and FallbackTSD locked until enable() is
  // called, hence NO_THREAD_SAFETY_ANALYSIS.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  // Re-enables the registry; unlock order is the reverse of disable().
  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

  // Handles the per-thread options; MaxTSDsCount is not applicable to the
  // exclusive registry (there is exactly one TSD per thread), so it reports
  // failure. All other options are accepted.
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate all thread local `ThreadTSD`s. Instead of
    // printing only self `ThreadTSD` which may mislead the usage, we just skip
    // it.
    Str->append("Exclusive TSD don't support iterating each TSD\n");
  }

private:
  // Returns the TSD for the calling thread. Fast path: the thread's own TSD,
  // no locking (sets *UnlockRequired to false). Slow path (thread not
  // initialized, torn down, or registry disabled): locks and returns the
  // shared fallback TSD, which the caller must unlock (*UnlockRequired true).
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // Using minimal initialization allows for global initialization while keeping
  // the thread specific structure untouched. The fallback structure will be
  // used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    // Register with the pthread key so teardownThread fires at thread exit.
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  // Non-zero while disable() is in effect; checked on the fast path.
  atomic_u8 Disabled = {};
  // Shared TSD used by threads that cannot use their exclusive one.
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  // Per-thread state and TSD; defined out-of-class below.
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  // teardownThread needs access to PThreadKey and ThreadTSD.
  friend void teardownThread<Allocator>(void *Ptr);
};
1700b57cec5SDimitry Andric
// Out-of-class definitions of the static thread_local members declared in
// TSDRegistryExT (required for static data members of a class template).
template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
1750b57cec5SDimitry Andric
// pthread key destructor for a thread's exclusive TSD. `Ptr` is the value
// registered via pthread_setspecific in initThread, i.e. the Allocator
// instance. Defers the actual teardown across destructor iterations so it
// runs after other TLS destructors that may still allocate/free.
template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    // Re-registering the key makes glibc invoke this destructor again on the
    // next iteration.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  // Final iteration (or re-registration failed): return this thread's cached
  // resources, then mark the TSD TornDown so later allocations from this
  // thread use the fallback TSD.
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}
1950b57cec5SDimitry Andric
1960b57cec5SDimitry Andric } // namespace scudo
1970b57cec5SDimitry Andric
1980b57cec5SDimitry Andric #endif // SCUDO_TSD_EXCLUSIVE_H_
199