//===-- memprof_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "memprof_thread.h"
#include "memprof_allocator.h"
#include "memprof_interceptors.h"
#include "memprof_mapping.h"
#include "memprof_stack.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __memprof {

// MemprofThreadContext implementation.

void MemprofThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void MemprofThreadContext::OnFinished() {
  // Drop the link to the MemprofThread object.
  thread = nullptr;
}

static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *memprof_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetMemprofThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) MemprofThreadContext(tid);
}

ThreadRegistry &memprofThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse MemProf threads: we store a pointer to MemprofThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse a MemprofThreadContext for another
    // thread before all TSD destructors have been called for it.
    memprof_thread_registry = new (thread_registry_placeholder)
        ThreadRegistry(GetMemprofThreadContext);
    initialized = true;
  }
  return *memprof_thread_registry;
}

MemprofThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<MemprofThreadContext *>(
      memprofThreadRegistry().GetThreadLocked(tid));
}

// MemprofThread implementation.

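// The MemprofThread object is allocated with MmapOrDie, with its size rounded
// up to a whole number of pages, and the same region is released with
// UnmapOrDie in Destroy(); it is never obtained from the profiled allocator.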
MemprofThread *MemprofThread::Create(thread_callback_t start_routine, void *arg,
                                     u32 parent_tid, StackTrace *stack,
                                     bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(MemprofThread), PageSize);
  MemprofThread *thread = (MemprofThread *)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  MemprofThreadContext::CreateThreadContextArgs args = {thread, stack};
  memprofThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread),
                                       detached, parent_tid, &args);

  return thread;
}

void MemprofThread::TSDDtor(void *tsd) {
  MemprofThreadContext *context = (MemprofThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void MemprofThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  memprofThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  uptr size = RoundUpTo(sizeof(MemprofThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

inline MemprofThread::StackBounds MemprofThread::GetStackBounds() const {
  if (stack_bottom_ >= stack_top_)
    return {0, 0};
  return {stack_bottom_, stack_top_};
}

uptr MemprofThread::stack_top() { return GetStackBounds().top; }

uptr MemprofThread::stack_bottom() { return GetStackBounds().bottom; }

uptr MemprofThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

void MemprofThread::Init(const InitOptions *options) {
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_,
          stack_top_ - stack_bottom_, &local);
}

thread_return_t
MemprofThread::ThreadStart(tid_t os_id,
                           atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  memprofThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular,
                                      nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  return start_routine_(arg_);
}

MemprofThread *CreateMainThread() {
  MemprofThread *main_thread = MemprofThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid(),
                           /* signal_thread_is_registered */ nullptr);
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init() (see above). It's only there to support
// OS-specific implementations that need more information passed through.
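// The stack and TLS bounds themselves are obtained from sanitizer_common's
// GetThreadStackAndTls().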
void MemprofThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

bool MemprofThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

MemprofThread *GetCurrentThread() {
  MemprofThreadContext *context =
      reinterpret_cast<MemprofThreadContext *>(TSDGet());
  if (!context)
    return nullptr;
  return context->thread;
}

void SetCurrentThread(MemprofThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current MemprofThread.
  CHECK_EQ(0, TSDGet());
  TSDSet(t->context());
  CHECK_EQ(t->context(), TSDGet());
}

u32 GetCurrentTidOrInvalid() {
  MemprofThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

void EnsureMainThreadIDIsCorrect() {
  MemprofThreadContext *context =
      reinterpret_cast<MemprofThreadContext *>(TSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}
} // namespace __memprof