xref: /freebsd/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
1 //===-- sanitizer_stackdepot.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries.
11 //===----------------------------------------------------------------------===//
12 
13 #include "sanitizer_stackdepot.h"
14 
15 #include "sanitizer_atomic.h"
16 #include "sanitizer_common.h"
17 #include "sanitizer_hash.h"
18 #include "sanitizer_mutex.h"
19 #include "sanitizer_stack_store.h"
20 #include "sanitizer_stackdepotbase.h"
21 
22 namespace __sanitizer {
23 
// Node stored in the depot's hash table. Keeps only immutable data (the
// 64-bit trace hash and the handle into the StackStore); the mutable use
// counters live in a separate side table (useCounts below) so that these
// frequently read nodes stay cache-friendly.
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;     // full 64-bit MurMur2 hash of the trace
  u32 link;                 // index of the next node in the bucket chain
  StackStore::Id store_id;  // handle to the frames persisted in stackStore

  // log2 of the hash table size; smaller on Android to reduce memory use.
  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  // Equality is hash-only: two traces with the same 64-bit hash are treated
  // as the same stack (collisions are accepted as negligibly rare), so
  // |args| is deliberately unused here.
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
  // Hashes all PCs plus the tag, seeded with the trace byte size.
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  // Empty or null traces are rejected and never stored.
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};
52 
// Backing storage for the stack frames themselves.
static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;
64 
65 int StackDepotHandle::use_count() const {
66   return atomic_load_relaxed(&useCounts[id_]);
67 }
68 
69 void StackDepotHandle::inc_use_count_unsafe() {
70   atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
71 }
72 
73 uptr StackDepotNode::allocated() {
74   return stackStore.Allocated() + useCounts.MemoryUsage();
75 }
76 
77 static void CompressStackStore() {
78   u64 start = MonotonicNanoTime();
79   uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
80       Abs(common_flags()->compress_stack_depot)));
81   if (!diff)
82     return;
83   u64 finish = MonotonicNanoTime();
84   uptr total_before = theDepot.GetStats().allocated + diff;
85   VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
86           SanitizerToolName, diff >> 10, total_before >> 10,
87           (finish - start) / 1000000);
88 }
89 
90 namespace {
91 
// Background thread that compresses the stack store.  It is started lazily
// on the first NewWorkNotify() (when the compress_stack_depot flag is
// positive) and is torn down via Stop() or, around fork-like operations,
// via the LockAndStop()/Unlock() pair.
class CompressThread {
 public:
  constexpr CompressThread() = default;
  // Signals that new packed data may be worth compressing; may lazily start
  // the thread or fall back to synchronous compression.
  void NewWorkNotify();
  // Permanently stops and joins the thread.
  void Stop();
  // Stops the thread and returns with mutex_ still held; pair with Unlock().
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,   // internal_start_thread failed; compression runs synchronously
    Stopped,  // stopped via Stop(); will not be restarted
  };

  // Thread body: compress each time work is posted, until run_ drops to 0.
  void Run();

  // Blocks until work is posted; returns false when shutdown was requested.
  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};    // counts pending work notifications
  StaticSpinMutex mutex_ = {};  // guards state_ and thread_
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  // Nonzero while the thread should keep running; cleared before shutdown.
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;
123 
// Called when newly packed data may be worth compressing.  With a positive
// compress_stack_depot flag, lazily starts the background thread and posts
// work to it; otherwise (negative flag, or the thread failed to start)
// compression happens synchronously on the caller's thread.
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      // Publish run_=1 before starting the thread so its WaitForWork()
      // observes it (release pairs with the acquire load in WaitForWork).
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  // Synchronous fallback (flag is negative or the thread failed to start).
  CompressStackStore();
}
148 
149 void CompressThread::Run() {
150   VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
151   while (WaitForWork()) CompressStackStore();
152   VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
153 }
154 
// Permanently stops the background thread (if running) and joins it.
void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    // Grab the thread handle under the lock but join outside of it, to
    // avoid blocking other callers on mutex_ during the join.
    t = thread_;
    thread_ = nullptr;
  }
  // Clear run_ before waking the thread so its WaitForWork() returns false.
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}
170 
// Stops the thread and returns with mutex_ held; the caller must later call
// Unlock().  Used by StackDepotLockAll() (e.g. around fork()).
void CompressThread::LockAndStop() {
  mutex_.Lock();
  // NOTE: mutex_ intentionally stays locked even on this early return;
  // Unlock() releases it.
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);

  // Request shutdown and wake the thread so WaitForWork() returns false.
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow to restart after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}
184 
185 void CompressThread::Unlock() { mutex_.Unlock(); }
186 
187 }  // namespace
188 
189 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
190   stack_hash = hash;
191   uptr pack = 0;
192   store_id = stackStore.Store(args, &pack);
193   if (LIKELY(!pack))
194     return;
195   compress_thread.NewWorkNotify();
196 }
197 
198 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
199   if (!store_id)
200     return {};
201   return stackStore.Load(store_id);
202 }
203 
204 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
205 
206 u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }
207 
208 StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
209   return StackDepotNode::get_handle(theDepot.Put(stack));
210 }
211 
212 StackTrace StackDepotGet(u32 id) {
213   return theDepot.Get(id);
214 }
215 
// Acquires all depot-related locks and parks the compression thread (e.g.
// before fork()).  Must be paired with StackDepotUnlockAll(), which releases
// in the exact reverse order.
void StackDepotLockAll() {
  theDepot.LockAll();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}
221 
// Releases everything acquired by StackDepotLockAll(), in reverse order of
// acquisition.
void StackDepotUnlockAll() {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAll();
}
227 
// Dumps all stored stack traces.  Compiled out in the Go runtime build.
void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}
233 
234 void StackDepotStopBackgroundThread() { compress_thread.Stop(); }
235 
236 StackDepotHandle StackDepotNode::get_handle(u32 id) {
237   return StackDepotHandle(&theDepot.nodes[id], id);
238 }
239 
// Releases all depot memory back to the OS.  For tests only.
void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}
244 
245 } // namespace __sanitizer
246