//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"

namespace scudo {

struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it) occupy
  // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }
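
  // For instance (illustrative): if batch A holds {p1} and batch B holds
  // {p2, p3}, then after A.merge(&B), A holds {p1, p2, p3}, A.Size has grown
  // by B->getQuarantinedSize(), and B is left empty (Count == 0) but still
  // accounts for its own sizeof(QuarantineBatch) until deallocated.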

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8KB.
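
// A back-of-the-envelope check of MaxCount (a sketch assuming typical
// ILP32/LP64 layouts): on 64-bit,
//   8 (Next) + 8 (Size) + 4 (Count) + 4 (padding) + 1019 * 8 = 8176 bytes,
// and on 32-bit,
//   4 (Next) + 4 (Size) + 4 (Count) + 1019 * 4 = 4088 bytes,
// leaving room for the allocator's chunk header within the 8192/4096 byte
// footprints quoted above.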

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its Size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to the deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};

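  // Note: these helpers use a relaxed load/store pair rather than an atomic
  // add; this is presumably safe because mutation of a cache is externally
  // serialized (each cache is either thread-local or guarded by a mutex),
  // leaving only the harmless racy read in getSize().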
  void addToSize(uptr Add) { atomic_store_relaxed(&Size, getSize() + Add); }
  void subFromSize(uptr Sub) { atomic_store_relaxed(&Size, getSize() - Sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
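//
// For illustration, a minimal conforming callback might look like the sketch
// below (ExampleCallback and the malloc/free stand-ins are assumptions, not
// part of this interface):
//
//   struct ExampleCallback {
//     void recycle(Node *Ptr) { /* return the chunk to the allocator */ }
//     void *allocate(uptr Size) { return malloc(Size); }
//     void deallocate(void *Ptr) { free(Ptr); }
//   };
//
// The quarantine would then be instantiated as
//   GlobalQuarantine<ExampleCallback, Node> Quarantine;
// and fed through Quarantine.put(&ThreadLocalCache, Cb, Ptr, Size).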
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  void init(uptr Size, uptr CacheSize) {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine
    // size is zero (it allows us to perform just one atomic read per put()
    // call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  void NOINLINE drain(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
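    // Only one thread recycles at a time; the others fail the tryLock below
    // and simply return, leaving their transferred blocks for the winner.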
    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) const {
    // Assumes that the world is stopped, just like the allocator's
    // printStats().
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() {
    // RecycleMutex must be locked first since we grab CacheMutex within
    // recycle().
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // The members below are cache-line aligned to avoid false sharing; the
  // size limits are only written during init() and are effectively read-only
  // afterwards.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache;
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  void NOINLINE recycle(uptr MinSize, Callback Cb) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (since the memory they use
      // is counted against the quarantine limit) can outweigh the actual
      // user's quarantined chunks, which defeats the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the batch list is unlikely to contain batches suitable for merging.
      constexpr uptr OverheadThresholdPercents = 100;
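      // The integer inequality below is equivalent to
      //   OverheadSize / (CacheSize - OverheadSize)
      //       > OverheadThresholdPercents / 100,
      // i.e. merge once the overhead exceeds that percentage of the actual
      // quarantined payload (at 100%, once the batch headers cost more than
      // the user data they track).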
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
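      // Software-pipelined recycling: prime the prefetch window first, then
      // keep fetching NumberOfPrefetch entries ahead of the chunk being
      // recycled.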
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_