//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"

namespace scudo {

struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it)
  // occupies 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit ones.
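  // (For instance, on 64-bit with the usual layout: 8 (Next) + 8 (Size) +
  // 4 (Count) + 4 of implicit padding + 1019 * 8 (Batch) = 8176 bytes, which
  // leaves room for the chunk header within the 8192-byte budget.)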
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the batch itself.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
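  // For example, after init(Ptr, 100) the batch's Size is
  // 100 + sizeof(QuarantineBatch), and getQuarantinedSize() returns 100.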

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8KB.

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void initLinkerInitialized() {}
  void init() {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized();
  }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to the deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }
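  // (For example, the loop above merges two consecutive batches holding 400
  // and 500 chunks into a single batch of 900, since 900 <= MaxCount, and
  // hands the emptied batch to ToDeallocate, reclaiming its
  // sizeof(QuarantineBatch) of overhead.)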

  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size;

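  // Note: these updates are plain load/store pairs rather than atomic
  // read-modify-writes; they rely on external synchronization (the owning
  // thread, or CacheMutex for the global cache), while the atomic type keeps
  // concurrent getSize() readers well-defined.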
  void addToSize(uptr Add) { atomic_store_relaxed(&Size, getSize() + Add); }
  void subFromSize(uptr Sub) { atomic_store_relaxed(&Size, getSize() - Sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
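//
// A minimal conforming shape (hypothetical, shown for illustration only; the
// real callback is supplied by the allocator front-end):
//
//   struct ExampleCallback {
//     void recycle(Node *Ptr);    // Actually release the quarantined chunk.
//     void *allocate(uptr Size);  // Provide backing memory for a batch.
//     void deallocate(void *Ptr); // Release a drained batch.
//   };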
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;

  void initLinkerInitialized(uptr Size, uptr CacheSize) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic read
    // per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.initLinkerInitialized();
  }
  void init(uptr Size, uptr CacheSize) {
    CacheMutex.init();
    Cache.init();
    RecycleMutex.init();
    MinSize = {};
    MaxSize = {};
    MaxCacheSize = {};
    initLinkerInitialized(Size, CacheSize);
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  void NOINLINE drain(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) const {
    // Assumes that the world is stopped, just like the allocator's printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() {
    // RecycleMutex must be locked first since we grab CacheMutex within
    // recycle().
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache;
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize;
  atomic_uptr MaxSize;
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;

  void NOINLINE recycle(uptr MinSize, Callback Cb) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the actual user's
      // quarantined chunks, which defeats the purpose of the quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when the overhead exceeds this predefined limit
      // (might require some tuning). It saves us a merge attempt when the
      // quarantine batch list is unlikely to contain batches suitable for
      // merging.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};
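
// Typical wiring (a hypothetical sketch; in this codebase the allocator
// front-end in combined.h instantiates and drives the quarantine):
//
//   typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
//   QuarantineT Quarantine;
//   Quarantine.init(/*Size=*/256 << 10, /*CacheSize=*/64 << 10);
//   // On deallocation, instead of freeing the chunk immediately:
//   Quarantine.put(&ThreadCache, Callback, Ptr, ChunkSize);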

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_