//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "common.h"
#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger allocations.
// Allocated blocks are preceded and followed by a guard page, and hold their
// own header that is not checksummed: the guard pages and the Combined
// allocator's header should be enough for our purpose.
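//
// The memory layout produced by allocate() below is, roughly:
//
//   MapBase         CommitBase                        BlockEnd       MapEnd
//   | guard page   | Header | user allocation ...     | guard page   |
//
// with the user pointer returned right past the header.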

namespace LargeBlock {

struct Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr BlockEnd;
  uptr MapBase;
  uptr MapSize;
  MapPlatformData Data;
};

constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}

static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

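// A no-op cache: store() and retrieve() always fail, so Secondary blocks are
// always mapped on allocation and unmapped immediately on deallocation.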
class MapAllocatorNoCache {
public:
  void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H) {
    return false;
  }
  bool store(UNUSED LargeBlock::Header *H) { return false; }
  static bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void setReleaseToOsIntervalMs(UNUSED s32 Interval) {}
};

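// Caches up to MaxEntriesCount recently freed Secondary blocks of size at
// most MaxEntrySize (512KB by default), so that an allocation of a similar
// size can reuse an existing mapping instead of going back to the OS.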
template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19,
          s32 MinReleaseToOsIntervalMs = INT32_MIN,
          s32 MaxReleaseToOsIntervalMs = INT32_MAX>
class MapAllocatorCache {
public:
  // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
  // arrays are an extension for some compilers.
  // FIXME(kostyak): support (partially) the cache on Fuchsia.
  static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    setReleaseToOsIntervalMs(ReleaseToOsInterval);
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

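  // Attempts to cache the freed block H, keeping the most recently stored
  // entry at index 0. If store() keeps finding the cache full, the whole
  // cache is emptied; otherwise, entries older than the configured release
  // interval get their pages released back to the OS.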
  bool store(LargeBlock::Header *H) {
    bool EntryCached = false;
    bool EmptyCache = false;
    const u64 Time = getMonotonicTime();
    {
      ScopedLock L(Mutex);
      if (EntriesCount == MaxEntriesCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (uptr I = 0; I < MaxEntriesCount; I++) {
          if (Entries[I].Block)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0].Block = reinterpret_cast<uptr>(H);
          Entries[0].BlockEnd = H->BlockEnd;
          Entries[0].MapBase = H->MapBase;
          Entries[0].MapSize = H->MapSize;
          Entries[0].Data = H->Data;
          Entries[0].Time = Time;
          EntriesCount++;
          EntryCached = true;
          break;
        }
      }
    }
    s32 Interval;
    if (EmptyCache)
      empty();
    else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    return EntryCached;
  }

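  // Looks for a cached block that can serve an allocation of Size bytes. The
  // block must be at least Size bytes large, but no more than 4 pages larger,
  // to bound the amount of committed memory we would waste by reusing it.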
  bool retrieve(uptr Size, LargeBlock::Header **H) {
    const uptr PageSize = getPageSizeCached();
    ScopedLock L(Mutex);
    if (EntriesCount == 0)
      return false;
    for (uptr I = 0; I < MaxEntriesCount; I++) {
      if (!Entries[I].Block)
        continue;
      const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
      if (Size > BlockSize)
        continue;
      if (Size < BlockSize - PageSize * 4U)
        continue;
      *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
      Entries[I].Block = 0;
      (*H)->BlockEnd = Entries[I].BlockEnd;
      (*H)->MapBase = Entries[I].MapBase;
      (*H)->MapSize = Entries[I].MapSize;
      (*H)->Data = Entries[I].Data;
      EntriesCount--;
      return true;
    }
    return false;
  }

  static bool canCache(uptr Size) {
    return MaxEntriesCount != 0U && Size <= MaxEntrySize;
  }

  void setReleaseToOsIntervalMs(s32 Interval) {
    if (Interval >= MaxReleaseToOsIntervalMs) {
      Interval = MaxReleaseToOsIntervalMs;
    } else if (Interval <= MinReleaseToOsIntervalMs) {
      Interval = MinReleaseToOsIntervalMs;
    }
    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

private:
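  // Unmaps every cached entry. Mapping information is gathered under the
  // lock, but the unmap() calls themselves are performed outside of it to
  // keep the critical section short.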
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[MaxEntriesCount];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < MaxEntriesCount; I++) {
        if (!Entries[I].Block)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].Block = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount)
      return;
    for (uptr I = 0; I < MaxEntriesCount; I++) {
      if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
        continue;
      releasePagesToOS(Entries[I].Block, 0,
                       Entries[I].BlockEnd - Entries[I].Block,
                       &Entries[I].Data);
      Entries[I].Time = 0;
    }
  }

  s32 getReleaseToOsIntervalMs() {
    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
  }

  struct CachedBlock {
    uptr Block;
    uptr BlockEnd;
    uptr MapBase;
    uptr MapSize;
    MapPlatformData Data;
    u64 Time;
  };

  HybridMutex Mutex;
  CachedBlock Entries[MaxEntriesCount];
  u32 EntriesCount;
  uptr LargestSize;
  u32 IsFullEvents;
  atomic_s32 ReleaseToOsIntervalMs;
};

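// The Secondary allocator itself: each allocation is backed by its own
// mapping from the platform, with a guard page on each side. Freed blocks
// are first offered to the CacheT policy class (MapAllocatorCache or
// MapAllocatorNoCache above) and only unmapped if it declines them.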
template <class CacheT> class MapAllocator {
public:
  void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    Cache.initLinkerInitialized(ReleaseToOsInterval);
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
  }
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, ReleaseToOsInterval);
  }

  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    return LargeBlock::getHeader(Ptr)->BlockEnd;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks)
      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
  }

  static uptr canCache(uptr Size) { return CacheT::canCache(Size); }

  void setReleaseToOsIntervalMs(s32 Interval) {
    Cache.setReleaseToOsIntervalMs(Interval);
  }

  void releaseToOS() { Cache.releaseToOS(); }

private:
  CacheT Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap superfluous memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the scarcity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would
// end up wasting VA space (even though we are not committing the whole
// thing), hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a
// page, the committed memory will amount to something close to
// Size - AlignmentHint (pending rounding and headers).
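// As a purely illustrative example, with 4KB pages, a request of Size = 96KB
// with AlignmentHint = 64KB reserves roughly 96KB of VA plus two guard pages,
// but once trimmed (on 32-bit) only about 32KB plus one page for the header
// ends up committed: close to Size - AlignmentHint, as described above.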
template <class CacheT>
void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
                                     uptr *BlockEnd,
                                     FillContentsMode FillContents) {
  DCHECK_GE(Size, AlignmentHint);
  const uptr PageSize = getPageSizeCached();
  const uptr RoundedSize =
      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);

  if (AlignmentHint < PageSize && CacheT::canCache(RoundedSize)) {
    LargeBlock::Header *H;
    if (Cache.retrieve(RoundedSize, &H)) {
      if (BlockEnd)
        *BlockEnd = H->BlockEnd;
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
                                           LargeBlock::getHeaderSize());
      if (FillContents)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               H->BlockEnd - reinterpret_cast<uptr>(Ptr));
      const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase =
      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(AlignmentHint >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd = CommitBase + PageSize +
                           roundUpTo((Size - AlignmentHint), PageSize) +
                           PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr Ptr =
      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
                                 CommitSize, "scudo:secondary", 0, &Data));
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->BlockEnd = CommitBase + CommitSize;
  H->Data = Data;
  if (BlockEnd)
    *BlockEnd = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}

template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
  const uptr Block = reinterpret_cast<uptr>(H);
  const uptr CommitSize = H->BlockEnd - Block;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  if (CacheT::canCache(CommitSize) && Cache.store(H))
    return;
  void *Addr = reinterpret_cast<void *>(H->MapBase);
  const uptr Size = H->MapSize;
  MapPlatformData Data = H->Data;
  unmap(Addr, Size, UNMAP_ALL, &Data);
}

template <class CacheT>
void MapAllocator<CacheT>::getStats(ScopedString *Str) const {
  Str->append(
      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
      "(%zuK), remains %zu (%zuK) max %zuM\n",
      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
      LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_