//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// into Regions, one per size class. Note that the base of that mapping is
// random (based on the platform specific map() capabilities), and that each
// Region actually starts at a random offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.
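//
// A minimal usage sketch, with hypothetical template parameters chosen only
// for illustration (the actual configuration is selected elsewhere):
//
//   scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 24U> Primary;
//   Primary.init(/*ReleaseToOsInterval=*/1000);
//   // With RegionSizeLog == 24, each Region spans 16MiB, so the initial
//   // NOACCESS reservation is NumClasses * 16MiB.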
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
public:
  typedef SizeClassMapT SizeClassMap;
  typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    // Reserve the space required for the Primary.
    PrimaryBase = reinterpret_cast<uptr>(
        map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));

    RegionInfoArray = reinterpret_cast<RegionInfo *>(
        map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
    DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
              0);

    u32 Seed;
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // The actual start of a region is offset by a random number of pages.
      Region->RegionBeg =
          getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
      // Releasing smaller size classes doesn't necessarily yield a meaningful
      // RSS impact: there are more blocks per page, and they are randomized
      // around, so pages are less likely to be entirely empty. On top of
      // this, attempting to release them requires more iterations and memory
      // accesses, which ends up being fairly costly. The current lower limit
      // is mostly arbitrary and based on empirical observations.
      // TODO(kostyak): make the lower limit a runtime option
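      // For example, with a 4096-byte page, only size classes of at least
      // 4096 / 32 = 128 bytes are eligible for release.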
      Region->CanRelease = (ReleaseToOsInterval >= 0) &&
                           (I != SizeClassMap::BatchClassId) &&
                           (getSizeByClassId(I) >= (PageSize / 32));
      Region->RandState = getRandomU32(&Seed);
    }
    ReleaseToOsIntervalMs = ReleaseToOsInterval;
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
    unmap(reinterpret_cast<void *>(RegionInfoArray),
          sizeof(RegionInfo) * NumClasses);
  }

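  // Pops a TransferBatch from the freelist of the given size class, falling
  // back to populateFreeList() when the freelist is empty. Returns nullptr if
  // the Region is exhausted or a new batch couldn't be set up.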
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    TransferBatch *B = Region->FreeList.front();
    if (B) {
      Region->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Region);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Region->Stats.PoppedBlocks += B->getCount();
    return B;
  }

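  // Pushes a TransferBatch of freed blocks onto the freelist of the given
  // size class, and potentially releases unused pages back to the OS.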
  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_GT(B->getCount(), 0);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    Region->FreeList.push_front(B);
    Region->Stats.PushedBlocks += B->getCount();
    if (Region->CanRelease)
      releaseToOSMaybe(Region, ClassId);
  }

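  // disable() acquires all the Region mutexes in ClassId order, and enable()
  // releases them in reverse order; together they bracket operations that
  // need the whole Primary to be quiescent.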
  void disable() {
    for (uptr I = 0; I < NumClasses; I++)
      getRegionInfo(I)->Mutex.lock();
  }

  void enable() {
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
      getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
  }

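  // Calls Callback on the address of every block carved out of the Regions
  // so far (whether currently allocated or free), skipping the batch class.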
  template <typename F> void iterateOverBlocks(F Callback) const {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      const RegionInfo *Region = getRegionInfo(I);
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }

  void getStats(ScopedString *Str) const {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      if (Region->MappedUser)
        TotalMapped += Region->MappedUser;
      PoppedBlocks += Region->Stats.PoppedBlocks;
      PushedBlocks += Region->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, static_cast<uptr>(0), PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

private:
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr PrimarySize = RegionSize * NumClasses;

  // Map memory for user blocks in increments of at least this size (128KiB).
  static const uptr MapSizeIncrement = 1UL << 17;

  struct RegionStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
    HybridMutex Mutex;
    IntrusiveList<TransferBatch> FreeList;
    RegionStats Stats;
    bool CanRelease;
    bool Exhausted;
    u32 RandState;
    uptr RegionBeg;
    uptr MappedUser;    // Bytes mapped for user memory.
    uptr AllocatedUser; // Bytes allocated for user memory.
    MapPlatformData Data;
    ReleaseToOsInfo ReleaseInfo;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);

  uptr PrimaryBase;
  RegionInfo *RegionInfoArray;
  MapPlatformData Data;
  s32 ReleaseToOsIntervalMs;

  RegionInfo *getRegionInfo(uptr ClassId) const {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) const {
    return PrimaryBase + (ClassId << RegionSizeLog);
  }

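  // Fills TransferBatches with the (shuffled) pointers in PointersArray: full
  // batches of MaxCount entries are appended to the Region freelist, and the
  // last, possibly partial, batch is left in *CurrentBatch. Returns false if
  // a batch couldn't be created.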
  bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    // There is no need to shuffle the pointers of the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Region->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Region->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

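  // Carves up to 8 * MaxCount blocks worth of new memory out of the Region,
  // mapping more of it (in MapSizeIncrement chunks) when necessary, and turns
  // the blocks into TransferBatches. Returns a batch ready to be popped, or
  // nullptr if the Region is exhausted or the mapping failed.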
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           RegionInfo *Region) {
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MappedUser;
    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr UserMapSize =
          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
        if (!Region->Exhausted) {
          Region->Exhausted = true;
          ScopedString Str(1024);
          getStats(&Str);
          Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
              RegionSize >> 20, Size);
          Str.output();
        }
        return nullptr;
      }
      if (UNLIKELY(MappedUser == 0))
        Region->Data = Data;
      if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                        UserMapSize, "scudo:primary",
                        MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
        return nullptr;
      Region->MappedUser += UserMapSize;
      C->getStats().add(StatMapped, UserMapSize);
    }

    const uptr NumberOfBlocks = Min(
        8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
    DCHECK_GT(NumberOfBlocks, 0);

    TransferBatch *B = nullptr;
    constexpr uptr ShuffleArraySize = 48;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr P = RegionBeg + Region->AllocatedUser;
    const uptr AllocatedUser = NumberOfBlocks * Size;
    for (uptr I = P; I < P + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                    ShuffleArray, Count)))
        return nullptr;
    }
    DCHECK(B);
    DCHECK_GT(B->getCount(), 0);

    C->getStats().add(StatFree, AllocatedUser);
    Region->AllocatedUser += AllocatedUser;
    Region->Exhausted = false;
    if (Region->CanRelease)
      Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();

    return B;
  }

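  // Prints a line of statistics for the given size class using the provided
  // Rss value (currently always 0, as per-region RSS is not tracked yet), or
  // nothing if the Region has no memory mapped.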
  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) const {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->MappedUser == 0)
      return;
    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
    const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                "released: %6zuK region: 0x%zx (0x%zx)\n",
                Region->Exhausted ? "F" : " ", ClassId,
                getSizeByClassId(ClassId), Region->MappedUser >> 10,
                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
                Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                getRegionBaseByClassId(ClassId));
  }

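  // Attempts to release the pages backing free blocks of the Region to the
  // OS. Unless Force is true, this is rate limited: nothing happens if less
  // than a page worth of bytes sits in the freelist, if nothing new was
  // pushed since the last release, or if ReleaseToOsIntervalMs has not
  // elapsed since then.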
  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Region->AllocatedUser -
        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    if ((Region->Stats.PushedBlocks -
         Region->ReleaseInfo.PushedBlocksAtLastRelease) *
            BlockSize <
        PageSize) {
      return 0; // Nothing new to release.
    }

    if (!Force) {
      const s32 IntervalMs = ReleaseToOsIntervalMs;
      if (IntervalMs < 0)
        return 0;
      if (Region->ReleaseInfo.LastReleaseAtNs +
              static_cast<uptr>(IntervalMs) * 1000000ULL >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
    releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
                          roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
                          BlockSize, &Recorder);

    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.PushedBlocksAtLastRelease =
          Region->Stats.PushedBlocks;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return Recorder.getReleasedBytes();
  }
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_