//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "memtag.h"
#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// into Regions, one per size class. Note that the base of that mapping is
// random (based on the platform-specific map() capabilities), and that each
// Region actually starts at a random offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size-class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.
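//
// For illustration only (a sketch of the layout, assuming a hypothetical
// Config::PrimaryRegionSizeLog of 30): each class would get a 1 GiB Region,
// with the Region for class I starting at PrimaryBase + I * 1 GiB, plus a
// random offset of 1 to 16 pages:
//
//   PrimaryBase -> | Region 0 (batches) | Region 1 | ... | Region N-1 |
//
// Each Region is then carved into Blocks of that class's size.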

template <typename Config> class SizeClassAllocator64 {
public:
  typedef typename Config::SizeClassMap SizeClassMap;
  typedef SizeClassAllocator64<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    // Reserve the space required for the Primary.
    PrimaryBase = reinterpret_cast<uptr>(
        map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));

    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // The actual start of a region is offset by a random number of pages
      // (1 to 16).
      Region->RegionBeg =
          getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
      Region->RandState = getRandomU32(&Seed);
      Region->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
  }

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    TransferBatch *B = Region->FreeList.front();
    if (B) {
      Region->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Region);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Region->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_GT(B->getCount(), 0);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    Region->FreeList.push_front(B);
    Region->Stats.PushedBlocks += B->getCount();
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Region, ClassId);
  }
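
  // Illustrative only: a simplified sketch of how the thread-local cache is
  // expected to drive these two entry points (the real plumbing lives in
  // SizeClassAllocatorLocalCache):
  //
  //   TransferBatch *B = Primary.popBatch(&Cache, ClassId); // refill
  //   if (B) { /* move B's pointers into the local free list */ }
  //   ...
  //   Primary.pushBatch(ClassId, FullBatch);                // give back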

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
  }

  void enable() {
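    // Unlock in the reverse order of disable(): the BatchClass region first,
    // then the other classes in increasing order.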
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      const RegionInfo *Region = getRegionInfo(I);
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }
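
  // Example (sketch only): count every block carved out so far, allocated or
  // not. Callers typically disable() the allocator around the iteration.
  //
  //   uptr Blocks = 0;
  //   Primary.iterateOverBlocks([&Blocks](uptr Block) { Blocks++; });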

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      if (Region->MappedUser)
        TotalMapped += Region->MappedUser;
      PoppedBlocks += Region->Stats.PoppedBlocks;
      PushedBlocks += Region->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, 0, PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
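      // Clamp the requested interval to the [Min, Max] bounds provided by the
      // Config before storing it.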
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const {
    return reinterpret_cast<const char *>(RegionInfoArray);
  }

  static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }

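  // Maps an arbitrary address back to the block that would contain it. This
  // operates on a raw copy of the RegionInfo array (see
  // getRegionInfoArrayAddress() / getRegionInfoArraySize()), so it can also be
  // used by out-of-process or post-mortem tooling. Returns a zero-initialized
  // BlockInfo when Ptr is more than 8K away from every region.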
  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
    const RegionInfo *RegionInfoArray =
        reinterpret_cast<const RegionInfo *>(RegionInfoData);
    uptr ClassId;
    uptr MinDistance = -1UL;
    for (uptr I = 0; I != NumClasses; ++I) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      uptr Begin = RegionInfoArray[I].RegionBeg;
      uptr End = Begin + RegionInfoArray[I].AllocatedUser;
      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
        continue;
      uptr RegionDistance;
      if (Begin <= Ptr) {
        if (Ptr < End)
          RegionDistance = 0;
        else
          RegionDistance = Ptr - End;
      } else {
        RegionDistance = Begin - Ptr;
      }

      if (RegionDistance < MinDistance) {
        MinDistance = RegionDistance;
        ClassId = I;
      }
    }

    BlockInfo B = {};
    if (MinDistance <= 8192) {
      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
      B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
      B.BlockBegin =
          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
                               sptr(B.BlockSize));
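      // Clamp the candidate block into the region, so that an address just
      // outside of it still resolves to the first (or last) block.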
      while (B.BlockBegin < B.RegionBegin)
        B.BlockBegin += B.BlockSize;
      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
        B.BlockBegin -= B.BlockSize;
    }
    return B;
  }

  AtomicOptions Options;

private:
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr PrimarySize = RegionSize * NumClasses;

  // Call map() for user memory with at least this size (1 << 18 = 256 KB).
  static const uptr MapSizeIncrement = 1UL << 18;
  // Fill at most this number of batches from the newly mapped memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;

  struct RegionStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct UnpaddedRegionInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    RegionStats Stats;
    bool Exhausted;
    u32 RandState;
    uptr RegionBeg;
    uptr MappedUser;    // Bytes mapped for user memory.
    uptr AllocatedUser; // Bytes allocated for user memory.
    MapPlatformData Data;
    ReleaseToOsInfo ReleaseInfo;
  };
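  // RegionInfo is padded to a multiple of the cache line size so that the
  // entries of RegionInfoArray do not share cache lines, avoiding false
  // sharing between size classes.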
  struct RegionInfo : UnpaddedRegionInfo {
    char Padding[SCUDO_CACHE_LINE_SIZE -
                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)];
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr PrimaryBase;
  MapPlatformData Data;
  atomic_s32 ReleaseToOsIntervalMs;
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];

  RegionInfo *getRegionInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) const {
    return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
  }

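  // Carves new blocks out of the region, mapping more memory via map() when
  // needed. The blocks are grouped into TransferBatches that are appended to
  // the region's freelist; the first of those batches is then popped and
  // returned. Returns nullptr on failure (region exhausted, map() failure, or
  // batch creation failure).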
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           RegionInfo *Region) {
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MappedUser;
    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (UNLIKELY(TotalUserBytes > MappedUser)) {
      // Do the mmap for the user memory.
      const uptr UserMapSize =
          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (RegionBase + MappedUser + UserMapSize > RegionSize) {
        if (!Region->Exhausted) {
          Region->Exhausted = true;
          ScopedString Str(1024);
          getStats(&Str);
          Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
              RegionSize >> 20, Size);
          Str.output();
        }
        return nullptr;
      }
      if (MappedUser == 0)
        Region->Data = Data;
      if (!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize,
               "scudo:primary",
               MAP_ALLOWNOMEM | MAP_RESIZABLE |
                   (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
               &Region->Data))
        return nullptr;
      Region->MappedUser += UserMapSize;
      C->getStats().add(StatMapped, UserMapSize);
    }

    const u32 NumberOfBlocks = Min(
        MaxNumBatches * MaxCount,
        static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
    DCHECK_GT(NumberOfBlocks, 0);

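    // Worst case: MaxNumBatches batches, each holding at most MaxNumCached
    // pointers.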
    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    void *ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = RegionBeg + Region->AllocatedUser;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<void *>(P);
    // No need to shuffle the blocks of the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
    for (u32 I = 0; I < NumberOfBlocks;) {
      TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
      if (UNLIKELY(!B))
        return nullptr;
      const u32 N = Min(MaxCount, NumberOfBlocks - I);
      B->setFromArray(&ShuffleArray[I], N);
      Region->FreeList.push_back(B);
      I += N;
    }
    TransferBatch *B = Region->FreeList.front();
    Region->FreeList.pop_front();
    DCHECK(B);
    DCHECK_GT(B->getCount(), 0);

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    Region->AllocatedUser += AllocatedUser;

    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->MappedUser == 0)
      return;
    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
    const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                "released: %6zuK region: 0x%zx (0x%zx)\n",
                Region->Exhausted ? "F" : " ", ClassId,
                getSizeByClassId(ClassId), Region->MappedUser >> 10,
                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
                Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                getRegionBaseByClassId(ClassId));
  }

  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Region->AllocatedUser -
        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed = (Region->Stats.PushedBlocks -
                              Region->ReleaseInfo.PushedBlocksAtLastRelease) *
                             BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of memory is free, and that a good number of blocks
    // have been pushed to the freelist, before attempting to release.
    if (BlockSize < PageSize / 16U) {
      if (!Force && BytesPushed < Region->AllocatedUser / 16U)
        return 0;
      // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
      if ((BytesInFreeList * 100U) / Region->AllocatedUser <
          (100U - 1U - BlockSize / 16U))
        return 0;
    }
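    // For instance (illustrative numbers only): with 4096-byte pages, a
    // 64-byte size class only gets past this check when at least
    // 100 - 1 - 64/16 = 95% of its allocated bytes sit in the freelist.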

    if (!Force) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Region->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

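    // The Primary uses a single region per size class here, so there is never
    // a sub-region to skip.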
    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
    releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
                          Region->AllocatedUser, 1U, BlockSize, &Recorder,
                          SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.PushedBlocksAtLastRelease =
          Region->Stats.PushedBlocks;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return Recorder.getReleasedBytes();
  }
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_