//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "memtag.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// into Regions, one per size class. Note that the base of that mapping is
// random (based on the platform-specific map() capabilities), and that each
// Region actually starts at a random offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.

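// Illustrative note (not part of the original interface documentation): a
// minimal sketch of how this primary might be instantiated, assuming a
// hypothetical size class map named MySizeClassMap and 30-bit regions:
//
//   typedef SizeClassAllocator64<MySizeClassMap, 30U> Primary;
//
// With 30-bit regions, each size class gets 1 GiB of reserved address space.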
template <class SizeClassMapT, uptr RegionSizeLog,
          s32 MinReleaseToOsIntervalMs = INT32_MIN,
          s32 MaxReleaseToOsIntervalMs = INT32_MAX,
          bool MaySupportMemoryTagging = false>
class SizeClassAllocator64 {
public:
  typedef SizeClassMapT SizeClassMap;
  typedef SizeClassAllocator64<
      SizeClassMap, RegionSizeLog, MinReleaseToOsIntervalMs,
      MaxReleaseToOsIntervalMs, MaySupportMemoryTagging>
      ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;
  static const bool SupportsMemoryTagging =
      MaySupportMemoryTagging && archSupportsMemoryTagging();

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

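  // Reserves the whole Primary as an inaccessible mapping (MAP_NOACCESS), then
  // seeds the per-class state: each Region starts 1 to 16 pages past its base,
  // gets its own shuffling RandState, and is marked releasable only if it is
  // not the batch class and its blocks are at least PageSize / 32 bytes.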
  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    // Reserve the space required for the Primary.
    PrimaryBase = reinterpret_cast<uptr>(
        map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));

    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // The actual start of a region is offset by a random number of pages.
      Region->RegionBeg =
          getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
      Region->RandState = getRandomU32(&Seed);
      // Releasing smaller size classes doesn't necessarily yield a meaningful
      // RSS impact: there are more blocks per page, they are scattered by the
      // shuffling, and thus pages are less likely to be entirely empty. On top
      // of this, attempting to release them requires more iterations and
      // memory accesses, which ends up being fairly costly. The current lower
      // limit is mostly arbitrary and based on empirical observations.
      // TODO(kostyak): make the lower limit a runtime option
      Region->CanRelease = (I != SizeClassMap::BatchClassId) &&
                           (getSizeByClassId(I) >= (PageSize / 32));
      if (Region->CanRelease)
        Region->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setReleaseToOsIntervalMs(ReleaseToOsInterval);

    if (SupportsMemoryTagging)
      UseMemoryTagging = systemSupportsMemoryTagging();
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
  }

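  // Pops a TransferBatch of free blocks for ClassId, refilling the Region's
  // freelist from newly mapped memory when it is empty. Returns nullptr if the
  // Region is exhausted or the mapping failed.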
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    TransferBatch *B = Region->FreeList.front();
    if (B) {
      Region->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Region);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Region->Stats.PoppedBlocks += B->getCount();
    return B;
  }

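  // Pushes a full TransferBatch back onto the Region's freelist, and
  // opportunistically attempts to release unused pages back to the OS for
  // size classes that qualify for it.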
  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_GT(B->getCount(), 0);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    Region->FreeList.push_front(B);
    Region->Stats.PushedBlocks += B->getCount();
    if (Region->CanRelease)
      releaseToOSMaybe(Region, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
  }

  void enable() {
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->Mutex.unlock();
    }
  }

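  // Invokes Callback on the start address of every block in the allocated
  // portion of each Region (whether currently in use or free); the batch class
  // is skipped. Callers are expected to have stopped concurrent allocations,
  // typically via disable().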
  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      const RegionInfo *Region = getRegionInfo(I);
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      if (Region->MappedUser)
        TotalMapped += Region->MappedUser;
      PoppedBlocks += Region->Stats.PoppedBlocks;
      PushedBlocks += Region->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, 0, PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  void setReleaseToOsIntervalMs(s32 Interval) {
    if (Interval >= MaxReleaseToOsIntervalMs) {
      Interval = MaxReleaseToOsIntervalMs;
    } else if (Interval <= MinReleaseToOsIntervalMs) {
      Interval = MinReleaseToOsIntervalMs;
    }
    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
  }

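  // Forces a release pass over every size class, regardless of the configured
  // interval, and returns the total number of bytes released to the OS.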
  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  bool useMemoryTagging() const {
    return SupportsMemoryTagging && UseMemoryTagging;
  }
  void disableMemoryTagging() { UseMemoryTagging = false; }

  const char *getRegionInfoArrayAddress() const {
    return reinterpret_cast<const char *>(RegionInfoArray);
  }

  static uptr getRegionInfoArraySize() {
    return sizeof(RegionInfoArray);
  }

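  // Given a raw copy of the RegionInfo array (see getRegionInfoArrayAddress)
  // and an arbitrary address, locates the closest Region and, if the address
  // lies within (or within 8192 bytes of) its allocated range, returns the
  // bounds of the block containing or nearest to that address.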
  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
    const RegionInfo *RegionInfoArray =
        reinterpret_cast<const RegionInfo *>(RegionInfoData);
    uptr ClassId;
    uptr MinDistance = -1UL;
    for (uptr I = 0; I != NumClasses; ++I) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      uptr Begin = RegionInfoArray[I].RegionBeg;
      uptr End = Begin + RegionInfoArray[I].AllocatedUser;
      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
        continue;
      uptr RegionDistance;
      if (Begin <= Ptr) {
        if (Ptr < End)
          RegionDistance = 0;
        else
          RegionDistance = Ptr - End;
      } else {
        RegionDistance = Begin - Ptr;
      }

      if (RegionDistance < MinDistance) {
        MinDistance = RegionDistance;
        ClassId = I;
      }
    }

    BlockInfo B = {};
    if (MinDistance <= 8192) {
      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
      B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
      B.BlockBegin =
          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
                               sptr(B.BlockSize));
      while (B.BlockBegin < B.RegionBegin)
        B.BlockBegin += B.BlockSize;
      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
        B.BlockBegin -= B.BlockSize;
    }
    return B;
  }

private:
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr PrimarySize = RegionSize * NumClasses;
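  // As a rough worked example (assuming a 30-bit RegionSizeLog and 32 size
  // classes): PrimarySize = 2^30 * 32 = 32 GiB of reserved, initially
  // inaccessible address space; user memory is then mapped within it in
  // multiples of MapSizeIncrement (256 KiB) as each class grows.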

  // Call map for user memory with at least this size.
  static const uptr MapSizeIncrement = 1UL << 18;
  // Fill at most this number of batches from the newly mapped memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;

  struct RegionStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct UnpaddedRegionInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    RegionStats Stats;
    bool CanRelease;
    bool Exhausted;
    u32 RandState;
    uptr RegionBeg;
    uptr MappedUser;    // Bytes mapped for user memory.
    uptr AllocatedUser; // Bytes allocated for user memory.
    MapPlatformData Data;
    ReleaseToOsInfo ReleaseInfo;
  };
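  // Each RegionInfo is padded out to a multiple of the cache line size so that
  // the state of different size classes never shares a cache line, avoiding
  // false sharing between threads operating on distinct classes.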
  struct RegionInfo : UnpaddedRegionInfo {
    char Padding[SCUDO_CACHE_LINE_SIZE -
                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)];
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr PrimaryBase;
  MapPlatformData Data;
  atomic_s32 ReleaseToOsIntervalMs;
  bool UseMemoryTagging;
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];

  RegionInfo *getRegionInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) const {
    return PrimaryBase + (ClassId << RegionSizeLog);
  }

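  // Shuffles the freshly carved block pointers (except for the batch class)
  // and appends them, up to MaxCount at a time, to the Region's freelist as
  // TransferBatches. The last, possibly partial, batch is handed back through
  // *CurrentBatch so a subsequent call can keep filling it.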
  bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    // No need to shuffle the blocks of the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Region->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Region->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

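  // Grows the Region when needed (mapping at least MapSizeIncrement more bytes
  // of user memory, tagged if memory tagging is in use), carves the newly
  // available space into up to MaxNumBatches batches worth of blocks, and
  // returns one non-empty TransferBatch, or nullptr if the Region is exhausted
  // or the map call failed.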
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           RegionInfo *Region) {
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MappedUser;
    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr UserMapSize =
          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
        if (!Region->Exhausted) {
          Region->Exhausted = true;
          ScopedString Str(1024);
          getStats(&Str);
          Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
              RegionSize >> 20, Size);
          Str.output();
        }
        return nullptr;
      }
      if (UNLIKELY(MappedUser == 0))
        Region->Data = Data;
      if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                        UserMapSize, "scudo:primary",
                        MAP_ALLOWNOMEM | MAP_RESIZABLE |
                            (useMemoryTagging() ? MAP_MEMTAG : 0),
                        &Region->Data)))
        return nullptr;
      Region->MappedUser += UserMapSize;
      C->getStats().add(StatMapped, UserMapSize);
    }

    const u32 NumberOfBlocks = Min(
        MaxNumBatches * MaxCount,
        static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
    DCHECK_GT(NumberOfBlocks, 0);

    TransferBatch *B = nullptr;
    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr P = RegionBeg + Region->AllocatedUser;
    const uptr AllocatedUser = Size * NumberOfBlocks;
    for (uptr I = P; I < P + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
                                    ShuffleArray, Count)))
        return nullptr;
    }
    DCHECK(B);
    if (!Region->FreeList.empty()) {
      Region->FreeList.push_back(B);
      B = Region->FreeList.front();
      Region->FreeList.pop_front();
    }
    DCHECK_GT(B->getCount(), 0);

    C->getStats().add(StatFree, AllocatedUser);
    Region->AllocatedUser += AllocatedUser;
    Region->Exhausted = false;

    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->MappedUser == 0)
      return;
    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
    const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                "released: %6zuK region: 0x%zx (0x%zx)\n",
                Region->Exhausted ? "F" : " ", ClassId,
                getSizeByClassId(ClassId), Region->MappedUser >> 10,
                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
                Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                getRegionBaseByClassId(ClassId));
  }

  s32 getReleaseToOsIntervalMs() {
    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
  }

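  // Releases unused pages of a Region back to the OS when it looks worthwhile:
  // it bails out early if less than a page worth of blocks sits in the
  // freelist, if nothing was pushed since the previous release, or (unless
  // Force is set) if the release interval is negative or has not elapsed yet.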
  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Region->AllocatedUser -
        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed = (Region->Stats.PushedBlocks -
                              Region->ReleaseInfo.PushedBlocksAtLastRelease) *
                             BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    if (!Force) {
      const s32 IntervalMs = getReleaseToOsIntervalMs();
      if (IntervalMs < 0)
        return 0;
      if (Region->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
    releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
                          Region->AllocatedUser, BlockSize, &Recorder);

    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.PushedBlocksAtLastRelease =
          Region->Stats.PushedBlocks;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return Recorder.getReleasedBytes();
  }
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_