xref: /freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
10b57cec5SDimitry Andric //===-- primary32.h ---------------------------------------------*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric 
90b57cec5SDimitry Andric #ifndef SCUDO_PRIMARY32_H_
100b57cec5SDimitry Andric #define SCUDO_PRIMARY32_H_
110b57cec5SDimitry Andric 
125f757f3fSDimitry Andric #include "allocator_common.h"
130b57cec5SDimitry Andric #include "bytemap.h"
140b57cec5SDimitry Andric #include "common.h"
150b57cec5SDimitry Andric #include "list.h"
160b57cec5SDimitry Andric #include "local_cache.h"
17e8d8bef9SDimitry Andric #include "options.h"
180b57cec5SDimitry Andric #include "release.h"
190b57cec5SDimitry Andric #include "report.h"
200b57cec5SDimitry Andric #include "stats.h"
210b57cec5SDimitry Andric #include "string_utils.h"
2206c3fb27SDimitry Andric #include "thread_annotations.h"
230b57cec5SDimitry Andric 
240b57cec5SDimitry Andric namespace scudo {
250b57cec5SDimitry Andric 
260b57cec5SDimitry Andric // SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
270b57cec5SDimitry Andric //
280b57cec5SDimitry Andric // It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
290b57cec5SDimitry Andric // boundary, and keeps a bytemap of the mappable address space to track the size
300b57cec5SDimitry Andric // class they are associated with.
310b57cec5SDimitry Andric //
320b57cec5SDimitry Andric // Mapped regions are split into equally sized Blocks according to the size
330b57cec5SDimitry Andric // class they belong to, and the associated pointers are shuffled to prevent any
340b57cec5SDimitry Andric // predictable address pattern (the predictability increases with the block
350b57cec5SDimitry Andric // size).
360b57cec5SDimitry Andric //
370b57cec5SDimitry Andric // Regions for size class 0 are special and used to hold TransferBatches, which
380b57cec5SDimitry Andric // allow to transfer arrays of pointers from the global size class freelist to
390b57cec5SDimitry Andric // the thread specific freelist for said class, and back.
400b57cec5SDimitry Andric //
410b57cec5SDimitry Andric // Memory used by this allocator is never unmapped but can be partially
420b57cec5SDimitry Andric // reclaimed if the platform allows for it.
430b57cec5SDimitry Andric 
44e8d8bef9SDimitry Andric template <typename Config> class SizeClassAllocator32 {
450b57cec5SDimitry Andric public:
46*0fca6ea1SDimitry Andric   typedef typename Config::CompactPtrT CompactPtrT;
47*0fca6ea1SDimitry Andric   typedef typename Config::SizeClassMap SizeClassMap;
48*0fca6ea1SDimitry Andric   static const uptr GroupSizeLog = Config::getGroupSizeLog();
495ffd83dbSDimitry Andric   // The bytemap can only track UINT8_MAX - 1 classes.
505ffd83dbSDimitry Andric   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
510b57cec5SDimitry Andric   // Regions should be large enough to hold the largest Block.
52*0fca6ea1SDimitry Andric   static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
53e8d8bef9SDimitry Andric                 "");
54e8d8bef9SDimitry Andric   typedef SizeClassAllocator32<Config> ThisT;
550b57cec5SDimitry Andric   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
565f757f3fSDimitry Andric   typedef TransferBatch<ThisT> TransferBatchT;
575f757f3fSDimitry Andric   typedef BatchGroup<ThisT> BatchGroupT;
585f757f3fSDimitry Andric 
595f757f3fSDimitry Andric   static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
605f757f3fSDimitry Andric                 "BatchGroupT uses the same class size as TransferBatchT");
610b57cec5SDimitry Andric 
getSizeByClassId(uptr ClassId)620b57cec5SDimitry Andric   static uptr getSizeByClassId(uptr ClassId) {
630b57cec5SDimitry Andric     return (ClassId == SizeClassMap::BatchClassId)
645f757f3fSDimitry Andric                ? sizeof(TransferBatchT)
650b57cec5SDimitry Andric                : SizeClassMap::getSizeByClassId(ClassId);
660b57cec5SDimitry Andric   }
670b57cec5SDimitry Andric 
  // Whether a request of `Size` bytes can be serviced by the primary (i.e.,
  // fits within the largest size class).
  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
690b57cec5SDimitry Andric 
  // One-time initialization: seeds the per-class shuffling PRNG, resets the
  // region-index bounds, and installs the release-to-OS interval. Thread
  // safety analysis is disabled since nothing can race with initialization.
  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    // If the platform cannot provide randomness, fall back to a seed derived
    // from the current time and this object's address.
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0. MinRegionIndex is
      // set past the end so the first allocated region lowers it.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }

    // The default value in the primary config has the higher priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
970b57cec5SDimitry Andric 
  // Unmaps all memory owned by the allocator. Test-only: the allocator is
  // unusable afterwards. Proceeds in three phases, each under its own lock:
  // stash, per-class state, then the bytemap-recorded regions.
  void unmapTestOnly() {
    {
      // Release the regions stashed for future allocations first.
      ScopedLock L(RegionsStashMutex);
      while (NumberOfStashedRegions > 0) {
        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
              RegionSize);
      }
    }

    // Gather the overall range of region indices ever used while resetting
    // every class's state.
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }

    // Unmap every region the bytemap knows about, then the bytemap itself.
    ScopedLock L(ByteMapMutex);
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }
1240b57cec5SDimitry Andric 
  // When all blocks are freed, it has to be the same size as `AllocatedUser`.
  // Test-only sanity check that every allocated block is accounted for in the
  // freelists, including the BatchClass metadata blocks.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first.
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L1(Sci->Mutex);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      // All blocks of this class must be back in its freelist, and pops must
      // balance pushes.
      const uptr BlockSize = getSizeByClassId(I);
      DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
      DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
    }

    SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
    ScopedLock L1(Sci->Mutex);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
        // itself.
        ++TotalBlocks;
      }
    }

    // The BatchClass blocks are either in its own freelist or in use as
    // metadata (BatchGroup/TransferBatch) by the other classes' freelists.
    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Sci->AllocatedUser / BlockSize);
    const uptr BlocksInUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }
16906c3fb27SDimitry Andric 
  // In the 32-bit primary, compact pointers are the raw pointer value; no
  // class-specific compaction is performed.
  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }
173fe6060f1SDimitry Andric 
decompactPtr(UNUSED uptr ClassId,CompactPtrT CompactPtr)174fe6060f1SDimitry Andric   void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
175fe6060f1SDimitry Andric     return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
176fe6060f1SDimitry Andric   }
177fe6060f1SDimitry Andric 
compactPtrGroupBase(CompactPtrT CompactPtr)17806c3fb27SDimitry Andric   uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
17906c3fb27SDimitry Andric     const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
18006c3fb27SDimitry Andric     return CompactPtr & ~Mask;
18106c3fb27SDimitry Andric   }
18206c3fb27SDimitry Andric 
decompactGroupBase(uptr CompactPtrGroupBase)18306c3fb27SDimitry Andric   uptr decompactGroupBase(uptr CompactPtrGroupBase) {
18406c3fb27SDimitry Andric     return CompactPtrGroupBase;
18506c3fb27SDimitry Andric   }
18606c3fb27SDimitry Andric 
isSmallBlock(uptr BlockSize)18706c3fb27SDimitry Andric   ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
18806c3fb27SDimitry Andric     const uptr PageSize = getPageSizeCached();
18906c3fb27SDimitry Andric     return BlockSize < PageSize / 16U;
19006c3fb27SDimitry Andric   }
19106c3fb27SDimitry Andric 
isLargeBlock(uptr BlockSize)19206c3fb27SDimitry Andric   ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
19306c3fb27SDimitry Andric     const uptr PageSize = getPageSizeCached();
19406c3fb27SDimitry Andric     return BlockSize > PageSize;
195bdd1243dSDimitry Andric   }
196bdd1243dSDimitry Andric 
  // Pops up to MaxBlockCount blocks of class ClassId into ToArray. If the
  // freelist is empty, a new region is mapped and carved up to refill it
  // before retrying. Returns the number of blocks popped; 0 only if the
  // refill failed (e.g. out of memory).
  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
                const u16 MaxBlockCount) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);

    u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
    if (UNLIKELY(PopCount == 0)) {
      // Freelist exhausted: populate it from a fresh region and retry once.
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return 0U;
      PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
      DCHECK_NE(PopCount, 0U);
    }

    return PopCount;
  }
2130b57cec5SDimitry Andric 
  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      // BatchClass blocks are self-contained and handled by a dedicated path.
      ScopedLock L(Sci->Mutex);
      pushBatchClassBlocks(Sci, Array, Size);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be pushed
    // together. This is an insertion sort keyed on the group base; SameGroup
    // records whether every block already belongs to one group (hint passed
    // along to pushBlocksImpl).
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 &&
             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
  }
2480b57cec5SDimitry Andric 
  // Acquires every allocator mutex in a fixed total order so that the state
  // can be accessed without races. Must be paired with enable(), which
  // releases them in the exact reverse order.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    ByteMapMutex.lock();
  }
2600b57cec5SDimitry Andric 
  // Releases every mutex acquired by disable(), in the exact reverse order of
  // acquisition.
  void enable() NO_THREAD_SAFETY_ANALYSIS {
    ByteMapMutex.unlock();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }
2710b57cec5SDimitry Andric 
  // Invokes Callback(BlockAddress) for every block of every non-BatchClass
  // region. The allocator must have been disable()d first: all per-class
  // mutexes and the bytemap mutex are asserted held.
  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator32. We may consider locking each region on demand
      // only.
      Sci->Mutex.assertHeld();
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
    ByteMapMutex.assertHeld();

    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      // The bytemap stores ClassId + 1; 0 means the region is unused.
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        // Only whole blocks are iterated; the unusable region tail is skipped.
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
    }
  }
3000b57cec5SDimitry Andric 
  // Appends allocator-wide totals followed by per-class statistics to Str.
  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    // Second pass: delegate to the per-class getStats overload.
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getStats(Str, I, Sci);
    }
  }
3220b57cec5SDimitry Andric 
getFragmentationInfo(ScopedString * Str)3235f757f3fSDimitry Andric   void getFragmentationInfo(ScopedString *Str) {
3245f757f3fSDimitry Andric     Str->append(
3255f757f3fSDimitry Andric         "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
3265f757f3fSDimitry Andric         getPageSizeCached());
3275f757f3fSDimitry Andric 
3285f757f3fSDimitry Andric     for (uptr I = 1; I < NumClasses; I++) {
3295f757f3fSDimitry Andric       SizeClassInfo *Sci = getSizeClassInfo(I);
3305f757f3fSDimitry Andric       ScopedLock L(Sci->Mutex);
3315f757f3fSDimitry Andric       getSizeClassFragmentationInfo(Sci, I, Str);
3325f757f3fSDimitry Andric     }
3335f757f3fSDimitry Andric   }
3345f757f3fSDimitry Andric 
setOption(Option O,sptr Value)335e8d8bef9SDimitry Andric   bool setOption(Option O, sptr Value) {
336e8d8bef9SDimitry Andric     if (O == Option::ReleaseInterval) {
337*0fca6ea1SDimitry Andric       const s32 Interval = Max(
338*0fca6ea1SDimitry Andric           Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
339*0fca6ea1SDimitry Andric           Config::getMinReleaseToOsIntervalMs());
340e8d8bef9SDimitry Andric       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
341e8d8bef9SDimitry Andric       return true;
3425ffd83dbSDimitry Andric     }
343e8d8bef9SDimitry Andric     // Not supported by the Primary, but not an error either.
344e8d8bef9SDimitry Andric     return true;
3455ffd83dbSDimitry Andric   }
3465ffd83dbSDimitry Andric 
  // Attempts to release unused pages of a single size class back to the OS.
  // Returns the number of bytes released.
  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    // TODO: Once we have separate locks like primary64, we may consider using
    // tryLock() as well.
    ScopedLock L(Sci->Mutex);
    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
  }
35406c3fb27SDimitry Andric 
  // Attempts a release for every size class except the BatchClass. Returns
  // the total number of bytes released.
  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }
3660b57cec5SDimitry Andric 
  // The 32-bit primary exports no region-info array; these entry points are
  // stubs (null address, zero size, empty lookup result).
  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }
3745ffd83dbSDimitry Andric 
375e8d8bef9SDimitry Andric   AtomicOptions Options;
376e8d8bef9SDimitry Andric 
3770b57cec5SDimitry Andric private:
3780b57cec5SDimitry Andric   static const uptr NumClasses = SizeClassMap::NumClasses;
379*0fca6ea1SDimitry Andric   static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
380*0fca6ea1SDimitry Andric   static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >>
381*0fca6ea1SDimitry Andric                                  Config::getRegionSizeLog();
3825ffd83dbSDimitry Andric   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
3830b57cec5SDimitry Andric   typedef FlatByteMap<NumRegions> ByteMap;
3840b57cec5SDimitry Andric 
  // Bookkeeping for the release-to-OS logic of one size class.
  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  // Freelist state of one size class: the list of batch groups plus the
  // running pop/push counters (their difference is the blocks in use).
  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };
39706c3fb27SDimitry Andric 
  // Per-size-class state. Cache-line aligned so that concurrently accessed
  // classes do not share a line.
  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
    // Region currently being carved up, and how many bytes of it have been
    // handed out so far.
    uptr CurrentRegion GUARDED_BY(Mutex);
    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
    // Per-class PRNG state, seeded in init() (presumably consumed when
    // shuffling freshly populated blocks — see the class-level comment).
    u32 RandState;
    uptr AllocatedUser GUARDED_BY(Mutex);
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex GUARDED_BY(Mutex);
    uptr MaxRegionIndex GUARDED_BY(Mutex);
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
4120b57cec5SDimitry Andric 
computeRegionId(uptr Mem)4130b57cec5SDimitry Andric   uptr computeRegionId(uptr Mem) {
414*0fca6ea1SDimitry Andric     const uptr Id = Mem >> Config::getRegionSizeLog();
4150b57cec5SDimitry Andric     CHECK_LT(Id, NumRegions);
4160b57cec5SDimitry Andric     return Id;
4170b57cec5SDimitry Andric   }
4180b57cec5SDimitry Andric 
  // Maps a fresh RegionSize-aligned region. Over-maps 2 * RegionSize so that
  // an aligned sub-range is guaranteed to exist: if the base happens to be
  // aligned, the second half is stashed for a later request (capacity
  // permitting); otherwise the misaligned head is trimmed. Any surplus tail
  // is unmapped. Returns 0 on mapping failure.
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      // Already aligned: try to keep the spare second region in the stash.
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      // Not aligned: advance to the next boundary and drop the head.
      Region = roundUp(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    // Trim whatever remains mapped past [Region, Region + MapSize).
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);

    DCHECK_EQ(Region % RegionSize, 0U);
    static_assert(Config::getRegionSizeLog() == GroupSizeLog,
                  "Memory group should be the same size as Region");

    return Region;
  }
4480b57cec5SDimitry Andric 
  // Obtains a region for ClassId, preferring the stash over a fresh mapping.
  // On success, records the class in the bytemap and widens the class's
  // min/max region-index bounds. Returns 0 on failure.
  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      // The bytemap stores ClassId + 1; 0 marks an unused region.
      ScopedLock L(ByteMapMutex);
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }
4710b57cec5SDimitry Andric 
getSizeClassInfo(uptr ClassId)4720b57cec5SDimitry Andric   SizeClassInfo *getSizeClassInfo(uptr ClassId) {
4730b57cec5SDimitry Andric     DCHECK_LT(ClassId, NumClasses);
4740b57cec5SDimitry Andric     return &SizeClassInfoArray[ClassId];
4750b57cec5SDimitry Andric   }
4760b57cec5SDimitry Andric 
  // Pushes `Size` free blocks of BatchClassId (compact pointers in `Array`)
  // onto the BatchClassId freelist. Only valid for the batch size-class; the
  // metadata (BatchGroup/TransferBatch) is carved out of the pushed blocks
  // themselves as described below.
  void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
      REQUIRES(Sci->Mutex) {
    DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in freelist for all
    // size-classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use additional block to record the free blocks in
    // BatchClassId, they are self-contained. I.e., A TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block used
    // by TransferBatch is also free for use. We don't need to recycle the
    // TransferBatch. Note that the correctness is maintained by the invariant,
    //
    //   Each popBlocks() request returns the entire TransferBatch. Returning
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that TransferBatch won't leak the address itself while it's
    // still holding other valid data.
    //
    // Besides, BatchGroup is also allocated from BatchClassId and has its
    // address recorded in the TransferBatch too. To maintain the correctness,
    //
    //   The address of BatchGroup is always recorded in the last TransferBatch
    //   in the freelist (also imply that the freelist should only be
    //   updated with push_front). Once the last TransferBatch is popped,
    //   the block used by BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and don't need additional space for them.

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
      // memory group here.
      BG->CompactPtrGroupBase = 0;
      // `BG` is also the block of BatchClassId. Note that this is different
      // from `CreateGroup` in `pushBlocksImpl`
      BG->PushedBlocks = 1;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Sci->FreeListInfo.BlockList.push_front(BG);
    }

    // All pushed blocks were consumed building the metadata above.
    if (UNLIKELY(Size == 0))
      return;

    // This happens under 2 cases.
    //   1. just allocated a new `BatchGroup`.
    //   2. Only 1 block is pushed when the freelist is empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
      // recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      DCHECK_EQ(BG->PushedBlocks, 1U);
      // `TB` is also the block of BatchClassId.
      BG->PushedBlocks += 1;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    // Distribute the remaining blocks into the front TransferBatch; when it
    // fills up, a new self-contained TransferBatch is carved out of the next
    // block being pushed.
    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        // Self-contained
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        // One slot is already taken by the batch's own address.
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will be also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }

    BG->PushedBlocks += Size;
  }
  // Push the blocks to their batch group. The layout will be like,
  //
  // FreeListInfo.BlockList - > BG -> BG -> BG
  //                            |     |     |
  //                            v     v     v
  //                            TB    TB    TB
  //                            |
  //                            v
  //                            TB
  //
  // Each BlockGroup(BG) will associate with unique group id and the free blocks
  // are managed by a list of TransferBatch(TB). To reduce the time of inserting
  // blocks, BGs are sorted and the input `Array` are supposed to be sorted so
  // that we can get better performance of maintaining sorted property.
  // Use `SameGroup=true` to indicate that all blocks in the array are from the
  // same group then we will skip checking the group id of each block.
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Sci->Mutex) {
    // BatchClassId blocks go through pushBatchClassBlocks() instead.
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
    DCHECK_GT(Size, 0U);

    // Allocates a new BatchGroup, together with its first (empty)
    // TransferBatch, out of the batch class via the per-thread cache.
    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

      return BG;
    };

    // Appends `Size` blocks to `BG`'s batch list, allocating additional
    // TransferBatches whenever the front one is full.
    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will be also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroupT *Prev = nullptr;

    // BlockList is kept sorted by CompactPtrGroupBase in ascending order;
    // advance to the insertion point for the first block's group.
    while (Cur != nullptr &&
           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
      if (Prev == nullptr)
        Sci->FreeListInfo.BlockList.push_front(Cur);
      else
        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of group and
    // push them to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        // Move to (or create) the BatchGroup for the next segment.
        while (Cur != nullptr &&
               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    // Flush the final segment.
    InsertBlocks(Cur, Array + Size - Count, Count);
  }
714bdd1243dSDimitry Andric 
  // Pops up to `MaxBlockCount` blocks from the front BatchGroup of the
  // freelist into `ToArray` and returns the number of blocks popped (0 when
  // the freelist is empty).
  u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                    CompactPtrT *ToArray, const u16 MaxBlockCount)
      REQUIRES(Sci->Mutex) {
    if (Sci->FreeListInfo.BlockList.empty())
      return 0U;

    SinglyLinkedList<TransferBatchT> &Batches =
        Sci->FreeListInfo.BlockList.front()->Batches;

    if (Batches.empty()) {
      // Only BatchClassId may have a BatchGroup with no TransferBatches: its
      // metadata blocks are themselves free blocks.
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
      Sci->FreeListInfo.BlockList.pop_front();

      // Block used by `BatchGroup` is from BatchClassId. Turn the block into
      // `TransferBatch` with single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      ToArray[0] =
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
      Sci->FreeListInfo.PoppedBlocks += 1;
      return 1U;
    }

    // So far, instead of always filling the blocks to `MaxBlockCount`, we only
    // examine single `TransferBatch` to minimize the time spent on the primary
    // allocator. Besides, the sizes of `TransferBatch` and
    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
    // primary allocator.
    // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
    // blocks and/or adjust the size of `TransferBatch` according to
    // `CacheT::getMaxCached()`.
    TransferBatchT *B = Batches.front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
    const u16 PopCount = ClassId == SizeClassMap::BatchClassId
                             ? B->getCount()
                             : Min(MaxBlockCount, B->getCount());
    B->moveNToArray(ToArray, PopCount);

    // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
    // done without holding `Mutex`.
    if (B->empty()) {
      Batches.pop_front();
      // `TransferBatch` of BatchClassId is self-contained, no need to
      // deallocate. Read the comment in `pushBatchClassBlocks()` for more
      // details.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, B);

      if (Batches.empty()) {
        BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
        Sci->FreeListInfo.BlockList.pop_front();

        // We don't keep BatchGroup with zero blocks to avoid empty-checking
        // while allocating. Note that block used for constructing BatchGroup is
        // recorded as free blocks in the last element of BatchGroup::Batches.
        // Which means, once we pop the last TransferBatch, the block is
        // implicitly deallocated.
        if (ClassId != SizeClassMap::BatchClassId)
          C->deallocate(SizeClassMap::BatchClassId, BG);
      }
    }

    Sci->FreeListInfo.PoppedBlocks += PopCount;
    return PopCount;
  }
784bdd1243dSDimitry Andric 
  // Carves a batch of new blocks for `ClassId` out of its current (or a newly
  // mapped) region, shuffles them, and pushes them onto the freelist. Returns
  // false only when mapping a new region fails.
  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    uptr Region;
    uptr Offset;
    // If the size-class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated memory
    // for that region (up to RegionSize). Otherwise, create a new region, where
    // the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block (via
    // static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);

    if (ClassId != SizeClassMap::BatchClassId) {
      // Shuffle and push the blocks one group segment at a time, so that
      // blocks stay within their group (as pushBlocksImpl with SameGroup=true
      // requires) while still being randomized within it.
      u32 N = 1;
      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      // Flush the final segment.
      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
    }

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `PushBlocks` and `PopBatch` which are external
    // interfaces. `populateFreeList` is the internal interface so we should set
    // the values back to avoid incorrectly setting the stats.
    Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the region currently associated to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }
8760b57cec5SDimitry Andric 
getStats(ScopedString * Str,uptr ClassId,SizeClassInfo * Sci)87706c3fb27SDimitry Andric   void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
87806c3fb27SDimitry Andric       REQUIRES(Sci->Mutex) {
8790b57cec5SDimitry Andric     if (Sci->AllocatedUser == 0)
8800b57cec5SDimitry Andric       return;
88106c3fb27SDimitry Andric     const uptr BlockSize = getSizeByClassId(ClassId);
88206c3fb27SDimitry Andric     const uptr InUse =
88306c3fb27SDimitry Andric         Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
88406c3fb27SDimitry Andric     const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
88506c3fb27SDimitry Andric     uptr PushedBytesDelta = 0;
88606c3fb27SDimitry Andric     if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
88706c3fb27SDimitry Andric       PushedBytesDelta =
88806c3fb27SDimitry Andric           BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
88906c3fb27SDimitry Andric     }
89006c3fb27SDimitry Andric     const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
89168d75effSDimitry Andric     Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
89206c3fb27SDimitry Andric                 "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
89306c3fb27SDimitry Andric                 "latest pushed bytes: %6zuK\n",
8940b57cec5SDimitry Andric                 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
89506c3fb27SDimitry Andric                 Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
89606c3fb27SDimitry Andric                 InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
89706c3fb27SDimitry Andric                 Sci->ReleaseInfo.LastReleasedBytes >> 10,
89806c3fb27SDimitry Andric                 PushedBytesDelta >> 10);
8995ffd83dbSDimitry Andric   }
9005ffd83dbSDimitry Andric 
  // Appends a one-line fragmentation summary for this size class to `Str`:
  // in-use vs. total blocks/pages and a utilization percentage. Runs the page
  // release machinery with a FragmentationRecorder (which, as used here, only
  // records releasable pages for accounting — confirm against
  // FragmentationRecorder's definition) rather than actually releasing memory.
  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    // A region is skipped when the bytemap says it doesn't belong to this
    // class (PossibleRegions stores ClassId + 1, see allocateRegion).
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };

    FragmentationRecorder Recorder;
    if (!Sci->FreeListInfo.BlockList.empty()) {
      // ForceAll marks every free block so the recorder sees the full set of
      // releasable pages.
      PageReleaseContext Context =
          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
                         ReleaseToOS::ForceAll);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    }

    const uptr PageSize = getPageSizeCached();
    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
    const uptr InUseBlocks =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    uptr AllocatedPagesCount = 0;
    if (TotalBlocks != 0U) {
      // Count pages only in regions owned by this class.
      for (uptr I = 0; I < NumberOfRegions; ++I) {
        if (SkipRegion(I))
          continue;
        AllocatedPagesCount += RegionSize / PageSize;
      }

      DCHECK_NE(AllocatedPagesCount, 0U);
    }

    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
    const uptr InUsePages =
        AllocatedPagesCount - Recorder.getReleasedPagesCount();
    const uptr InUseBytes = InUsePages * PageSize;

    uptr Integral;
    uptr Fractional;
    // Utilization = in-use block bytes / in-use page bytes.
    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
                      &Fractional);
    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
  }
9505f757f3fSDimitry Andric 
  // Attempts to release unused physical pages of size class `ClassId` back to
  // the OS. Returns the number of bytes actually released (0 if the attempt
  // was skipped or nothing was releasable). Must be called with `Sci->Mutex`
  // held. With the default `ReleaseToOS::Normal`, heuristics in
  // hasChanceToReleasePages() may veto the attempt; `ForceAll` bypasses them.
  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);

    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    // Bytes sitting in the free list: everything allocated from the OS minus
    // what is currently handed out (popped - pushed = blocks in use).
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
            BlockSize;

    // No free blocks at all; nothing can possibly be released.
    if (UNLIKELY(BytesInFreeList == 0))
      return 0;

    // ====================================================================== //
    // 1. Check if we have enough free blocks and if it's worth doing a page
    // release.
    // ====================================================================== //
    if (ReleaseType != ReleaseToOS::ForceAll &&
        !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
                                 ReleaseType)) {
      return 0;
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;

    // ==================================================================== //
    // 2. Mark the free blocks so we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
                                                NumberOfRegions, ReleaseType);
    if (!Context.hasBlockMarked())
      return 0;

    // ==================================================================== //
    // 3. Release the unused physical pages back to the OS.
    // ==================================================================== //
    ReleaseRecorder Recorder(Base);
    // The region index range [First, Last] may contain regions belonging to
    // other size classes; skip any region not tagged with ClassId (the map
    // stores ClassId + 1, 0 meaning unused).
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      // Reset the checkpoint so the next Normal release waits for fresh
      // pushes before trying again.
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    // Timestamp every attempt (even unsuccessful ones) to throttle Normal
    // releases via the configured interval.
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    return TotalReleasedBytes;
  }
10125f757f3fSDimitry Andric 
hasChanceToReleasePages(SizeClassInfo * Sci,uptr BlockSize,uptr BytesInFreeList,ReleaseToOS ReleaseType)10135f757f3fSDimitry Andric   bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
10145f757f3fSDimitry Andric                                uptr BytesInFreeList, ReleaseToOS ReleaseType)
10155f757f3fSDimitry Andric       REQUIRES(Sci->Mutex) {
10165f757f3fSDimitry Andric     DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
10175f757f3fSDimitry Andric     const uptr PageSize = getPageSizeCached();
10185f757f3fSDimitry Andric 
101906c3fb27SDimitry Andric     if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
102006c3fb27SDimitry Andric       Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
102106c3fb27SDimitry Andric 
102206c3fb27SDimitry Andric     // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
102306c3fb27SDimitry Andric     // so that we won't underestimate the releasable pages. For example, the
102406c3fb27SDimitry Andric     // following is the region usage,
102506c3fb27SDimitry Andric     //
102606c3fb27SDimitry Andric     //  BytesInFreeListAtLastCheckpoint   AllocatedUser
102706c3fb27SDimitry Andric     //                v                         v
102806c3fb27SDimitry Andric     //  |--------------------------------------->
102906c3fb27SDimitry Andric     //         ^                   ^
103006c3fb27SDimitry Andric     //  BytesInFreeList     ReleaseThreshold
103106c3fb27SDimitry Andric     //
103206c3fb27SDimitry Andric     // In general, if we have collected enough bytes and the amount of free
103306c3fb27SDimitry Andric     // bytes meets the ReleaseThreshold, we will try to do page release. If we
103406c3fb27SDimitry Andric     // don't update `BytesInFreeListAtLastCheckpoint` when the current
103506c3fb27SDimitry Andric     // `BytesInFreeList` is smaller, we may take longer time to wait for enough
103606c3fb27SDimitry Andric     // freed blocks because we miss the bytes between
103706c3fb27SDimitry Andric     // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
103806c3fb27SDimitry Andric     const uptr PushedBytesDelta =
103906c3fb27SDimitry Andric         BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
10405f757f3fSDimitry Andric     if (PushedBytesDelta < PageSize)
10415f757f3fSDimitry Andric       return false;
104206c3fb27SDimitry Andric 
1043e8d8bef9SDimitry Andric     // Releasing smaller blocks is expensive, so we want to make sure that a
1044e8d8bef9SDimitry Andric     // significant amount of bytes are free, and that there has been a good
1045e8d8bef9SDimitry Andric     // amount of batches pushed to the freelist before attempting to release.
10465f757f3fSDimitry Andric     if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
104706c3fb27SDimitry Andric       if (PushedBytesDelta < Sci->AllocatedUser / 16U)
10485f757f3fSDimitry Andric         return false;
1049e8d8bef9SDimitry Andric 
105006c3fb27SDimitry Andric     if (ReleaseType == ReleaseToOS::Normal) {
1051e8d8bef9SDimitry Andric       const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
10520b57cec5SDimitry Andric       if (IntervalMs < 0)
10535f757f3fSDimitry Andric         return false;
105406c3fb27SDimitry Andric 
105506c3fb27SDimitry Andric       // The constant 8 here is selected from profiling some apps and the number
105606c3fb27SDimitry Andric       // of unreleased pages in the large size classes is around 16 pages or
105706c3fb27SDimitry Andric       // more. Choose half of it as a heuristic and which also avoids page
105806c3fb27SDimitry Andric       // release every time for every pushBlocks() attempt by large blocks.
105906c3fb27SDimitry Andric       const bool ByPassReleaseInterval =
106006c3fb27SDimitry Andric           isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
106106c3fb27SDimitry Andric       if (!ByPassReleaseInterval) {
106268d75effSDimitry Andric         if (Sci->ReleaseInfo.LastReleaseAtNs +
10635ffd83dbSDimitry Andric                 static_cast<u64>(IntervalMs) * 1000000 >
106406c3fb27SDimitry Andric             getMonotonicTimeFast()) {
106506c3fb27SDimitry Andric           // Memory was returned recently.
10665f757f3fSDimitry Andric           return false;
10670b57cec5SDimitry Andric         }
10680b57cec5SDimitry Andric       }
106906c3fb27SDimitry Andric     } // if (ReleaseType == ReleaseToOS::Normal)
10700b57cec5SDimitry Andric 
10715f757f3fSDimitry Andric     return true;
10725f757f3fSDimitry Andric   }
10735f757f3fSDimitry Andric 
  // Walks the free list of size class `ClassId` and marks each free block in
  // a PageReleaseContext, so the caller can determine which pages hold only
  // free blocks and may be released. For `ReleaseToOS::Normal` (i.e. anything
  // other than ForceAll), batch groups that have not accumulated enough newly
  // freed bytes since their last checkpoint are skipped. Caller holds
  // `Sci->Mutex`.
  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
                                    const uptr BlockSize, const uptr Base,
                                    const uptr NumberOfRegions,
                                    ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    // Base of the group containing the region currently being carved up; that
    // group is only partially allocated (see AllocatedGroupSize below).
    const uptr CurGroupBase =
        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

    PageReleaseContext Context(BlockSize, NumberOfRegions,
                               /*ReleaseSize=*/RegionSize);

    // On 32-bit, compact pointers are just the addresses themselves.
    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
      // `GroupSize` may not be a multiple of `BlockSize`, which means there is
      // an unused space at the end of Region. Exclude that space to avoid
      // unused page map entry.
      uptr AllocatedGroupSize = GroupBase == CurGroupBase
                                    ? Sci->CurrentRegionAllocated
                                    : roundDownSlow(GroupSize, BlockSize);
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (ReleaseType != ReleaseToOS::ForceAll) {
        // Free bytes in this group shrank (or stayed flat) since the last
        // checkpoint: clamp the checkpoint and skip, nothing new to release.
        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
          BG.BytesInBGAtLastCheckpoint = BytesInBG;
          continue;
        }

        // Less than a page of newly freed bytes cannot free up a page.
        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
        if (PushedBytesDelta < PageSize)
          continue;

        // Given the randomness property, we try to release the pages only if
        // the bytes used by free blocks exceed certain proportion of allocated
        // spaces.
        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
                                           (100U - 1U - BlockSize / 16U)) {
          continue;
        }
      }

      // TODO: Consider updating this after page release if `ReleaseRecorder`
      // can tell the released bytes in each group.
      BG.BytesInBGAtLastCheckpoint = BytesInBG;

      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      const uptr RegionIndex = (GroupBase - Base) / RegionSize;

      if (NumBlocks == MaxContainedBlocks) {
        // Every block in the group is free: mark the whole range at once
        // instead of visiting individual blocks.
        for (const auto &It : BG.Batches)
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

        const uptr To = GroupBase + AllocatedGroupSize;
        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
                                      AllocatedGroupSize);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);

        // Note that we don't always visit blocks in each BatchGroup so that we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
                                       RegionIndex, AllocatedGroupSize,
                                       /*MayContainLastBlockInRegion=*/true);
      }

      // We may not be able to do the page release in a rare case that we may
      // fail on PageMap allocation.
      if (UNLIKELY(!Context.hasBlockMarked()))
        break;
    }

    return Context;
  }
11600b57cec5SDimitry Andric 
  // Per-size-class state (free list, region cursors, release bookkeeping).
  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  HybridMutex ByteMapMutex;
  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
  // Minimum interval between automatic releases to the OS; negative disables
  // them (read with relaxed atomics, may be updated at runtime).
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
  uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
11730b57cec5SDimitry Andric };
11740b57cec5SDimitry Andric 
11750b57cec5SDimitry Andric } // namespace scudo
11760b57cec5SDimitry Andric 
11770b57cec5SDimitry Andric #endif // SCUDO_PRIMARY32_H_
1178