//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS. Note that `From`
  // and `To` are offsets from `Base` + `Offset`.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given range
  // after Base will not be released.
  uptr Offset = 0;
};

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to combine (Base +
  // Offset) into a new Base. However, the Base is retrieved from
  // `MapPlatformData` on Fuchsia, which means the Offset would not be taken
  // into account there. Therefore, store them separately so that this works
  // on all platforms.
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given range
  // after Base will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

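// Both recorders expose the same releasePageRangeToOS() interface, so either
// one can be driven by the release logic at the bottom of this file. A minimal
// usage sketch (illustrative only; the surrounding variable names are
// assumptions, and the real call sites live in the allocator code that
// includes this header):
//
//   RegionReleaseRecorder<MemMapT> Recorder(&RegionMemMap, RegionBeg,
//                                           /*Offset=*/ReleaseBase);
//   releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
//   ReleasedBytesLastRound = Recorder.getReleasedBytes();
//   NumReleaseRanges += Recorder.getReleasedRangesCount();
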
class FragmentationRecorder {
public:
  FragmentationRecorder() = default;

  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }

  void releasePageRangeToOS(uptr From, uptr To) {
    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
    ReleasedPagesCount += (To - From) / getPageSizeCached();
  }

private:
  uptr ReleasedPagesCount = 0;
};

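// Because FragmentationRecorder implements the same releasePageRangeToOS()
// interface but only counts pages, it can be handed to the same release logic
// to estimate how many pages would be reclaimed without actually unmapping
// anything. A hedged sketch (names other than the classes in this file are
// assumptions):
//
//   FragmentationRecorder Recorder;
//   releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
//   const uptr ReleasablePages = Recorder.getReleasedPagesCount();
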
// A buffer pool which holds a fixed number of static buffers of `uptr`
// elements for fast buffer allocation. If the request size is greater than
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do a zero check
  // while extracting the least significant bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
                          SCUDO_CACHE_LINE_SIZE),
                "");

  struct Buffer {
    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
    uptr *Data = nullptr;

    // The index of the underlying static buffer, or StaticBufferCount if this
    // buffer was dynamically allocated. This value is initially set to a
    // poison value to aid debugging.
    uptr BufferIndex = ~static_cast<uptr>(0);

    // Only valid if BufferIndex == StaticBufferCount.
    MemMapT MemMap = {};
  };

  // Returns a zero-initialized buffer which can contain at least the given
  // number of elements, or a Buffer with null Data on failure.
  Buffer getBuffer(const uptr NumElements) {
    if (UNLIKELY(NumElements > StaticBufferNumElements))
      return getDynamicBuffer(NumElements);

    uptr index;
    {
      // TODO: In general, we expect this operation to be fast, so the waiting
      // thread won't be put to sleep. The HybridMutex does implement
      // busy-waiting, but we may want to review the performance and see if we
      // need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(NumElements);

    Buffer Buf;
    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
    Buf.BufferIndex = index;
    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
    return Buf;
  }

  void releaseBuffer(Buffer Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    if (Buf.BufferIndex != StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
    } else {
      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
    }
  }

  bool isStaticBufferTestOnly(const Buffer &Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    return Buf.BufferIndex != StaticBufferCount;
  }

private:
  Buffer getDynamicBuffer(const uptr NumElements) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
    // where page fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia. It has not shown a
    // performance benefit on other platforms.
    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    const uptr MappedSize =
        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
    Buffer Buf;
    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
      Buf.BufferIndex = StaticBufferCount;
    }
    return Buf;
  }

  HybridMutex Mutex;
  // '1' means that the buffer index is not used. '0' means the buffer is in
  // use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};

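// A minimal usage sketch of the pool (illustrative only; the template
// parameters below mirror the ones RegionPageMap uses, but the element count
// is an arbitrary example):
//
//   BufferPool<2, 512> Pool;
//   BufferPool<2, 512>::Buffer Buf = Pool.getBuffer(/*NumElements=*/128);
//   if (Buf.Data != nullptr) {
//     Buf.Data[0] = 42;         // Storage is zero-initialized before use.
//     Pool.releaseBuffer(Buf);  // Returns the static slot, or unmaps it.
//   }
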
// A region page map is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits,
// enough to store the counter's MaxValue. The ctor will try to use a static
// buffer first, and if that fails (the buffer is too small or already locked),
// it will allocate the required Buffer via map(). The caller is expected to
// check whether the initialization was successful by checking the
// isAllocated() result. For performance's sake, none of the accessors check
// the validity of the arguments; it is assumed that Index is always in the
// [0, N) range and the value is not incremented past MaxValue.
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
        BufferNumElements(0) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer);
    Buffer = {};
  }

  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
  // way to specify the thread-safety attribute properly in the current code
  // structure. Besides, it's the only place we may want to check thread
  // safety. Therefore, it's fine to bypass the thread-safety analysis now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
    // Rounding the counter storage size up to a power of two allows using bit
    // shifts to calculate a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferNumElements = SizePerRegion * Regions;
    Buffer = Buffers.getBuffer(BufferNumElements);
  }

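  // A worked example of the packing arithmetic above (illustrative numbers,
  // assuming a 64-bit uptr): with MaxValue = 5, CounterSizeBits =
  // roundUpPowerOfTwo(2 + 1) = 4, so CounterMask = 0xf and PackingRatio =
  // 64 / 4 = 16 counters per uptr word. With CountersPerRegion = 100,
  // SizePerRegion = roundUp(100, 16) / 16 = 7, i.e. each region's counters
  // occupy 7 words of the buffer.
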
  bool isAllocated() const { return Buffer.Data != nullptr; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
           CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                                   << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the max number of blocks in a
  // page may vary. To provide an easier way to tell if all the blocks are
  // counted for different pages, set them to the same max value to denote the
  // all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferNumElements() const { return BufferNumElements; }

private:
  // We may consider making these configurable if there are cases that may
  // benefit from it.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferNumElements = 512U;
  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
  static BufferPoolT Buffers;

  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferNumElements;
  BufferPoolT::Buffer Buffer;
};

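// Putting the accessors together, the typical lifecycle of a page map is
// roughly the following (a simplified sketch; the concrete argument names are
// placeholders for illustration):
//
//   RegionPageMap PageMap;
//   PageMap.reset(/*NumberOfRegion=*/1, /*CountersPerRegion=*/PagesCount,
//                 /*MaxValue=*/FullPagesBlockCountMax);
//   if (!PageMap.isAllocated())
//     return;                     // The buffer allocation failed.
//   PageMap.inc(0, PageIndex);    // Count one free block on a page.
//   if (PageMap.updateAsAllCountedIf(0, PageIndex, BlocksPerPage)) {
//     // Every block on this page is free; the page can be released.
//   }
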
template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};

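// For example (an illustrative trace, not taken from real data): feeding the
// tracker the per-page decisions true, true, false, true and then calling
// finish() produces two recorder callbacks,
//
//   releasePageRangeToOS(0 * PageSize, 2 * PageSize);  // pages 0 and 1
//   releasePageRangeToOS(3 * PageSize, 4 * PageSize);  // page 3
//
// i.e. adjacent releasable pages are coalesced into a single reported range.
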
struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no crossovers.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no crossovers.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks cross page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }

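    // As a concrete illustration (assuming a 4096-byte page, with block sizes
    // picked for this example only): BlockSize = 64 hits the first case above
    // (FullPagesBlockCountMax = 64, same count on every page); BlockSize = 48
    // hits the second case, since 4096 % 48 = 16 and 48 % 16 == 0
    // (FullPagesBlockCountMax = 86); BlockSize = 96 hits the third case, since
    // 4096 % 96 = 64 and 96 % 64 != 0 (FullPagesBlockCountMax = 44).
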
    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting all
  // the blocks, we will just mark the pages as all counted. Note that `From`
  // and `To` have to be page aligned, with one exception: if `To` is equal to
  // the RegionSize, it doesn't need to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // The straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not sit at the first page in the range, so move
    // `FromInRegion` to the first block's page.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, there is a
    // block sitting across `From`, which looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     | ...
    //  +-----+-----+-----+-----+
    //     |- first page -||- second page -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead, we
    // increment the number of blocks in the first page in the page map and
    // then round up the `From` to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because it may contain only one block in the range.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below looks like,
      //
      //   From                                      To
      //     V                                        V
      //     +----------------------------------------+
      //                          +-----+-----+-----+-----+
      //                          |     |     |     |     | ...
      //                          +-----+-----+-----+-----+
      //                    ... -||- last page -||- next page -|
      //
      // The last block is not aligned to `To`, so we need to increment the
      // counter of `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark any
    // page in between the range [From, To).
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }

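  // A small numeric walk-through of the routine above (illustrative values
  // only, assuming PageSize = 4096, BlockSize = 96, Base = 0, ReleaseOffset =
  // 0 and a region much larger than the range): for From = 4096 and To =
  // 16384, the first block in the range starts at offset 4128, so page 1 gets
  // incN(..., 43) for the 43 blocks starting within it; pages 2 and 3 are
  // marked as all counted; and since the last block in the range (starting at
  // 16320) straddles `To`, page 4 is incremented by 1 instead of being marked.
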
  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark the
      // following "pretend" memory block(s) as free in advance.
      //
      //     Region Boundary
      //                v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks -|
      //      ^
      //   last block
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to mark the
      // trailing blocks. Note that if the last "pretend" block straddles the
      // boundary, we still have to count it in so that the logic of counting
      // the number of blocks on a page is consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }

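  // To illustrate the trailing-block handling above with hypothetical numbers
  // (PageSize = 4096, BlockSize = 96, RegionSize = 16384): the region holds
  // 170 blocks, the last one starting at offset 16224, so TrailingBlockBase =
  // 16320 and the remaining 64 bytes up to the rounded region end are covered
  // by one "pretend" block, i.e. the last page gets incN(..., 1) before the
  // free list is counted.
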
  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, some pages at the front do not need to be
  // counted.
  uptr ReleasePageOffset;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't have any in-use block, i.e., all the
// blocks in them are free. The `PageMap` will record the number of free blocks
// in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, middle portion size (the portion of the page covered by chunks
    // except the first and the last one) and then the last chunk size, adding
    // up the number of chunks on the current page and checking on every step
    // whether the page boundary was crossed.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset * PageSize;
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}

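// Tying the pieces together, a full release pass looks roughly like the
// following (a hypothetical sketch; the real call sites, with their locking
// and statistics handling, live in the allocator code that includes this
// header):
//
//   PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
//                              /*ReleaseSize=*/RegionSize);
//   Context.markFreeBlocksInRegion(FreeList, DecompactPtr, RegionBeg,
//                                  /*RegionIndex=*/0, RegionSize,
//                                  /*MayContainLastBlockInRegion=*/true);
//   ReleaseRecorder Recorder(RegionBeg);
//   releaseFreeMemoryToOS(Context, Recorder,
//                         [](uptr) { return false; });  // Skip no regions.
//   // Recorder.getReleasedBytes() now reflects what was returned to the OS.
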
} // namespace scudo

#endif // SCUDO_RELEASE_H_