//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
      : BaseAddress(BaseAddress), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(BaseAddress, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr BaseAddress = 0;
  MapPlatformData *Data = nullptr;
};

// A packed array of Counters. Each counter occupies 2^N bits, enough to store
// the counter's MaxValue. The ctor will try to use a static buffer first, and
// if that fails (the buffer is too small or already locked), will allocate the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking the isAllocated() result. For the
// sake of performance, none of the accessors check the validity of the
// arguments; it is assumed that Index is always in the [0, N) range and the
// value is not incremented past MaxValue.
class PackedCounterArray {
public:
  PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
    CHECK_GT(NumCounters, 0);
    CHECK_GT(MaxValue, 0);
    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows a
    // particular counter's Index and bit offset to be computed with shifts.
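    // For example, on a 64-bit target (MaxCounterBits == 64) with
    // MaxValue == 3, each counter needs 2 bits (CounterMask == 0x3) and a
    // single Buffer word packs 32 counters (PackingRatio == 32); 100 counters
    // would then round up to a 4-word, 32-byte BufferSize.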
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    CHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    CHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
                  PackingRatioLog) *
                 sizeof(*Buffer);
    if (BufferSize <= StaticBufferSize && Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
    }
  }

  ~PackedCounterArray() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer), BufferSize);
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return N; }

  uptr get(uptr I) const {
    DCHECK_LT(I, N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr I) const {
    DCHECK_LT(get(I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
  }

  void incRange(uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, N);
    for (uptr I = From; I < Top; I++)
      inc(I);
  }

  uptr getBufferSize() const { return BufferSize; }

private:
  const uptr N;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr BufferSize;
  uptr *Buffer;

  static HybridMutex Mutex;
  static const uptr StaticBufferSize = 1024U;
  static uptr StaticBuffer[StaticBufferSize];
};

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Freed) {
    if (Freed) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                     (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT *const Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};

template <class TransferBatchT, class ReleaseRecorderT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
                      uptr Size, uptr BlockSize, ReleaseRecorderT *Recorder) {
  const uptr PageSize = getPageSizeCached();

  // Figure out the number of chunks per page and whether we can take a fast
  // path (the number of chunks per page is the same for all pages).
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  if (BlockSize <= PageSize) {
    if (PageSize % BlockSize == 0) {
      // Same number of chunks per page, no cross overs.
      FullPagesBlockCountMax = PageSize / BlockSize;
      SameBlockCountPerPage = true;
    } else if (BlockSize % (PageSize % BlockSize) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 1;
      SameBlockCountPerPage = true;
    } else {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
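      // For illustration: with PageSize == 4096 and BlockSize == 96,
      // 4096 % 96 == 64 and 96 % 64 != 0, so this branch is taken; a given
      // page overlaps either 43 or 44 blocks depending on its offset, hence
      // the per-page count has to be computed in the slow path below.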
      FullPagesBlockCountMax = PageSize / BlockSize + 2;
      SameBlockCountPerPage = false;
    }
  } else {
    if (BlockSize % PageSize == 0) {
      // One chunk covers multiple pages, no cross overs.
      FullPagesBlockCountMax = 1;
      SameBlockCountPerPage = true;
    } else {
      // One chunk covers multiple pages, and some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      FullPagesBlockCountMax = 2;
      SameBlockCountPerPage = false;
    }
  }

  const uptr PagesCount = roundUpTo(Size, PageSize) / PageSize;
  PackedCounterArray Counters(PagesCount, FullPagesBlockCountMax);
  if (!Counters.isAllocated())
    return;

  const uptr PageSizeLog = getLog2(PageSize);
  const uptr RoundedSize = PagesCount << PageSizeLog;

  // Iterate over free chunks and count how many free chunks affect each
  // allocated page.
  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
    // Each chunk affects one page only.
    for (const auto &It : FreeList) {
      // If dealing with a TransferBatch, the first pointer of the batch will
      // point to the batch itself; we do not want to mark it for release as
      // the batch is in use, so skip the first entry.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize)
          Counters.inc(P >> PageSizeLog);
      }
    }
    for (uptr P = Size; P < RoundedSize; P += BlockSize)
      Counters.inc(P >> PageSizeLog);
  } else {
    // In all other cases chunks might affect more than one page.
    for (const auto &It : FreeList) {
      // See the TransferBatch comment above.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize)
          Counters.incRange(P >> PageSizeLog,
                            (P + BlockSize - 1) >> PageSizeLog);
      }
    }
    for (uptr P = Size; P < RoundedSize; P += BlockSize)
      Counters.incRange(P >> PageSizeLog, (P + BlockSize - 1) >> PageSizeLog);
  }

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < Counters.getCount(); I++)
      RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, the middle portion size (the portion of the page covered by
    // chunks except the first and the last one) and then the last chunk size,
    // adding up the number of chunks on the current page and checking on
    // every step whether the page boundary was crossed.
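    // For illustration, with PageSize == 4096 and BlockSize == 96 we get
    // Pn == 42 and Pnc == 4032: page 0 holds 42 full blocks plus a trailing
    // partial one (43 expected counts), while page 1 starts and ends with a
    // partial block (44 expected counts); the loop below recomputes the
    // expected count for each page.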
    uptr PrevPageBoundary = 0;
    uptr CurrentBoundary = 0;
    for (uptr I = 0; I < Counters.getCount(); I++) {
      const uptr PageBoundary = PrevPageBoundary + PageSize;
      uptr BlocksPerPage = Pn;
      if (CurrentBoundary < PageBoundary) {
        if (CurrentBoundary > PrevPageBoundary)
          BlocksPerPage++;
        CurrentBoundary += Pnc;
        if (CurrentBoundary < PageBoundary) {
          BlocksPerPage++;
          CurrentBoundary += BlockSize;
        }
      }
      PrevPageBoundary = PageBoundary;
      RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
    }
  }
  RangeTracker.finish();
}

} // namespace scudo

#endif // SCUDO_RELEASE_H_