//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
      : Base(Base), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr Base = 0;
  MapPlatformData *Data = nullptr;
};

// A RegionPageMap is used to record the usage of pages in the regions. It
// implements a packed array of Counters, with each counter occupying 2^N bits,
// enough to store the counter's MaxValue. The constructor will try to use a
// static buffer first, and if that fails (the buffer is too small or already
// locked), will allocate the required Buffer via map(). The caller is expected
// to check whether the initialization was successful by calling isAllocated().
// For performance's sake, none of the accessors check the validity of the
// arguments; it is assumed that Index is always in the [0, N) range and that
// the value is never incremented past MaxValue.
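//
// A minimal usage sketch (illustrative values, not taken from any caller):
//   RegionPageMap PageMap(/*NumberOfRegions=*/1U, /*CountersPerRegion=*/256U,
//                         /*MaxValue=*/4U);
//   if (PageMap.isAllocated()) {
//     PageMap.inc(/*Region=*/0, /*I=*/42); // Count one free block on page 42.
//     const uptr C = PageMap.get(0, 42);   // C == 1.
//   }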
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0),
        NumCounters(0),
        CounterSizeBitsLog(0),
        CounterMask(0),
        PackingRatioLog(0),
        BitOffsetMask(0),
        SizePerRegion(0),
        BufferSize(0),
        Buffer(nullptr) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer),
            roundUpTo(BufferSize, getPageSizeCached()));
    Buffer = nullptr;
  }

  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts when calculating a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;
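    // Worked example, assuming a 64-bit uptr: with MaxValue == 4,
    // CounterSizeBits == roundUpToPowerOfTwo(2 + 1) == 4, hence
    // CounterSizeBitsLog == 2 and CounterMask == 0xf; PackingRatio == 64 / 4
    // == 16 counters per word, hence PackingRatioLog == 4 and BitOffsetMask
    // == 0xf.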

    SizePerRegion =
        roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
    if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
        Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      // When using a heap-based buffer, precommit the pages backing the
      // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
      // where page fault exceptions are skipped as the allocated memory
      // is accessed.
      const uptr MmapFlags =
          MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
              "scudo:counters", MmapFlags, &MapData));
    }
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
  }
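
  // Indexing example, continuing the worked example in reset()
  // (PackingRatioLog == 4, CounterSizeBitsLog == 2): for I == 21,
  // Index == 21 >> 4 == 1 and BitOffset == (21 & 0xf) << 2 == 20, i.e.
  // counter 21 occupies bits [20, 24) of the second word of the region's
  // slice of Buffer.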

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                              << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the maximum number of blocks
  // in a page may vary. To make it easier to tell whether all the blocks in a
  // page have been counted, regardless of that per-page maximum, fully counted
  // pages are all set to the same max value, denoting the all-counted status.
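  //
  // For instance, with CounterMask == 0xf, a page expecting at most 3 blocks
  // is rewritten to 0xf once fully counted, so isAllCounted() can compare
  // against a single sentinel value for every page.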
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferSize() const { return BufferSize; }

  static const uptr StaticBufferCount = 2048U;

private:
  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferSize;
  uptr *Buffer;
  [[no_unique_address]] MapPlatformData MapData = {};

  static HybridMutex Mutex;
  static uptr StaticBuffer[StaticBufferCount];
};

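// Visits pages in order and coalesces consecutive releasable pages into
// [From, To) ranges, reporting each completed range to the ReleaseRecorder.
// Callers feed it one page at a time via processNextPage(), jump over entire
// regions via skipPages(), and must call finish() to flush the last open
// range.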
template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
      BlockSize(BlockSize),
      RegionSize(RegionSize),
      NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no crossovers.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no crossovers.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks cross page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }
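
    // Worked examples, assuming PageSize == 4096: BlockSize == 64 divides the
    // page evenly, so every page holds exactly 64 blocks. BlockSize == 48
    // leaves a remainder of 16 and 48 % 16 == 0, so every page holds 85 full
    // blocks plus one straddling block (86 in total). BlockSize == 96 leaves
    // a remainder of 64 and 96 % 64 != 0, so a page may hold up to
    // 42 + 2 == 44 blocks, and the count varies from page to page.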

    PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    RoundedRegionSize = PagesCount << PageSizeLog;
    RoundedSize = NumberOfRegions * RoundedRegionSize;
  }

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  void ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    DCHECK(PageMap.isAllocated());
  }

  template <class TransferBatchT, typename DecompactPtrT>
  void markFreeBlocks(const IntrusiveList<TransferBatchT> &FreeList,
                      DecompactPtrT DecompactPtr, uptr Base) {
    ensurePageMapAllocated();

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr P = DecompactPtr(It.get(I)) - Base;
          if (P >= RoundedSize)
            continue;
          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
          const uptr PInRegion = P - RegionIndex * RegionSize;
          PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr P = DecompactPtr(It.get(I)) - Base;
          if (P >= RoundedSize)
            continue;
          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
          uptr PInRegion = P - RegionIndex * RegionSize;
          PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
                           (PInRegion + BlockSize - 1) >> PageSizeLog);
          // The last block in a region might straddle a page, so if it's
          // free, we mark the following "pretend" memory block(s) as free.
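          // For instance, with a hypothetical RegionSize == 16384 and
          // BlockSize == 96, LastBlockInRegion == 169 * 96 == 16224; freeing
          // that block also counts the nonexistent block at 16320, so the
          // tail of the last page isn't mistaken for in-use memory.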
          if (PInRegion == LastBlockInRegion) {
            PInRegion += BlockSize;
            while (PInRegion < RoundedRegionSize) {
              PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
                               (PInRegion + BlockSize - 1) >> PageSizeLog);
              PInRegion += BlockSize;
            }
          }
        }
      }
    }
  }

  uptr BlockSize;
  uptr RegionSize;
  uptr NumberOfRegions;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr RoundedRegionSize;
  uptr RoundedSize;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't contain any in-use blocks, i.e., all
// the blocks in them are free. The `PageMap` records the number of free
// blocks in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages, detecting ranges of pages whose chunk counters equal
  // the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease = PageMap.get(I, J) == FullPagesBlockCountMax;
        if (CanRelease)
          PageMap.setAsAllCounted(I, J);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, the middle portion size (the portion of the page covered by chunks
    // other than the first and the last one) and then the last chunk size,
    // adding up the number of chunks on the current page and checking on every
    // step whether the page boundary was crossed.
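    // Worked example, assuming PageSize == 4096 and BlockSize == 3000:
    // Pn == 1 and Pnc == 3000. Page 0 starts with CurrentBoundary == 0, picks
    // up the block at offset 3000 that starts before the page boundary, and
    // so expects 2 blocks; page 1 then begins mid-block (6000 > 4096), so it
    // also expects 2 blocks.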
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease = PageMap.get(I, J) == BlocksPerPage;
        if (CanRelease)
          PageMap.setAsAllCounted(I, J);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}

// An overload of releaseFreeMemoryToOS that doesn't require the page usage
// information after releasing.
template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
          typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                      ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
                      SkipRegionT SkipRegion) {
  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
  Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
  releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
}
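
// A minimal usage sketch (hypothetical names, for illustration only): a size
// class allocator holding a single 1 MB region of 128-byte blocks could
// release its free pages roughly as follows, where `decompactPtr` stands for
// whatever mapping the caller uses from compact pointers to addresses:
//   ReleaseRecorder Recorder(RegionBase);
//   auto SkipRegion = [](uptr RegionIndex) { return false; };
//   auto DecompactPtr = [](CompactPtrT Ptr) { return decompactPtr(Ptr); };
//   releaseFreeMemoryToOS(FreeList, /*RegionSize=*/1 << 20,
//                         /*NumberOfRegions=*/1U, /*BlockSize=*/128, Recorder,
//                         DecompactPtr, SkipRegion);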

} // namespace scudo

#endif // SCUDO_RELEASE_H_