//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS. Note that `From`
  // and `To` are offsets from `Base` + `Offset`.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given
  // range right after Base will not be released.
  uptr Offset = 0;
};

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to combine
  // (Base + Offset) into a new Base. However, on Fuchsia the Base is
  // retrieved from `MapPlatformData`, which would not be aware of the Offset.
  // Therefore, store them separately so that this works on all platforms.
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given
  // range right after Base will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

// A buffer pool which holds a fixed number of static buffers for fast buffer
// allocation. If the request size is greater than `StaticBufferSize`, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferSize> class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do a zero check
  // while extracting the least significant set bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferSize, SCUDO_CACHE_LINE_SIZE), "");

  // Return a buffer which is at least `BufferSize`.
  uptr *getBuffer(const uptr BufferSize) {
    if (UNLIKELY(BufferSize > StaticBufferSize))
      return getDynamicBuffer(BufferSize);

    uptr index;
    {
      // TODO: In general, we expect this operation to be fast, so the waiting
      // thread won't be put to sleep. The HybridMutex does implement
      // busy-waiting, but we may want to review the performance and see
      // whether we need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(BufferSize);

    const uptr Offset = index * StaticBufferSize;
    memset(&RawBuffer[Offset], 0, StaticBufferSize);
    return &RawBuffer[Offset];
  }

  void releaseBuffer(uptr *Buffer, const uptr BufferSize) {
    const uptr index = getStaticBufferIndex(Buffer, BufferSize);
    if (index < StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << index)), 0U);
      Mask |= static_cast<uptr>(1) << index;
    } else {
      unmap(reinterpret_cast<void *>(Buffer),
            roundUp(BufferSize, getPageSizeCached()));
    }
  }

  bool isStaticBufferTestOnly(uptr *Buffer, uptr BufferSize) {
    return getStaticBufferIndex(Buffer, BufferSize) < StaticBufferCount;
  }

private:
  uptr getStaticBufferIndex(uptr *Buffer, uptr BufferSize) {
    if (UNLIKELY(BufferSize > StaticBufferSize))
      return StaticBufferCount;

    const uptr BufferBase = reinterpret_cast<uptr>(Buffer);
    const uptr RawBufferBase = reinterpret_cast<uptr>(RawBuffer);

    if (BufferBase < RawBufferBase ||
        BufferBase >= RawBufferBase + sizeof(RawBuffer)) {
      return StaticBufferCount;
    }

    DCHECK_LE(BufferSize, StaticBufferSize);
    DCHECK_LE(BufferBase + BufferSize, RawBufferBase + sizeof(RawBuffer));
    DCHECK_EQ((BufferBase - RawBufferBase) % StaticBufferSize, 0U);

    const uptr index =
        (BufferBase - RawBufferBase) / (StaticBufferSize * sizeof(uptr));
    DCHECK_LT(index, StaticBufferCount);
    return index;
  }

  uptr *getDynamicBuffer(const uptr BufferSize) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
    // where page fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia. It hasn't proven
    // a performance benefit on other platforms.
    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    return reinterpret_cast<uptr *>(
        map(nullptr, roundUp(BufferSize, getPageSizeCached()), "scudo:counters",
            MmapFlags, &MapData));
  }

  HybridMutex Mutex;
  // '1' means the buffer at that index is not used. '0' means the buffer is
  // in use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferSize] GUARDED_BY(Mutex);
  [[no_unique_address]] MapPlatformData MapData = {};
};
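
// A minimal illustrative sketch (not part of the allocator) of how the pool
// above is typically used: request a scratch buffer, use it, then hand it
// back. The sizes below are arbitrary example values.
//
//   BufferPool<2U, 512U> Pool;
//   // Small requests are served from the zero-initialized static storage.
//   uptr *Small = Pool.getBuffer(128U);
//   // Requests larger than StaticBufferSize fall back to map().
//   uptr *Large = Pool.getBuffer(4096U);
//   Pool.releaseBuffer(Small, 128U);   // Marks the static slot free again.
//   Pool.releaseBuffer(Large, 4096U);  // unmap()s the dynamic buffer.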

// A Region page map is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits,
// enough to store the counter's MaxValue. The ctor will try to use a static
// buffer first, and if that fails (the buffer is too small or already
// locked), it will allocate the required Buffer via map(). The caller is
// expected to check whether the initialization was successful by checking
// the isAllocated() result. For the sake of performance, none of the
// accessors check the validity of the arguments; it is assumed that Index is
// always in the [0, N) range and that the value is not incremented past
// MaxValue.
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0),
        NumCounters(0),
        CounterSizeBitsLog(0),
        CounterMask(0),
        PackingRatioLog(0),
        BitOffsetMask(0),
        SizePerRegion(0),
        BufferSize(0),
        Buffer(nullptr) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer, BufferSize);
    Buffer = nullptr;
  }

  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
  // way to specify the thread-safety attribute properly with the current code
  // structure. Besides, it's the only place where we may want to check thread
  // safety. Therefore, it's fine to bypass the thread-safety analysis for now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts when calculating a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
    Buffer = Buffers.getBuffer(BufferSize);
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                              << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the max number of blocks in a
  // page may vary. To provide an easier way to tell if all the blocks are
  // counted for different pages, set them to the same max value to denote the
  // all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferSize() const { return BufferSize; }

private:
  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferSize;
  uptr *Buffer;

  // We may consider making this configurable if there are cases which may
  // benefit from it.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferSize = 512U;
  static BufferPool<StaticBufferCount, StaticBufferSize> Buffers;
};
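
// Worked example of the counter packing above (illustrative numbers only,
// assuming a 64-bit uptr): with MaxValue = 12, CounterSizeBits becomes
// roundUpPowerOfTwo(getMostSignificantSetBitIndex(12) + 1) =
// roundUpPowerOfTwo(4) = 4, so each counter takes 4 bits and CounterMask is
// 0xf. PackingRatio is 64 / 4 = 16 counters per uptr, hence PackingRatioLog
// = 4 and BitOffsetMask = 15. Counter I then lives in word I >> 4, at bit
// offset (I & 15) << 2 within that word.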

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
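
// A minimal illustrative sketch (not part of the allocator) of how the range
// tracker above coalesces releasable pages; `RegionBase` is a placeholder.
// Page states are fed in order and each contiguous "released" run is reported
// to the recorder as a single call:
//
//   ReleaseRecorder Recorder(/*Base=*/RegionBase);
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(Recorder);
//   Tracker.processNextPage(true);   // Page 0: releasable, opens a range.
//   Tracker.processNextPage(true);   // Page 1: extends the open range.
//   Tracker.processNextPage(false);  // Page 2: in use, closes pages [0, 2).
//   Tracker.skipPages(8);            // Pages 3-10 are not examined.
//   Tracker.finish();                // Flushes any still-open range.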

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no crossovers.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no crossovers.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks cross page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }
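
    // Illustrative numbers for the cases above, assuming PageSize == 4096:
    // BlockSize == 64 divides the page evenly, so FullPagesBlockCountMax ==
    // 64 and every page sees the same count; BlockSize == 48 gives
    // PageSize % BlockSize == 16 and 48 % 16 == 0, so pages hold at most
    // 85 + 1 == 86 (partially overlapping) blocks, still the same on every
    // page; BlockSize == 96 matches neither condition, so pages hold at most
    // 42 + 2 == 44 blocks and the per-page count varies.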

    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting
  // all the blocks, we will just mark the pages as all counted. Note that
  // `From` and `To` have to be page aligned, with one exception: if `To` is
  // equal to `Base` + RegionSize, it doesn't need to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // The straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not start at the first page in the range; move
    // `FromInRegion` to the page of the first block.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, there is a
    // block sitting across `From`, which looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     | ...
    //  +-----+-----+-----+-----+
    //     |-    first page     -||-    second page    -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead, we
    // increment the number of blocks in the first page in the page map and
    // then round `FromInRegion` up to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because the range may contain only one block.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below is like,
      //
      //   From                                      To
      //     V                                        V
      //     +----------------------------------------+
      //                          +-----+-----+-----+-----+
      //                          |     |     |     |     | ...
      //                          +-----+-----+-----+-----+
      //                    ... -||-    last page    -||-    next page    -|
      //
      // The last block is not aligned to `To`, so we need to increment the
      // counter of the `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark any
    // page within the remaining range [From, To) as all counted.
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }

  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark
      // the following "pretend" memory block(s) as free in advance.
      //
      //     Region Boundary
      //         v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks  -|
      //      ^
      //   last block
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to have the
      // trailing blocks marked. Note that if the last "pretend" block
      // straddles the boundary, we still have to count it so that the logic
      // of counting the number of blocks on a page stays consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }

  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, the pages before the release offset do not
  // need to be counted.
  uptr ReleasePageOffset;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't have any in-use blocks, i.e., all the
// blocks in those pages are free. The `PageMap` records the number of free
// blocks in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over the pages, detecting ranges of pages with chunk counters
  // equal to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, the middle portion size (the portion of the page covered by
    // chunks other than the first and the last one) and then the last chunk
    // size, adding up the number of chunks on the current page and checking
    // on every step whether the page boundary was crossed.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset * PageSize;
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}
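
// A minimal illustrative sketch (not part of the allocator) of how the pieces
// above fit together for a single region. `RegionBase`, `RegionSize`,
// `BlockSize`, `FreeList` and `Decompact` are placeholders for what a size
// class allocator would supply:
//
//   PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
//                              /*ReleaseSize=*/RegionSize);
//   Context.markFreeBlocksInRegion(FreeList, Decompact, RegionBase,
//                                  /*RegionIndex=*/0U, RegionSize,
//                                  /*MayContainLastBlockInRegion=*/true);
//   ReleaseRecorder Recorder(RegionBase);
//   auto SkipRegion = [](uptr RegionIndex) { return false; };
//   releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
//   // Recorder.getReleasedBytes() now reports how much was given back.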

} // namespace scudo

#endif // SCUDO_RELEASE_H_