//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
      : Base(Base), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr Base = 0;
  MapPlatformData *Data = nullptr;
};
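
// Note: From and To are offsets relative to Base, not absolute addresses,
// since releasePagesToOS() is given Base separately. For example (assuming a
// 4096-byte page size), releasePageRangeToOS(0, 4096) releases the first page
// of the mapping starting at Base.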

// A packed array of Counters. Each counter occupies 2^N bits, enough to store
// the counter's MaxValue. The ctor will try to use a static buffer first, and
// if that fails (the buffer is too small or already locked), will allocate the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking the isAllocated() result. For the
// sake of performance, none of the accessors check the validity of the
// arguments: Index is assumed to always be in the [0, N) range, and the value
// is assumed to never be incremented past MaxValue.
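//
// Worked example, for illustration only (assuming a 64-bit uptr): with
// MaxValue = 10, the most significant set bit index is 3, so a counter needs
// 4 bits, already a power of two. That gives CounterSizeBits = 4,
// CounterMask = 0xF, PackingRatio = 64 / 4 = 16 counters per word,
// PackingRatioLog = 4 and BitOffsetMask = 0xF. 100 counters per region then
// pack into roundUpTo(100, 16) / 16 = 7 words per region.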
class PackedCounterArray {
public:
  PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
                     uptr MaxValue)
      : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
    DCHECK_GT(Regions, 0);
    DCHECK_GT(NumCounters, 0);
    DCHECK_GT(MaxValue, 0);
    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts when calculating a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
    if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
        Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
              "scudo:counters", MAP_ALLOWNOMEM, &MapData));
    }
  }
  ~PackedCounterArray() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer),
            roundUpTo(BufferSize, getPageSizeCached()), 0, &MapData);
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
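    // Index selects the word holding counter I within the region's slice;
    // BitOffset is the counter's bit position within that word: the low
    // PackingRatioLog bits of I pick the slot, scaled by the counter width.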
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                              << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  uptr getBufferSize() const { return BufferSize; }

  static const uptr StaticBufferCount = 2048U;

private:
  const uptr Regions;
  const uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferSize;
  uptr *Buffer;
  [[no_unique_address]] MapPlatformData MapData = {};

  static HybridMutex Mutex;
  static uptr StaticBuffer[StaticBufferCount];
};
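
// Usage sketch (illustrative values only, not taken from any caller): one
// region of 100 page counters, each able to count up to 4 blocks.
//
//   PackedCounterArray Counters(/*NumberOfRegions=*/1U,
//                               /*CountersPerRegion=*/100U, /*MaxValue=*/4U);
//   if (Counters.isAllocated()) {
//     Counters.inc(/*Region=*/0U, /*I=*/7U);
//     const uptr C = Counters.get(/*Region=*/0U, /*I=*/7U); // C == 1
//   }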

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Freed) {
    if (Freed) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                     (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT *const Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
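
// Sketch of the intended driving pattern (hypothetical page states, mirroring
// how releaseFreeMemoryToOS() below uses the tracker): feed one bool per page,
// then call finish() to flush the last open range.
//
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(&Recorder);
//   Tracker.processNextPage(true);  // Page 0 free: opens a range.
//   Tracker.processNextPage(true);  // Page 1 free: extends it.
//   Tracker.processNextPage(false); // Page 2 busy: releases pages [0, 2).
//   Tracker.finish();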

template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
          typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                      ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
                      SkipRegionT SkipRegion) {
  const uptr PageSize = getPageSizeCached();

  // Figure out the number of chunks per page and whether we can take a fast
  // path (the number of chunks per page is the same for all pages).
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  if (BlockSize <= PageSize) {
    if (PageSize % BlockSize == 0) {
      // Same number of chunks per page, no crossovers.
      FullPagesBlockCountMax = PageSize / BlockSize;
      SameBlockCountPerPage = true;
    } else if (BlockSize % (PageSize % BlockSize) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 1;
      SameBlockCountPerPage = true;
    } else {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 2;
      SameBlockCountPerPage = false;
    }
  } else {
    if (BlockSize % PageSize == 0) {
      // One chunk covers multiple pages, no crossovers.
      FullPagesBlockCountMax = 1;
      SameBlockCountPerPage = true;
    } else {
      // One chunk covers multiple pages, and some chunks cross page
      // boundaries. Some pages contain one chunk, some contain two.
      FullPagesBlockCountMax = 2;
      SameBlockCountPerPage = false;
    }
  }
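
  // For illustration (values assumed, not from any caller): with a 4096-byte
  // page, BlockSize = 1024 hits the first case (4 chunks per page, same count
  // everywhere); BlockSize = 3072 hits the second (4096 % 3072 = 1024 and
  // 3072 % 1024 == 0, so every page holds 4096 / 3072 + 1 = 2 chunks); and
  // BlockSize = 96 hits the third (4096 % 96 = 64, 96 % 64 != 0), so a page
  // is affected by at most 4096 / 96 + 2 = 44 chunks and the per-page count
  // varies.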

  const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
  PackedCounterArray Counters(NumberOfRegions, PagesCount,
                              FullPagesBlockCountMax);
  if (!Counters.isAllocated())
    return;

  const uptr PageSizeLog = getLog2(PageSize);
  const uptr RoundedRegionSize = PagesCount << PageSizeLog;
  const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;

  // Iterate over free chunks and count how many free chunks affect each
  // allocated page.
  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
    // Each chunk affects one page only.
    for (const auto &It : FreeList) {
      for (u32 I = 0; I < It.getCount(); I++) {
        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
        if (P >= RoundedSize)
          continue;
        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
        const uptr PInRegion = P - RegionIndex * RegionSize;
        Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
      }
    }
  } else {
    // In all other cases chunks might affect more than one page.
    DCHECK_GE(RegionSize, BlockSize);
    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
    for (const auto &It : FreeList) {
      for (u32 I = 0; I < It.getCount(); I++) {
        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
        if (P >= RoundedSize)
          continue;
        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
        uptr PInRegion = P - RegionIndex * RegionSize;
        Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
                          (PInRegion + BlockSize - 1) >> PageSizeLog);
        // The last block in a region might straddle a page, so if it's
        // free, we mark the following "pretend" memory block(s) as free.
        if (PInRegion == LastBlockInRegion) {
          PInRegion += BlockSize;
          while (PInRegion < RoundedRegionSize) {
            Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
                              (PInRegion + BlockSize - 1) >> PageSizeLog);
            PInRegion += BlockSize;
          }
        }
      }
    }
  }

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++)
        RangeTracker.processNextPage(Counters.get(I, J) ==
                                     FullPagesBlockCountMax);
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the size of the
    // first chunk, then by the middle portion size (the portion of the page
    // covered by chunks other than the first and the last one), and then by
    // the last chunk size, adding up the number of chunks on the current page
    // and checking at every step whether the page boundary was crossed.
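    //
    // For illustration (assumed values): with PageSize = 4096 and
    // BlockSize = 96, Pn = 42 and Pnc = 4032. On the first page the loop
    // counts 42 whole chunks plus the one straddling into the next page
    // (BlocksPerPage = 43); on the second page it also counts the chunk
    // straddling in from the first page (BlocksPerPage = 44), and so on.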
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
      }
    }
  }
  RangeTracker.finish();
}
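
// Usage sketch (hypothetical identifiers, for illustration only): how a
// primary allocator might drive this, with DecompactPtr mapping a compact
// free-list entry back to an address and SkipRegion filtering out regions
// that should not be scanned.
//
//   ReleaseRecorder Recorder(RegionBase);
//   auto DecompactPtr = [](uptr CompactPtr) { return CompactPtr; };
//   auto SkipRegion = [](uptr RegionIndex) { return false; };
//   releaseFreeMemoryToOS(FreeList, RegionSize, /*NumberOfRegions=*/1U,
//                         BlockSize, &Recorder, DecompactPtr, SkipRegion);
//   const uptr Bytes = Recorder.getReleasedBytes();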

} // namespace scudo

#endif // SCUDO_RELEASE_H_