//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
      : Base(Base), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS. From and To are
  // byte offsets relative to Base.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr Base = 0;
  MapPlatformData *Data = nullptr;
};

// A packed array of Counters. Each counter occupies 2^N bits, enough to store
// the counter's MaxValue. The constructor tries to use a static buffer first,
// and if that fails (the buffer is too small or already locked), allocates the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by calling isAllocated(). For the sake of
// performance, none of the accessors check the validity of their arguments;
// it is assumed that Index is always in the [0, N) range and that the value
// is never incremented past MaxValue.
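//
// A minimal usage sketch (the values below are hypothetical, for
// illustration only):
//   PackedCounterArray Counters(/*NumberOfRegions=*/1U,
//                               /*CountersPerRegion=*/128U, /*MaxValue=*/5U);
//   if (Counters.isAllocated()) {
//     Counters.inc(/*Region=*/0, /*I=*/3); // Counter 3 of region 0 is now 1.
//     const uptr V = Counters.get(0, 3);   // V == 1.
//   }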
class PackedCounterArray {
public:
  PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
                     uptr MaxValue)
      : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
    DCHECK_GT(Regions, 0);
    DCHECK_GT(NumCounters, 0);
    DCHECK_GT(MaxValue, 0);
    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows us to use
    // bit shifts when calculating a particular counter's index and offset.
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;
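    // Worked example (assuming a 64-bit target, so MaxCounterBits == 64): for
    // MaxValue == 5, the counter needs 3 significant bits, which rounds up to
    // CounterSizeBits == 4, so CounterSizeBitsLog == 2 and CounterMask == 0xF.
    // PackingRatio == 64 >> 2 == 16 counters per word, so PackingRatioLog == 4
    // and BitOffsetMask == 15.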

    SizePerRegion =
        roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
    if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
        Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
              "scudo:counters", MAP_ALLOWNOMEM));
    }
  }
  ~PackedCounterArray() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer),
            roundUpTo(BufferSize, getPageSizeCached()));
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                              << BitOffset;
  }
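
  // Continuing the worked example above (16 4-bit counters per word): for
  // I == 37, Index == 37 >> 4 == 2 and BitOffset == (37 & 15) << 2 == 20, so
  // counter 37 occupies bits [20, 24) of Buffer[Region * SizePerRegion + 2].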

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  uptr getBufferSize() const { return BufferSize; }

  static const uptr StaticBufferCount = 2048U;

private:
  const uptr Regions;
  const uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferSize;
  uptr *Buffer;

  static HybridMutex Mutex;
  static uptr StaticBuffer[StaticBufferCount];
};

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Freed) {
    if (Freed) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                     (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT *const Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
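
// Illustrative trace (the input sequence below is hypothetical): with freed
// bits {1, 1, 0, 1}, processing page 2 closes the open range and reports the
// byte range [0, 2 * PageSize) to the recorder, and finish() reports
// [3 * PageSize, 4 * PageSize).
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(&Recorder);
//   Tracker.processNextPage(true);  // Page 0: opens a range.
//   Tracker.processNextPage(true);  // Page 1: extends it.
//   Tracker.processNextPage(false); // Page 2: releases pages [0, 2).
//   Tracker.processNextPage(true);  // Page 3: opens a new range.
//   Tracker.finish();               // Releases page [3, 4).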

template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
          typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                      ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
                      SkipRegionT SkipRegion) {
  const uptr PageSize = getPageSizeCached();

  // Figure out the number of chunks per page and whether we can take a fast
  // path (the number of chunks per page is the same for all pages).
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  if (BlockSize <= PageSize) {
    if (PageSize % BlockSize == 0) {
      // Same number of chunks per page, no chunk crosses a page boundary.
      FullPagesBlockCountMax = PageSize / BlockSize;
      SameBlockCountPerPage = true;
    } else if (BlockSize % (PageSize % BlockSize) == 0) {
      // Some chunks cross page boundaries, which means that the page contains
      // one or two partial chunks, but all pages contain the same number of
      // chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 1;
      SameBlockCountPerPage = true;
    } else {
      // Some chunks cross page boundaries, which means that the page contains
      // one or two partial chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 2;
      SameBlockCountPerPage = false;
    }
  } else {
    if (BlockSize % PageSize == 0) {
      // One chunk covers multiple pages, no chunk crosses a page boundary.
      FullPagesBlockCountMax = 1;
      SameBlockCountPerPage = true;
    } else {
      // One chunk covers multiple pages, and some chunks cross page
      // boundaries. Some pages contain one chunk, some contain two.
      FullPagesBlockCountMax = 2;
      SameBlockCountPerPage = false;
    }
  }
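
  // Worked examples (assuming PageSize == 4096; block sizes are illustrative):
  //   BlockSize == 64: 4096 % 64 == 0, so every page overlaps exactly
  //                    64 chunks (fast case).
  //   BlockSize == 48: 4096 % 48 == 16 and 48 % 16 == 0, so every page
  //                    overlaps exactly 4096 / 48 + 1 == 86 chunks.
  //   BlockSize == 96: 4096 % 96 == 64 and 96 % 64 != 0, so a page overlaps
  //                    at most 4096 / 96 + 2 == 44 chunks (slow path).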

  const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
  PackedCounterArray Counters(NumberOfRegions, PagesCount,
                              FullPagesBlockCountMax);
  if (!Counters.isAllocated())
    return;

  const uptr PageSizeLog = getLog2(PageSize);
  const uptr RoundedRegionSize = PagesCount << PageSizeLog;
  const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;

  // Iterate over free chunks and count how many free chunks affect each
  // allocated page.
  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
    // Each chunk affects one page only.
    for (const auto &It : FreeList) {
      for (u32 I = 0; I < It.getCount(); I++) {
        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
        if (P >= RoundedSize)
          continue;
        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
        const uptr PInRegion = P - RegionIndex * RegionSize;
        Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
      }
    }
  } else {
    // In all other cases chunks might affect more than one page.
    DCHECK_GE(RegionSize, BlockSize);
    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
    for (const auto &It : FreeList) {
      for (u32 I = 0; I < It.getCount(); I++) {
        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
        if (P >= RoundedSize)
          continue;
        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
        uptr PInRegion = P - RegionIndex * RegionSize;
        Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
                          (PInRegion + BlockSize - 1) >> PageSizeLog);
        // The last block in a region might straddle a page, so if it's free,
        // we mark the following "pretend" memory block(s) as free as well, so
        // that the counters of the region's tail pages (which no real block
        // overlaps) can still reach their expected value.
        if (PInRegion == LastBlockInRegion) {
          PInRegion += BlockSize;
          while (PInRegion < RoundedRegionSize) {
            Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
                              (PInRegion + BlockSize - 1) >> PageSizeLog);
            PInRegion += BlockSize;
          }
        }
      }
    }
  }

  // Iterate over the pages, detecting ranges of pages whose chunk counters
  // equal the expected number of chunks for that page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++)
        RangeTracker.processNextPage(Counters.get(I, J) ==
                                     FullPagesBlockCountMax);
    }
  } else {
    // Slow path: go through the pages, keeping count of how many chunks
    // affect each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, then by the middle portion size (the portion of the page covered
    // by chunks other than the first and the last), and then by the last
    // chunk size, adding up the number of chunks on the current page and
    // checking at every step whether the page boundary was crossed.
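    // Illustrative trace (assuming PageSize == 4096 and BlockSize == 96, so
    // Pn == 42 and Pnc == 4032): page 0 ends up expecting 43 chunks (chunks
    // 0..42 all start below 4096), and page 1 expects 44 (chunk 42 spills in
    // from page 0, chunks 43..85 start within the page, the last of them at
    // offset 8160 < 8192).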
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
      }
    }
  }
  RangeTracker.finish();
}
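
// A minimal caller sketch (FreeList, RegionBase, RegionSize, and BlockSize
// are hypothetical here; the real callers are the primary allocators):
//   ReleaseRecorder Recorder(RegionBase);
//   auto DecompactPtr = [](uptr CompactPtr) -> uptr { return CompactPtr; };
//   auto SkipRegion = [](uptr RegionIndex) -> bool { return false; };
//   releaseFreeMemoryToOS(FreeList, RegionSize, /*NumberOfRegions=*/1U,
//                         BlockSize, &Recorder, DecompactPtr, SkipRegion);
//   const uptr Released = Recorder.getReleasedBytes();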

} // namespace scudo

#endif // SCUDO_RELEASE_H_