//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
      : BaseAddress(BaseAddress), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(BaseAddress, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr BaseAddress = 0;
  MapPlatformData *Data = nullptr;
};
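
// Example usage (an illustrative sketch, not part of this header; assumes a
// page-aligned Base returned by map() and a 4096-byte page size; From/To are
// offsets relative to Base):
//   ReleaseRecorder Recorder(Base);
//   Recorder.releasePageRangeToOS(0, 2 * 4096); // Releases the first 2 pages.
//   // Recorder.getReleasedRangesCount() == 1
//   // Recorder.getReleasedBytes() == 2 * 4096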

// A packed array of Counters. Each counter occupies 2^N bits, enough to store
// the counters' MaxValue. The constructor tries to use a static buffer first,
// and if that fails (the buffer is too small or already locked), allocates the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking the isAllocated() result. For
// performance's sake, none of the accessors check the validity of their
// arguments; it is assumed that the index is always in the [0, N) range and
// that the value is not incremented past MaxValue.
class PackedCounterArray {
public:
  PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
    CHECK_GT(NumCounters, 0);
    CHECK_GT(MaxValue, 0);
    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts when calculating a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    CHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    CHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;
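    // Worked example (illustrative): on a 64-bit platform with MaxValue = 3,
    // CounterSizeBits = roundUpToPowerOfTwo(1 + 1) = 2, so CounterMask = 0x3,
    // PackingRatio = 64 / 2 = 32 counters per word, PackingRatioLog = 5 and
    // BitOffsetMask = 31: counter I lives in Buffer[I >> 5], at bit offset
    // (I & 31) << 1.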

    BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
                  PackingRatioLog) *
                 sizeof(*Buffer);
    if (BufferSize <= StaticBufferSize && Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
    }
  }
  ~PackedCounterArray() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer), BufferSize);
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return N; }

  uptr get(uptr I) const {
    DCHECK_LT(I, N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr I) const {
    DCHECK_LT(get(I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
  }

  void incRange(uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, N);
    for (uptr I = From; I < Top; I++)
      inc(I);
  }

  uptr getBufferSize() const { return BufferSize; }

private:
  const uptr N;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr BufferSize;
  uptr *Buffer;

  static HybridMutex Mutex;
  static const uptr StaticBufferSize = 1024U;
  static uptr StaticBuffer[StaticBufferSize];
};
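
// Example usage (an illustrative sketch; the values are hypothetical):
//   PackedCounterArray Counters(/*NumCounters=*/1024, /*MaxValue=*/3);
//   if (!Counters.isAllocated())
//     return; // The static buffer was busy and map() could not be satisfied.
//   Counters.inc(0);         // Counter 0 becomes 1.
//   Counters.incRange(4, 7); // Counters 4 through 7 each become 1.
//   const uptr C = Counters.get(0); // == 1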

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Freed) {
    if (Freed) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                     (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT *const Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
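
// Example usage (an illustrative sketch): pages 1 and 2 are free, so a single
// contiguous range covering both is reported to the recorder.
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(&Recorder);
//   Tracker.processNextPage(false); // Page 0 is busy.
//   Tracker.processNextPage(true);  // Page 1 is free, a range opens.
//   Tracker.processNextPage(true);  // Page 2 is free, the range grows.
//   Tracker.finish(); // Reports releasePageRangeToOS(PageSize, 3 * PageSize).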

template <class TransferBatchT, class ReleaseRecorderT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
                      uptr Size, uptr BlockSize, ReleaseRecorderT *Recorder) {
  const uptr PageSize = getPageSizeCached();

  // Figure out the number of chunks per page and whether we can take a fast
  // path (the number of chunks per page is the same for all pages).
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  if (BlockSize <= PageSize) {
    if (PageSize % BlockSize == 0) {
      // Same number of chunks per page, no crossovers.
      FullPagesBlockCountMax = PageSize / BlockSize;
      SameBlockCountPerPage = true;
    } else if (BlockSize % (PageSize % BlockSize) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 1;
      SameBlockCountPerPage = true;
    } else {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 2;
      SameBlockCountPerPage = false;
    }
  } else {
    if (BlockSize % PageSize == 0) {
      // One chunk covers multiple pages, no crossovers.
      FullPagesBlockCountMax = 1;
      SameBlockCountPerPage = true;
    } else {
      // One chunk covers multiple pages, and some chunks cross page
      // boundaries. Some pages contain one chunk, some contain two.
      FullPagesBlockCountMax = 2;
      SameBlockCountPerPage = false;
    }
  }
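
  // Worked example (illustrative, with PageSize = 4096): BlockSize = 32
  // divides the page evenly, so FullPagesBlockCountMax = 128 and every page
  // overlaps the same number of chunks. BlockSize = 48 leaves
  // PageSize % BlockSize = 16 and 48 % 16 == 0, so every page overlaps
  // 4096 / 48 + 1 = 86 chunks (full or partial). BlockSize = 96 leaves a
  // remainder of 64 and 96 % 64 != 0, so the per-page count varies and the
  // slow path below is taken.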

  const uptr PagesCount = roundUpTo(Size, PageSize) / PageSize;
  PackedCounterArray Counters(PagesCount, FullPagesBlockCountMax);
  if (!Counters.isAllocated())
    return;

  const uptr PageSizeLog = getLog2(PageSize);
  const uptr RoundedSize = PagesCount << PageSizeLog;

  // Iterate over free chunks and count how many free chunks affect each
  // allocated page.
  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
    // Each chunk affects one page only.
    for (const auto &It : FreeList) {
      // If dealing with a TransferBatch, the first pointer of the batch will
      // point to the batch itself; we do not want to mark this for release, as
      // the batch is in use, so skip the first entry.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize)
          Counters.inc(P >> PageSizeLog);
      }
    }
    for (uptr P = Size; P < RoundedSize; P += BlockSize)
      Counters.inc(P >> PageSizeLog);
  } else {
    // In all other cases chunks might affect more than one page.
    for (const auto &It : FreeList) {
      // See TransferBatch comment above.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize)
          Counters.incRange(P >> PageSizeLog,
                            (P + BlockSize - 1) >> PageSizeLog);
      }
    }
    for (uptr P = Size; P < RoundedSize; P += BlockSize)
      Counters.incRange(P >> PageSizeLog, (P + BlockSize - 1) >> PageSizeLog);
  }

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < Counters.getCount(); I++)
      RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, middle portion size (the portion of the page covered by chunks
    // except the first and the last one) and then the last chunk size, adding
    // up the number of chunks on the current page and checking on every step
    // whether the page boundary was crossed.
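    // Worked example (illustrative): with PageSize = 4096 and BlockSize = 96,
    // Pn = 42 and Pnc = 4032. Page 0 holds 42 full chunks plus the head of a
    // chunk crossing into page 1, so it expects 43 chunks; page 1 holds that
    // chunk's tail, 42 full chunks and the head of another one, so it expects
    // 44.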
    uptr PrevPageBoundary = 0;
    uptr CurrentBoundary = 0;
    for (uptr I = 0; I < Counters.getCount(); I++) {
      const uptr PageBoundary = PrevPageBoundary + PageSize;
      uptr BlocksPerPage = Pn;
      if (CurrentBoundary < PageBoundary) {
        if (CurrentBoundary > PrevPageBoundary)
          BlocksPerPage++;
        CurrentBoundary += Pnc;
        if (CurrentBoundary < PageBoundary) {
          BlocksPerPage++;
          CurrentBoundary += BlockSize;
        }
      }
      PrevPageBoundary = PageBoundary;

      RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
    }
  }
  RangeTracker.finish();
}
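
// Illustrative call site (a sketch, not part of this header; FreeList, Base,
// RegionSize and BlockSize stand in for a primary allocator's size-class
// region state):
//   ReleaseRecorder Recorder(Base);
//   releaseFreeMemoryToOS(FreeList, Base, RegionSize, BlockSize, &Recorder);
//   // Recorder.getReleasedBytes() now reports how much memory, if any, from
//   // fully-free pages was returned to the OS.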

} // namespace scudo

#endif // SCUDO_RELEASE_H_