//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
// total allocated chunks. To be used in memory-constrained cases or when few
// large allocations are expected (currently, 32-bit platforms and the
// internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  inline void *Init() { return &p_[0]; }
  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// A much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
// provide the same functionality on Fuchsia, which does not support
// MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  inline void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

  inline void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};
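
// Footprint note (a rough sketch derived from the constants above, not a
// measured value): the static array always occupies
// kMaxNumChunks * sizeof(uptr) bytes, i.e. 128 KiB on 32-bit and 256 KiB on
// 64-bit targets. The dynamic array reserves (1 << 20) * sizeof(uptr) bytes of
// address space (8 MiB on 64-bit) up front and commits it lazily in blocks of
// kChunksBlockCount * sizeof(uptr) bytes (128 KiB on 64-bit) as EnsureSpace()
// observes that the reserved part is full.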

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
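      // Remove the chunk with a swap-and-pop: the last chunk is moved into the
      // freed slot. Removal stays O(1), but the array is no longer sorted,
      // hence chunks_sorted_ is cleared below.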
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) const {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 bytes of metadata are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) const {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
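    // The loop picks the chunk with the largest start address that is still
    // <= p. Since nearest_chunk starts at 0, p - nearest_chunk is initially
    // the largest possible distance, so the first chunk at or below p always
    // replaces it; all arithmetic is on unsigned uptr values.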
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk; skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };
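
  // Layout of a single allocation, as set up by Allocate():
  //
  //   map_beg ... [Header | metadata][ user data, h->size bytes ] ... map_end
  //                ^ res - page_size_ ^ res (page-aligned user pointer)
  //
  // The Header occupies the start of the page immediately preceding the user
  // pointer, and the remainder of that page is returned by GetMetaData().
  // When alignment <= page_size_ the header page is map_beg itself; larger
  // alignments leave an unused gap between map_beg and the header page.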

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};
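
// A minimal usage sketch, kept as a comment since this header is only ever
// included from sanitizer_allocator.h. It assumes the AllocatorStats class
// defined elsewhere in sanitizer_allocator.h; in the sanitizers themselves
// LargeMmapAllocator is normally driven as the secondary allocator inside
// CombinedAllocator rather than used directly.
//
//   LargeMmapAllocator<> secondary;
//   AllocatorStats stats;
//   secondary.Init();
//   stats.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/64);
//   CHECK(secondary.PointerIsMine(p));
//   CHECK_GE(secondary.GetActuallyAllocatedSize(p), 1 << 20);
//   secondary.Deallocate(&stats, p);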