//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cpp.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_flags.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_platform.h"

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new[].
};
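
// The alloc kind recorded at allocation time is matched against the dealloc
// kind at free time, so that mixed pairs can be reported when
// alloc_dealloc_mismatch is enabled. An illustrative sketch (not part of this
// header) using the entry points declared further below:
//
//   void *p = asan_malloc(size, &stack);     // recorded as FROM_MALLOC
//   asan_delete(p, 0, 0, &stack, FROM_NEW);  // mismatched kind -> ASan report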

class AsanChunk;

struct AllocatorOptions {
  u32 quarantine_size_mb;
  u32 thread_local_quarantine_size_kb;
  u16 min_redzone;
  u16 max_redzone;
  u8 may_return_null;
  u8 alloc_dealloc_mismatch;
  s32 release_to_os_interval_ms;

  void SetFrom(const Flags *f, const CommonFlags *cf);
  void CopyTo(Flags *f, CommonFlags *cf);
};

void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
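
// Typical initialization sequence, as a sketch (ASan's startup code does the
// equivalent with the real flag objects from asan_flags.h):
//
//   AllocatorOptions opts;
//   opts.SetFrom(flags(), common_flags());  // pull tunings from the flags
//   InitializeAllocator(opts);
//   // ... later, if the options change at runtime:
//   ReInitializeAllocator(opts);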

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() const;        // Checks if AsanChunkView points to a valid
                               // allocated or quarantined chunk.
  bool IsAllocated() const;    // Checks if the memory is currently allocated.
  bool IsQuarantined() const;  // Checks if the memory is currently quarantined.
  uptr Beg() const;            // First byte of user memory.
  uptr End() const;            // One past the last byte of user memory.
  uptr UsedSize() const;       // Size requested by the user.
  u32 UserRequestedAlignment() const;  // Originally requested alignment.
  uptr AllocTid() const;
  uptr FreeTid() const;
  bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
  u32 GetAllocStackId() const;
  u32 GetFreeStackId() const;
  AllocType GetAllocType() const;
  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
    if (addr + access_size > End()) {
      *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};
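
// Worked example of the Addr* helpers above, assuming
// End() == Beg() + UsedSize(). For a chunk with Beg() == 0x1000 and
// UsedSize() == 16 (so End() == 0x1010):
//
//   AddrIsInside(0x1004, 4, &off)   // true, off == 0x4 (within user memory)
//   AddrIsAtLeft(0x0ff8, 4, &off)   // true, off == 0x8 (bytes of underflow)
//   AddrIsAtRight(0x1010, 4, &off)  // true, off == 0x0 (just past the end)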

AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
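
// Sketch of a typical query when diagnosing an address (the real logic lives
// in the error-reporting code; DescribeUseAfterFree is hypothetical here):
//
//   AsanChunkView chunk = FindHeapChunkByAddress(addr);
//   if (chunk.IsValid() && chunk.IsQuarantined())
//     DescribeUseAfterFree(addr, chunk.GetFreeStackId());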

// FIFO list of AsanChunks that also tracks the total size of its chunks.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};
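
// Illustrative use as a quarantine queue (a sketch; the real driver is the
// quarantine machinery in asan_allocator.cpp, and Recycle is hypothetical):
//
//   AsanChunkFifoList list;
//   list.Push(chunk);             // enqueue a freed chunk, size_ grows
//   while (list.size() > limit)   // trim oldest entries first (FIFO order)
//     Recycle(list.Pop());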

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const;
  void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
  void OnUnmap(uptr p, uptr size) const;
};
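
// A sketch of when the allocator invokes these callbacks (assumed behavior;
// the definitions live in asan_allocator.cpp):
//
//   AsanMapUnmapCallback().OnMap(p, size);    // after mmap'ing a new region,
//                                             // e.g. to poison its shadow
//   AsanMapUnmapCallback().OnUnmap(p, size);  // before returning it to the OS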

#if SANITIZER_CAN_USE_ALLOCATOR64
#  if SANITIZER_FUCHSIA
// This is a sentinel indicating we do not want the primary allocator arena to
// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
#    if SANITIZER_RISCV64

// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
// Fuchsia to run asan-instrumented. That is, we can run bringup, e2e, libc, and
// scudo tests with this configuration.
//
// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
// supported, we'd need a way of dynamically checking what the VMA size is and
// determining optimal configuration.

// This indicates the total amount of space dedicated for the primary allocator
// during initialization. This is roughly proportional to the size set by the
// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
// lead to some failures in sanitized bringup tests where we can't allocate new
// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
// we re-evaluate the SizeClassMap settings.
const uptr kAllocatorSize = UINT64_C(1) << 33;  // 8GB

// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
// but has fewer size classes (ideally at most 32). Fewer size classes mean a
// larger region size for each class, and thus less chance of running out of
// space for each region. The main differences are the MidSizeLog (which is
// smaller) and the MaxSizeLog (which is larger).
//
// - The MaxSizeLog is higher to allow some of the largest allocations I've
//   observed to be placed in the primary allocator's arena as opposed to being
//   mmap'd by the secondary allocator. This helps reduce fragmentation from
//   large classes. A prime example of this is the scudo allocator tests (and
//   their testing infrastructure), which malloc/new objects on the order of
//   hundreds of kilobytes that normally would not be in the primary allocator
//   arena with the default VeryDenseSizeClassMap.
// - The MidSizeLog is reduced to help shrink the number of size classes and
//   increase region size. Without this, we'd see ASan complain many times about
//   a region running out of available space.
//
// This differs a bit from the Fuchsia config in scudo, mainly in the NumBits,
// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
// for scudo at 45, and some large objects allocated by this config would be
// placed in the arena whereas scudo would mmap them. The asan allocator needs
// to have a number of classes that is a power of 2 for various internal things
// to work, so we can't match the scudo settings to a tee. The sanitizer
// allocator is slightly slower than scudo's, but this is enough to get
// memory-intensive scudo tests to run with asan instrumentation.
typedef SizeClassMap</*kNumBits=*/2,
                     /*kMinSizeLog=*/5,
                     /*kMidSizeLog=*/8,
                     /*kMaxSizeLog=*/18,
                     /*kNumCachedHintT=*/8,
                     /*kMaxBytesCachedLog=*/10>
    SizeClassMap;
static_assert(SizeClassMap::kNumClassesRounded <= 32,
              "The above tunings were specifically selected to ensure there "
              "would be at most 32 size classes. This restriction could be "
              "loosened to 64 size classes if we can find a configuration of "
              "allocator size and SizeClassMap tunings that allows us to "
              "reliably run all bringup tests in a sanitized environment.");
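
// For reference, the arithmetic behind the tunings above (roughly, per the
// SizeClassMap contract): kMinSizeLog=5 gives a 32-byte smallest class,
// kMidSizeLog=8 switches from linear to geometric class spacing at 256 bytes,
// and kMaxSizeLog=18 keeps allocations up to 2^18 = 256 KiB in the primary
// allocator; anything larger is mmap'd by the secondary allocator.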

#    else
// These are the default allocator tunings for non-RISCV environments where the
// VMA is usually 48 bits and we have lots of space.
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#    endif
#  elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#  elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
#  elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
#  elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#  elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL;  // 512G.
typedef DefaultSizeClassMap SizeClassMap;
#  elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#  else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#  endif
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else  // Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
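
// With kRegionSizeLog = 20, the 32-bit primary allocator carves the address
// space into 2^20 = 1 MiB regions, each dedicated to a single size class.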
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;

template <typename AddressSpaceView>
using AsanAllocatorASVT =
    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = AsanAllocator::AllocatorCache;

struct AsanThreadLocalMallocStorage {
  uptr quarantine_cache[16];
  AllocatorCache allocator_cache;
  void CommitBack();
 private:
  // These objects are allocated via mmap() and are zero-initialized.
  AsanThreadLocalMallocStorage() {}
};
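
// Sketch of the intended lifecycle (assumed from the interface; the real call
// sites live in the thread bookkeeping code, not in this header):
//
//   AsanThreadLocalMallocStorage *ms = ...;  // comes zero-initialized via
//                                            // mmap, hence the private ctor
//   // ... per-thread mallocs/frees fill allocator_cache/quarantine_cache ...
//   ms->CommitBack();  // on thread death, return cached memory to the
//                      // global allocator and quarantine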

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
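
// These are the bodies behind the malloc-family interceptors. A sketch of how
// an interceptor calls them (the wrapper name is hypothetical;
// GET_STACK_TRACE_MALLOC is the macro the real interceptors use to capture the
// caller's stack into a local `stack`):
//
//   void *intercepted_malloc(uptr size) {
//     GET_STACK_TRACE_MALLOC;
//     return asan_malloc(size, &stack);
//   }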

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H