//===-- msan_allocator.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __msan {

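// Per-chunk metadata kept by the allocator alongside each allocation. MSan
// records only the size the user requested; AllocationSize() below reads it
// back to answer __sanitizer_get_allocated_size() queries, and a value of 0
// marks a freed chunk.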
struct Metadata {
  uptr requested_size;
};

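// Map/unmap callbacks invoked by the allocator. When a chunk of user memory
// is unmapped, it is unpoisoned, and the shadow (and, with origin tracking
// enabled, origin) pages that describe it are returned to the OS. Both
// mappings are linear, so the range [p, p+size) is described by exactly
// [MEM_TO_SHADOW(p), MEM_TO_SHADOW(p)+size); the concrete MEM_TO_SHADOW /
// MEM_TO_ORIGIN formulas are platform-specific.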
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

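// Per-architecture parameters for the primary allocator. 64-bit targets use
// SizeClassAllocator64 over a fixed address range (kSpaceBeg/kSpaceSize)
// chosen per platform so that it stays clear of MSan's shadow and origin
// mappings; mips64 uses the 32-bit-style SizeClassAllocator32 instead.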
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
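// The combined allocator routes small requests to the size-class primary
// allocator above and oversized ones to a secondary allocator that mmaps
// them directly (LargeMmapAllocator); in practice only secondary chunks are
// ever unmapped and thus hit MsanMapUnmapCallback::OnUnmap.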
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

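// Allocation behavior is tunable through common sanitizer flags, e.g.
// MSAN_OPTIONS=max_allocation_size_mb=512 caps a single allocation at
// 512 MiB (clamped to kMaxAllowedMallocSize above), and
// MSAN_OPTIONS=allocator_may_return_null=1 turns fatal allocation errors
// into null returns.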
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void LockAllocator() { allocator.ForceLock(); }

void UnlockAllocator() { allocator.ForceUnlock(); }

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

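// Common allocation path behind malloc/calloc/realloc/memalign and friends.
// After the size and RSS-limit checks, memory comes from the current thread's
// allocator cache, or from a mutex-protected fallback cache when no MsanThread
// is set up yet. New memory is either unpoisoned (zeroise, the calloc path) or
// poisoned as uninitialized, recording a TAG_ALLOC heap origin when origin
// tracking is on.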
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

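// Free path: with poison_in_free set, primary chunks are re-poisoned (with a
// TAG_DEALLOC origin) so that later reads of the freed memory report as
// uninitialized; the chunk is then returned to the thread-local or fallback
// cache.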
void MsanDeallocate(BufferedStackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison it via
  // MsanMapUnmapCallback, so there is no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

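// realloc implementation: if the existing chunk can already hold new_size
// bytes, resize in place, poisoning any newly exposed tail as uninitialized;
// otherwise allocate a new chunk, copy Min(old, new) bytes together with
// their shadow and origins (CopyMemory), and free the old chunk.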
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

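// calloc implementation: reject nmemb * size overflow up front, then allocate
// with zeroise=true so the result is fully initialized from MSan's point of
// view.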
static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

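// Returns the start of the live allocation containing p, or null when p does
// not point into one (requested_size == 0 identifies a freed chunk, see
// MsanDeallocate above).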
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return (const void *)beg;
}

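// Returns the requested size of the chunk that starts exactly at p, and 0 for
// any other pointer; this doubles as the ownership test behind
// __sanitizer_get_ownership().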
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

void *msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

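// Implementation of the public allocator introspection interface declared in
// sanitizer/allocator_interface.h.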
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

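// MSan does not track free/unmapped byte counts; the constants returned below
// appear to be placeholders rather than meaningful statistics.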
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
452