xref: /freebsd/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

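// Tags used on the fallback allocation/deallocation path, when there is no
// current Thread to generate a random tag.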
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// Initialized in HwasanAllocatorInit, and never changed afterwards.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id &&
         metadata_->get_requested_size();
}

// Right-aligns 'addr' within its granule so that the allocation ends on a
// granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->get_requested_size());
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

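// In aliasing mode (x86_64), heap allocations are served from an alias region
// that starts at a fixed offset above the shadow base. The CHECKs below verify
// that the whole region stays within the same taggable region as the shadow.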
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

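// Rounds the requested size up to the next multiple of the shadow granule
// (a zero-sized request is treated as one byte), since memory is tagged with
// granule granularity.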
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

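// Allocation path: allocate a granule-aligned block, fill it as requested,
// write the tail magic into the slack of the last granule, and tag the user
// memory if tagging is enabled.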
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->set_requested_size(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
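  // Fill the slack between the requested size and the end of the last granule
  // with the tail magic. The final byte is skipped because it may hold the
  // short granule tag.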
  if (size != orig_size) {
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag the memory with the zero tag.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
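      // Short granule: the shadow of the last, partially used granule stores
      // the number of used bytes, and the real tag goes into the granule's
      // last byte.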
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

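// Returns true if the tag embedded in 'tagged_ptr' matches the tag stored in
// shadow for its granule (allowing for short granules). Pointers outside the
// taggable region always match.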
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

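// Deallocation path: verify that the pointer and memory tags still match,
// check the tail magic for overwrites, optionally fill and retag the freed
// memory, record the free in the thread's heap_allocations() buffer, and
// return the block to the allocator.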
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
                           ? UntagPtr(tagged_ptr)
                           : tagged_ptr;
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->get_requested_size();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->set_requested_size(0);
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
      flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

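// realloc() path: allocate a new block, copy the smaller of the old requested
// size and the new size, then free the old block.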
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

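// calloc() path: check for multiplication overflow, then perform a
// zero-initialized allocation.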
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

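// Returns a view of the heap chunk that contains 'address', or an empty view
// if the address does not belong to a heap allocation.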
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

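// Returns the requested size of the allocation 'tagged_ptr' points to, or 0 if
// the pointer does not point to the beginning of a heap allocation.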
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->get_requested_size();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }