//===-- hwasan_allocator.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

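// Tags used on the slow path when no current Thread is available to generate
// a random tag (see HwasanAllocate and HwasanDeallocate).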
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the Metadata is not
  // yet ready, or almost returned to the underlying allocator and the Metadata
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
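// Holds random bytes that are copied into the unused tail of each allocation
// and verified on free to detect writes past the requested size.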
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

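// Records the allocating thread's unique id and the allocation stack id
// (packed into a single 64-bit context), stores the requested size split into
// two 32-bit halves, and finally publishes CHUNK_ALLOCATED with release
// ordering so that readers observing the state also see the rest of the
// metadata.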
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED &&
         GetRequestedSize();
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

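// In aliasing mode, heap blocks are placed in an alias region offset from the
// shadow base; the CHECKs below verify that the whole region keeps the same
// upper address bits (above kTaggableRegionCheckShift) as the shadow base.
// Outside aliasing mode this returns 0.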
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

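// Initializes allocator state: caches the tagging flag, configures the
// underlying allocator (including the alias region start), and fills
// tail_magic with random bytes.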
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

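// Rounds the requested size up to a whole number of shadow granules
// (kShadowAlignment bytes); zero-sized requests are treated as one byte.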
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

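// Common allocation path: enforces size and RSS limits, obtains memory from
// the per-thread cache (or the global fallback cache), optionally zeroes or
// pattern-fills it, writes the tail magic into the unused part of the last
// granule, tags the memory according to the tag_in_malloc/tag_in_free flags
// (using a short-granule tag for a partial last granule), and records the
// allocation metadata.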
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, size);
  return user_ptr;
}

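// Returns true if the pointer's tag matches the shadow tag of its first
// granule (including the short-granule encoding). Pointers outside the
// taggable region always match.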
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

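// Reports an invalid free if the pointer is not an application address or its
// tag does not match memory; returns true when a report was issued.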
static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

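// Common deallocation path: validates the pointer, verifies the tail magic,
// clears the chunk metadata, optionally pattern-fills the memory, retags it
// with a fresh full-granule tag (when tag_in_free is enabled) to catch
// use-after-free, records the free in the thread's heap allocations ring
// buffer, and returns the block to the allocator.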
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  RunFreeHooks(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

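// Implements realloc as allocate-copy-free: a new block is allocated, at most
// min(new_size, old requested size) bytes are copied, and the old block is
// freed.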
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

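// Returns a view of the chunk containing `address`, or an empty view if the
// address is not owned by the allocator.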
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

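// Returns the requested size of the chunk if `tagged_ptr` points exactly at
// the user beginning of an allocation, and 0 otherwise.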
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (beg != untagged_ptr) return 0;
  return b->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

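// realloc semantics: a null pointer behaves like malloc, a zero size frees the
// pointer and returns nullptr, otherwise the block is reallocated.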
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

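// Returns the start of the live chunk that `p` points into (including the
// operator new(0) special case), or 0 if there is no such chunk.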
uptr PointsIntoChunk(void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
629