//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
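// For illustration: RZLog2Size maps 0 -> 16, 1 -> 32, 2 -> 64, ..., 7 -> 2048,
// and RZSize2Log inverts it, e.g. RZSize2Log(64) == Log2(64) - 4 == 2.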

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
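// Example with illustrative values: tid 5 and stack id 0x1234 are packed as
// 0x0000000500001234, so the load above recovers the stack id from the low
// 32 bits and the tid from the high 32 bits.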

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
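//
// Note: this indirection is only set up when the chunk header is not at the
// very start of the allocation (see Allocate(): alloc_beg != chunk_beg), e.g.
// when the left redzone is larger than the header or the request is
// over-aligned.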

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
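  // The user-requested size is stored in up to 48 bits: on 64-bit targets the
  // high 16 bits live in user_requested_size_hi and the low 32 bits in
  // user_requested_size_lo; on 32-bit targets only the low word is used.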
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but the AsanChunk is
  // not yet ready, or almost returned to the underlying allocator and the
  // AsanChunk is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into the quarantine zone.
  CHUNK_QUARANTINE = 3,
};
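// A chunk's state moves through these values in one direction (see Allocate,
// Deallocate and QuarantineCallback::Recycle below):
// CHUNK_INVALID -> CHUNK_ALLOCATED -> CHUNK_QUARANTINE -> CHUNK_INVALID.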

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
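// Ordering note: Set(p) writes chunk_header before release-storing the magic,
// and Get() acquire-loads the magic before reading chunk_header, so a reader
// that observes kAllocBegMagic also observes a fully initialized pointer.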

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    void *p = get_allocator().GetBlockBegin(m);
    if (p != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
    }

    u8 old_chunk_state = CHUNK_QUARANTINE;
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_INVALID, memory_order_acquire)) {
      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
    }

    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
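
// Freed chunks are not returned to the underlying allocator right away; they
// are first held (poisoned) in the quarantine, which delays reuse of their
// memory and thereby improves the chances of catching use-after-free bugs.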

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;
  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
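    // For illustration, with default flags (redzone=16, max_redzone=2048): a
    // 100-byte request falls into the "<= 512 - 64" bucket, so rz_log == 2,
    // i.e. a 64-byte redzone; the clamping below keeps the result within
    // [min_log, max_log] and at least large enough to hold the header.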
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
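  // Round-trip example (illustrative): a requested alignment of 32 gives
  // ComputeUserRequestedAlignmentLog(32) == Log2(32) - 2 == 3, and
  // ComputeUserAlignment(3) == 1LL << 5 == 32; alignments above 512 are
  // clamped, so the log always fits the 3-bit header field.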

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
    // available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
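    // Illustrative sizing, assuming SHADOW_GRANULARITY == 8: for malloc(100),
    // rz_size == 64 (rz_log == 2), rounded_size == RoundUpTo(100, 8) == 104,
    // so needed_size == 168; alignment == min_alignment, so no extra padding
    // is added.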
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set the quarantine flag if the chunk is allocated; issue an ASan error
  // report for available and quarantined chunks. Returns true on success,
  // false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header; it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but for any
    // other state value the chunk is definitely not valid.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }

void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
  // Look for the arg pointer of threads that have been created or are running.
  // This is necessary to prevent false positive leaks due to the AsanThread
  // holding the only live reference to a heap object. This can happen because
  // the `pthread_create()` interceptor doesn't wait for the child thread to
  // start before returning, thus losing the only live reference to the heap
  // object on the stack.

  __asan::AsanThreadContext *atctx =
      reinterpret_cast<__asan::AsanThreadContext *>(tctx);
  __asan::AsanThread *asan_thread = atctx->thread;

  // Note: ThreadStatusRunning is required because there is a small window
  // where the thread status has switched to `ThreadStatusRunning` but the
  // `arg` pointer is not yet on the stack.
  if (atctx->status != ThreadStatusCreated &&
      atctx->status != ThreadStatusRunning)
    return;

  uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
  if (!thread_arg)
    return;

  auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
  ptrsVec->push_back(thread_arg);
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc., here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif
1246