xref: /freebsd/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp (revision e64bea71c21eb42e97aa615188ba91f6cce0d36d)
1 //===-- asan_allocator.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of AddressSanitizer, an address sanity checker.
10 //
11 // Implementation of ASan's memory allocator, 2-nd version.
12 // This variant uses the allocator from sanitizer_common, i.e. the one shared
13 // with ThreadSanitizer and MemorySanitizer.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "asan_allocator.h"
18 
19 #include "asan_internal.h"
20 #include "asan_mapping.h"
21 #include "asan_poisoning.h"
22 #include "asan_report.h"
23 #include "asan_stack.h"
24 #include "asan_suppressions.h"
25 #include "asan_thread.h"
26 #include "lsan/lsan_common.h"
27 #include "sanitizer_common/sanitizer_allocator_checks.h"
28 #include "sanitizer_common/sanitizer_allocator_interface.h"
29 #include "sanitizer_common/sanitizer_common.h"
30 #include "sanitizer_common/sanitizer_errno.h"
31 #include "sanitizer_common/sanitizer_flags.h"
32 #include "sanitizer_common/sanitizer_internal_defs.h"
33 #include "sanitizer_common/sanitizer_list.h"
34 #include "sanitizer_common/sanitizer_quarantine.h"
35 #include "sanitizer_common/sanitizer_stackdepot.h"
36 
37 namespace __asan {
38 
39 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
40 // We use adaptive redzones: for larger allocation larger redzones are used.
41 static u32 RZLog2Size(u32 rz_log) {
42   CHECK_LT(rz_log, 8);
43   return 16 << rz_log;
44 }
45 
46 static u32 RZSize2Log(u32 rz_size) {
47   CHECK_GE(rz_size, 16);
48   CHECK_LE(rz_size, 2048);
49   CHECK(IsPowerOfTwo(rz_size));
50   u32 res = Log2(rz_size) - 4;
51   CHECK_EQ(rz_size, RZLog2Size(res));
52   return res;
53 }
54 
55 static AsanAllocator &get_allocator();
56 
57 static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
58                                u32 tid, u32 stack) {
59   u64 context = tid;
60   context <<= 32;
61   context += stack;
62   atomic_store(atomic_context, context, memory_order_relaxed);
63 }
64 
65 static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
66                               u32 &tid, u32 &stack) {
67   u64 context = atomic_load(atomic_context, memory_order_relaxed);
68   stack = context;
69   context >>= 32;
70   tid = context;
71 }
72 
73 // The memory chunk allocated from the underlying allocator looks like this:
74 // L L L L L L H H U U U U U U R R
75 //   L -- left redzone words (0 or more bytes)
76 //   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
77 //   U -- user memory.
78 //   R -- right redzone (0 or more bytes)
79 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
80 // memory.
81 
82 // If the left redzone is greater than the ChunkHeader size we store a magic
83 // value in the first uptr word of the memory block and store the address of
84 // ChunkBase in the next uptr.
85 // M B L L L L L L L L L  H H U U U U U U
86 //   |                    ^
87 //   ---------------------|
88 //   M -- magic value kAllocBegMagic
89 //   B -- address of ChunkHeader pointing to the first 'H'
90 
// Per-allocation metadata stored in the left redzone. Layout is fixed at
// exactly 16 bytes (see COMPILER_CHECK below); do not reorder fields.
class ChunkHeader {
 public:
  // Lifecycle state: CHUNK_INVALID / CHUNK_ALLOCATED / CHUNK_QUARANTINE.
  atomic_uint8_t chunk_state;
  // How the chunk was allocated (AllocType: malloc / new / new[] ...).
  u8 alloc_type : 2;
  // Leak-checking classification used by LSan.
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  // User-requested size split into two fields so the header stays 16 bytes;
  // the hi part is only used on 64-bit targets (FIRST_32_SECOND_64 below).
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  // Packed (tid, stack id) of the allocation site; see AtomicContextStore.
  atomic_uint64_t alloc_context_id;

 public:
  // Returns the exact size the user asked for (not the rounded-up size).
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  // Records the user-requested size and CHECKs that it round-trips through
  // the split hi/lo representation.
  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};
130 
// ChunkHeader plus the free context. The extra bytes overlap with user memory
// (see the layout diagram above) and are only meaningful after the chunk is
// freed.
class ChunkBase : public ChunkHeader {
  // Packed (tid, stack id) of the deallocation site; see AtomicContextStore.
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};
143 
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
// The header must fit exactly in the minimal 16-byte left redzone, and the
// free-context overflow ("Header2") must fit in 16 bytes of user area.
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Possible values of ChunkHeader::chunk_state.
enum {
  // Either just allocated by underlying allocator, but AsanChunk is not yet
  // ready, or almost returned to underlying allocator and AsanChunk is already
  // meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};
159 
160 class AsanChunk : public ChunkBase {
161  public:
162   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
163   bool AddrIsInside(uptr addr) {
164     return (addr >= Beg()) && (addr < Beg() + UsedSize());
165   }
166 };
167 
// Prepended at the start of a block whose AsanChunk header does not sit at
// the block's beginning (see the "M B" diagram above): a magic word plus a
// back-pointer to the real ChunkHeader.
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  // Returns the back-pointer iff the magic is present. The acquire load pairs
  // with the release store in Set(), so a reader that observes the magic also
  // observes a valid chunk_header.
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  // Publishes the back-pointer (p != nullptr) or clears the magic
  // (p == nullptr). On publish, chunk_header is written before the magic is
  // released.
  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    // Clearing: the magic is expected to still be intact; the CHECK fires if
    // something else already overwrote it.
    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
195 
// Scribbles free_fill_byte over (up to max_free_fill_size bytes of) the user
// portion of a freed chunk, so stale data is recognizable after free.
static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}
210 
// Callbacks the quarantine uses: how to prepare/recycle quarantined chunks,
// and how to allocate/free its own internal storage via the given cache.
struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  // Called when a chunk enters the quarantine: scribble over the user data
  // (if enabled) and mark the whole user region as freed in the shadow.
  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  // Called when a chunk leaves the quarantine: invalidate its metadata and
  // hand the underlying block back to the allocator.
  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary will immediately unpoison and unmap the memory, so this
    // branch is unnecessary.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of deallocated chunk, confusing GetAsanChunk lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      // Flip CHUNK_QUARANTINE -> CHUNK_INVALID; the CHECK in the failure arm
      // asserts no one else changed the state behind our back.
      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  // Free path that bypasses quarantine residency: do the fill (primary only)
  // that PreQuarantine would have done, then recycle immediately.
  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary will immediately unpoison and unmap the
    // memory, so quarantine preparation is unnecessary.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  // Allocates memory for the quarantine's internal bookkeeping.
  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};
279 
// Quarantine of freed chunks, parameterized by the callbacks above.
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
282 
283 void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
284   PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
285   // Statistics.
286   AsanStats &thread_stats = GetCurrentThreadStats();
287   thread_stats.mmaps++;
288   thread_stats.mmaped += size;
289 }
290 
// Invoked when the secondary allocator maps memory for one large allocation.
void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  // Shrink [user_begin, user_end) inward to shadow-granularity boundaries.
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be immediately returned to the user, so there
  // is no value in poisoning it with non-zero just before Allocate()
  // unpoisons it. Poison only the head/tail regions invisible to Allocate().
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
305 
306 void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
307   PoisonShadow(p, size, 0);
308   // We are about to unmap a chunk of user memory.
309   // Mark the corresponding shadow memory as not needed.
310   FlushUnneededASanShadowMemory(p, size);
311   // Statistics.
312   AsanStats &thread_stats = GetCurrentThreadStats();
313   thread_stats.munmaps++;
314   thread_stats.munmaped += size;
315 }
316 
317 // We can not use THREADLOCAL because it is not supported on some of the
318 // platforms we care about (OSX 10.6, Android).
319 // static THREADLOCAL AllocatorCache cache;
// Returns the per-thread allocator cache embedded in the thread's malloc
// storage.
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}
324 
// Returns the per-thread quarantine cache, which lives in an opaque byte
// buffer inside the thread's malloc storage (size is CHECKed).
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
330 
// Populates allocator options from the ASan flags and common sanitizer flags.
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
340 
// Writes the current allocator options back into the flag objects
// (inverse of SetFrom).
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
350 
// The global ASan allocator: the sanitizer_common combined allocator plus a
// quarantine of freed chunks, fallback caches for code running without an
// AsanThread, and atomically updatable options.
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  // Protects the fallback caches below, used when GetCurrentThread() is null.
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  // Cap from common_flags()->max_allocation_size_mb, or
  // kMaxAllowedMallocSize when that flag is unset.
  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  // Stored atomically so concurrent allocations can re-read them safely.
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
372 
  // Validates redzone options: 16 <= min_redzone <= max_redzone <= 2048 and
  // both powers of two. CHECK-fails on violation.
  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
380 
  // Applies quarantine sizes and publishes the atomic option values; shared
  // between first-time initialization and re-initialization.
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
390 
  // One-time initialization for the linker-initialized global instance.
  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    // max_allocation_size_mb == 0 means "no user-defined limit".
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }
400 
  // Re-applies shadow poisoning to one allocator block after options change
  // (called for every chunk from ReInitialize).
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      // Sanity-check that the user region sits strictly inside the block.
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        // Poison the right redzone while keeping the trailing partial
        // granule of user memory addressable.
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk or freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }
426 
  // Apply provided AllocatorOptions to an Allocator (may-return-null policy,
  // release-to-OS interval, quarantine sizes and redzone bounds).
  void ApplyOptions(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }
433 
  // Re-applies options at runtime and re-poisons the redzones of every
  // existing allocation to match the new configuration.
  void ReInitialize(const AllocatorOptions &options) {
    ApplyOptions(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      // The allocator must be locked while we walk its chunks.
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
448 
  // Snapshots the current allocator configuration into |options|
  // (inverse of ApplyOptions).
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
460 
461   // -------------------- Helper methods. -------------------------
  // Picks the redzone class for a request: larger allocations get larger
  // redzones, then the result is clamped into [min_redzone, max_redzone],
  // never going below the size needed to hold the ChunkHeader.
  uptr ComputeRZLog(uptr user_requested_size) {
    // Size thresholds are chosen so that size + redzone stays within a
    // convenient power-of-two bucket.
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
476 
477   static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
478     if (user_requested_alignment < 8)
479       return 0;
480     if (user_requested_alignment > 512)
481       user_requested_alignment = 512;
482     return Log2(user_requested_alignment) - 2;
483   }
484 
485   static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
486     if (user_requested_alignment_log == 0)
487       return 0;
488     return 1LL << (user_requested_alignment_log + 2);
489   }
490 
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset, i.e. report the chunk whose
    // user region is closer to |addr|.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
521 
  // Replaces the recorded allocation (tid, stack) of the chunk at |addr|.
  // Returns false unless |addr| is the exact beginning of a live allocation.
  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }
532 
  // -------------------- Allocation/Deallocation routines ---------------
  // The central allocation routine behind malloc/new/memalign/etc.
  // Lays out the block as [left redzone | header | user | right redzone],
  // records metadata in the header, poisons the redzones and unpoisons the
  // user region. Returns nullptr (or reports a fatal error, depending on
  // allocator_may_return_null) on failure. |can_fill| enables the optional
  // malloc_fill_byte scribbling; calloc passes false.
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    // Remember the original alignment request before clamping; it is stored
    // in the header for new/delete alignment-mismatch checks.
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    // The user area must be able to hold the free-context overflow bytes
    // (kChunkHeader2Size) once the chunk is freed.
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      // No AsanThread (e.g. very early startup): use the shared fallback
      // cache under a lock.
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    // Carve the block: left redzone, then the 16-byte header immediately
    // before the (suitably aligned) user region.
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      // The header is not at the block start: publish the back-pointer so
      // GetAsanChunk can find it.
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }
667 
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      // The CAS failed, so the chunk was not in the ALLOCATED state:
      // double-free or free of a never-allocated pointer.
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was a user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }
686 
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated. Records the free context and
  // pushes the chunk into the (per-thread or fallback) quarantine.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      // No AsanThread: use the shared fallback quarantine under a lock.
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
708 
  // The central free routine behind free/delete: validates the chunk state,
  // checks alloc/dealloc and sized/aligned-delete mismatches, then moves the
  // chunk into the quarantine. |delete_size|/|delete_alignment| are non-zero
  // only when the caller is a sized/aligned operator delete.
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    if (RunFreeHooks(ptr)) {
      // Someone used __sanitizer_ignore_free_hook() and decided that they
      // didn't want the memory freed right now.
      // When they call free() on this pointer again at a later time, we should
      // ignore the alloc-type mismatch and allow them to deallocate the pointer
      // through free(), rather than the initial alloc type.
      m->alloc_type = FROM_MALLOC;
      return;
    }

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) &&
          !IsAllocDeallocMismatchSuppressed(stack)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      // Same alloc type; for delete additionally verify that the size and
      // alignment passed by the caller match what was recorded at allocation.
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }
762 
  // realloc(): allocate a new block, copy min(old, new) bytes, free the old
  // block. The old block is left untouched if the new allocation fails.
  // Callers must pass a non-null pointer and a non-zero size (CHECKed).
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      // Validate the old chunk only after the new allocation succeeded.
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
787 
  // calloc() core: check nmemb * size for overflow, then allocate and zero.
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      // Non-recoverable mode: report the overflow and abort.
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
801 
802   void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
803     if (chunk_state == CHUNK_QUARANTINE)
804       ReportDoubleFree((uptr)ptr, stack);
805     else
806       ReportFreeNotMalloced((uptr)ptr, stack);
807   }
808 
  // Thread teardown: flush the thread's quarantine cache and return its
  // allocator cache to the global allocator so the memory can be reused.
  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
814 
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    // Secondary blocks record the chunk pointer in a LargeChunkHeader at the
    // block start; for primary blocks the AsanChunk sits at the block start.
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // It does not guarantee that Chunk is initialized, but it's
    // definitely not for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }
837 
838   AsanChunk *GetAsanChunkByAddr(uptr p) {
839     void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
840     return GetAsanChunk(alloc_beg);
841   }
842 
843   // Allocator must be locked when this function is called.
844   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
845     void *alloc_beg =
846         allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
847     return GetAsanChunk(alloc_beg);
848   }
849 
850   uptr AllocationSize(uptr p) {
851     AsanChunk *m = GetAsanChunkByAddr(p);
852     if (!m) return 0;
853     if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
854       return 0;
855     if (m->Beg() != p) return 0;
856     return m->UsedSize();
857   }
858 
859   uptr AllocationSizeFast(uptr p) {
860     return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
861   }
862 
  // Find the chunk that best explains an access to `addr` for error reports.
  // If addr falls into a chunk's left redzone (or no chunk is found), the
  // access may really be a right overflow from the preceding chunk, so scan
  // backwards up to a page for it.
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk before.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
881 
  // Drain all quarantines (the current thread's and the shared fallback one)
  // and release unused allocator memory back to the OS.
  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      // The fallback cache is shared between threads; guard it.
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }
899 
  // Dump internal allocator and quarantine statistics.
  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }
904 
  // Acquire/release the allocator and the fallback mutex as a pair.
  // Lock order is significant: allocator first, then fallback_mutex;
  // ForceUnlock releases in the reverse order.
  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
914 };
915 
// The process-wide allocator singleton. LINKER_INITIALIZED: relies on static
// zero-initialization; real setup happens in InitializeAllocator().
static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
921 
922 bool AsanChunkView::IsValid() const {
923   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
924                        CHUNK_INVALID;
925 }
926 bool AsanChunkView::IsAllocated() const {
927   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
928                        CHUNK_ALLOCATED;
929 }
930 bool AsanChunkView::IsQuarantined() const {
931   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
932                        CHUNK_QUARANTINE;
933 }
// User-memory bounds and size of the viewed chunk.
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
// Alignment the user requested, decoded from its log2 encoding.
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
940 
941 uptr AsanChunkView::AllocTid() const {
942   u32 tid = 0;
943   u32 stack = 0;
944   chunk_->GetAllocContext(tid, stack);
945   return tid;
946 }
947 
948 uptr AsanChunkView::FreeTid() const {
949   if (!IsQuarantined())
950     return kInvalidTid;
951   u32 tid = 0;
952   u32 stack = 0;
953   chunk_->GetFreeContext(tid, stack);
954   return tid;
955 }
956 
957 AllocType AsanChunkView::GetAllocType() const {
958   return (AllocType)chunk_->alloc_type;
959 }
960 
961 u32 AsanChunkView::GetAllocStackId() const {
962   u32 tid = 0;
963   u32 stack = 0;
964   chunk_->GetAllocContext(tid, stack);
965   return stack;
966 }
967 
968 u32 AsanChunkView::GetFreeStackId() const {
969   if (!IsQuarantined())
970     return 0;
971   u32 tid = 0;
972   u32 stack = 0;
973   chunk_->GetFreeContext(tid, stack);
974   return stack;
975 }
976 
// One-time setup of the global allocator singleton.
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

// Reconfigure the already-initialized allocator with new options.
void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

// Apply provided AllocatorOptions to an Allocator
void ApplyAllocatorOptions(const AllocatorOptions &options) {
  instance.ApplyOptions(options);
}

// Read the allocator's current options back into *options.
void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}
993 
// Public wrappers around the singleton's chunk lookup.
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
// Here `addr` must be the block-begin address (as returned by the underlying
// allocator), not an arbitrary interior pointer.
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}
1000 
// Flush this thread's caches back to the global allocator (thread teardown).
void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
1009 
// free() entry point. alloc_type records how the caller released the memory
// so Deallocate can flag malloc/new mismatches.
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

// operator delete entry point; forwards the sized/aligned-delete arguments
// (0 when unknown) for new/delete mismatch checking.
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}
1018 
// malloc() entry point: default alignment of 8; errno set on failure.
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

// calloc() entry point; errno set on failure.
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
1026 
// reallocarray(3): realloc with an overflow-checked nmemb * size.
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    // Non-recoverable mode: report the overflow and abort.
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}
1037 
// realloc() entry point, handling the standard corner cases:
// realloc(nullptr, n) behaves like malloc(n); realloc(p, 0) either frees p
// and returns null, or degenerates to a 1-byte allocation, depending on the
// allocator_frees_and_returns_null_on_realloc_zero flag.
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
1051 
// valloc(3): page-aligned allocation.
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

// pvalloc(3): page-aligned allocation with size rounded up to a whole page.
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
1070 
// memalign(3) / aligned operator new: alignment must be a power of two,
// otherwise EINVAL.
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

// aligned_alloc(3): additionally validates the size/alignment combination.
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
1093 
// posix_memalign(3): returns an errno value instead of setting errno, and
// writes through memptr only on success.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
1109 
// malloc_usable_size(3): 0 for pointers we do not own; optionally reports an
// error in that case when the check_malloc_usable_size flag is set.
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
1119 
// malloc-zone hooks: size query and whole-allocator lock/unlock.
uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}
1131 
1132 }  // namespace __asan
1133 
1134 // --- Implementation of LSan-specific functions --- {{{1
1135 namespace __lsan {
// LSan hooks: stop-the-world lock/unlock of the ASan allocator.
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

// Report the address range occupied by the allocator object itself.
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
1148 
// If p points into a live heap chunk's user region (or matches the special
// case of a pointer just past an operator new(0) result), return the chunk's
// user-begin address; otherwise 0. Uses the FastLocked lookup, so the
// allocator must be locked by the caller.
uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}
1162 
// Map a chunk address to its user-begin address (0 if no chunk is found).
uptr GetUserBegin(uptr chunk) {
  // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is
  // not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

// In ASan the user address coincides with the chunk address.
uptr GetUserAddr(uptr chunk) {
  return chunk;
}
1173 
// LsanMetadata gives LSan access to the ASan chunk header, which lives
// kChunkHeaderSize bytes before the user-begin address passed in here.
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

// True iff the underlying chunk holds a live allocation.
bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

// Leak-classification tag stored in the chunk header.
ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

// User-requested size of the allocation.
uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

// Stack depot id of the allocation stack trace.
u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}
1209 
// Iterate over all allocator chunks on behalf of LSan.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

// Mark the live allocation containing p as intentionally ignored so LSan
// does not report it as leaked.
IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}
1228 
1229 }  // namespace __lsan
1230 
1231 // ---------------------- Interface ---------------- {{{1
1232 using namespace __asan;
1233 
1234 static const void *AllocationBegin(const void *p) {
1235   AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
1236   if (!m)
1237     return nullptr;
1238   if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
1239     return nullptr;
1240   if (m->UsedSize() == 0)
1241     return nullptr;
1242   return (const void *)(m->Beg());
1243 }
1244 
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

// Returns 1 iff p is the begin of a live allocation owned by this allocator.
int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}
1255 
// Returns the user-requested size of a live allocation; reports an error for
// pointers this allocator does not own.
uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
1267 
// Fast variant: p must be the exact begin of a live allocation. The contract
// is enforced only in debug builds via the DCHECKs below.
uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}
1274 
const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

// Drain quarantines and return unused memory to the OS.
void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

// Re-record the allocation stack trace for the chunk containing addr.
int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}
1288