xref: /freebsd/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===-- asan_allocator.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of AddressSanitizer, an address sanity checker.
10 //
11 // Implementation of ASan's memory allocator, second version.
12 // This variant uses the allocator from sanitizer_common, i.e. the one shared
13 // with ThreadSanitizer and MemorySanitizer.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "asan_allocator.h"
18 
19 #include "asan_internal.h"
20 #include "asan_mapping.h"
21 #include "asan_poisoning.h"
22 #include "asan_report.h"
23 #include "asan_stack.h"
24 #include "asan_thread.h"
25 #include "lsan/lsan_common.h"
26 #include "sanitizer_common/sanitizer_allocator_checks.h"
27 #include "sanitizer_common/sanitizer_allocator_interface.h"
28 #include "sanitizer_common/sanitizer_common.h"
29 #include "sanitizer_common/sanitizer_errno.h"
30 #include "sanitizer_common/sanitizer_flags.h"
31 #include "sanitizer_common/sanitizer_internal_defs.h"
32 #include "sanitizer_common/sanitizer_list.h"
33 #include "sanitizer_common/sanitizer_quarantine.h"
34 #include "sanitizer_common/sanitizer_stackdepot.h"
35 
36 namespace __asan {
37 
38 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
39 // We use adaptive redzones: for larger allocations, larger redzones are used.
40 static u32 RZLog2Size(u32 rz_log) {
41   CHECK_LT(rz_log, 8);
42   return 16 << rz_log;
43 }
44 
45 static u32 RZSize2Log(u32 rz_size) {
46   CHECK_GE(rz_size, 16);
47   CHECK_LE(rz_size, 2048);
48   CHECK(IsPowerOfTwo(rz_size));
49   u32 res = Log2(rz_size) - 4;
50   CHECK_EQ(rz_size, RZLog2Size(res));
51   return res;
52 }
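// Round trip of the redzone encoding: rz_log values 0..7 correspond to
// redzone sizes 16, 32, 64, 128, 256, 512, 1024 and 2048 bytes, e.g.
// RZLog2Size(3) == 128 and RZSize2Log(128) == 3.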
53 
54 static AsanAllocator &get_allocator();
55 
56 static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
57                                u32 tid, u32 stack) {
58   u64 context = tid;
59   context <<= 32;
60   context += stack;
61   atomic_store(atomic_context, context, memory_order_relaxed);
62 }
63 
64 static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
65                               u32 &tid, u32 &stack) {
66   u64 context = atomic_load(atomic_context, memory_order_relaxed);
67   stack = context;
68   context >>= 32;
69   tid = context;
70 }
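// The (tid, stack) pair is packed into one 64-bit value so it can be read and
// written atomically: the thread id occupies the upper 32 bits and the stack
// depot id the lower 32 bits. For example, tid == 5 and stack == 0x1234 are
// stored as 0x0000000500001234.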
71 
72 // The memory chunk allocated from the underlying allocator looks like this:
73 // L L L L L L H H U U U U U U R R
74 //   L -- left redzone words (0 or more bytes)
75 //   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
76 //   U -- user memory.
77 //   R -- right redzone (0 or more bytes)
78 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
79 // memory.
80 
81 // If the left redzone is greater than the ChunkHeader size we store a magic
82 // value in the first uptr word of the memory block and store the address of
83 // ChunkBase in the next uptr.
84 // M B L L L L L L L L L  H H U U U U U U
85 //   |                    ^
86 //   ---------------------|
87 //   M -- magic value kAllocBegMagic
88 //   B -- address of ChunkHeader pointing to the first 'H'
89 
90 class ChunkHeader {
91  public:
92   atomic_uint8_t chunk_state;
93   u8 alloc_type : 2;
94   u8 lsan_tag : 2;
95 
96   // align < 8 -> 0
97   // else      -> log2(min(align, 512)) - 2
98   u8 user_requested_alignment_log : 3;
99 
100  private:
101   u16 user_requested_size_hi;
102   u32 user_requested_size_lo;
103   atomic_uint64_t alloc_context_id;
104 
105  public:
106   uptr UsedSize() const {
107     static_assert(sizeof(user_requested_size_lo) == 4,
108                   "Expression below requires this");
109     return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
110            user_requested_size_lo;
111   }
112 
113   void SetUsedSize(uptr size) {
114     user_requested_size_lo = size;
115     static_assert(sizeof(user_requested_size_lo) == 4,
116                   "Expression below requires this");
117     user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
118     CHECK_EQ(UsedSize(), size);
119   }
120 
121   void SetAllocContext(u32 tid, u32 stack) {
122     AtomicContextStore(&alloc_context_id, tid, stack);
123   }
124 
125   void GetAllocContext(u32 &tid, u32 &stack) const {
126     AtomicContextLoad(&alloc_context_id, tid, stack);
127   }
128 };
129 
130 class ChunkBase : public ChunkHeader {
131   atomic_uint64_t free_context_id;
132 
133  public:
134   void SetFreeContext(u32 tid, u32 stack) {
135     AtomicContextStore(&free_context_id, tid, stack);
136   }
137 
138   void GetFreeContext(u32 &tid, u32 &stack) const {
139     AtomicContextLoad(&free_context_id, tid, stack);
140   }
141 };
142 
143 static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
144 static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
145 COMPILER_CHECK(kChunkHeaderSize == 16);
146 COMPILER_CHECK(kChunkHeader2Size <= 16);
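// Byte accounting for the 16-byte ChunkHeader: 1 byte of chunk_state, 1 byte
// of bit fields, 2 + 4 bytes of the split user_requested_size, and 8 bytes of
// alloc_context_id. ChunkBase adds the 8-byte free_context_id, which is what
// kChunkHeader2Size measures and which overlaps the start of the user region.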
147 
148 enum {
149   // Either just allocated by the underlying allocator, but the AsanChunk is
150   // not yet ready, or almost returned to the underlying allocator and the
151   // AsanChunk is already meaningless.
152   CHUNK_INVALID = 0,
153   // The chunk is allocated and not yet freed.
154   CHUNK_ALLOCATED = 2,
155   // The chunk was freed and put into quarantine zone.
156   CHUNK_QUARANTINE = 3,
157 };
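// A chunk's lifetime goes CHUNK_INVALID -> CHUNK_ALLOCATED (set at the end of
// Allocate()) -> CHUNK_QUARANTINE (set in Deallocate()) and, for chunks from
// the primary allocator, back to CHUNK_INVALID in Recycle() just before the
// memory is handed back to the underlying allocator.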
158 
159 class AsanChunk : public ChunkBase {
160  public:
161   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
162   bool AddrIsInside(uptr addr) {
163     return (addr >= Beg()) && (addr < Beg() + UsedSize());
164   }
165 };
166 
167 class LargeChunkHeader {
168   static constexpr uptr kAllocBegMagic =
169       FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
170   atomic_uintptr_t magic;
171   AsanChunk *chunk_header;
172 
173  public:
174   AsanChunk *Get() const {
175     return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
176                ? chunk_header
177                : nullptr;
178   }
179 
180   void Set(AsanChunk *p) {
181     if (p) {
182       chunk_header = p;
183       atomic_store(&magic, kAllocBegMagic, memory_order_release);
184       return;
185     }
186 
187     uptr old = kAllocBegMagic;
188     if (!atomic_compare_exchange_strong(&magic, &old, 0,
189                                         memory_order_release)) {
190       CHECK_EQ(old, kAllocBegMagic);
191     }
192   }
193 };
194 
195 static void FillChunk(AsanChunk *m) {
196   // FIXME: Use ReleaseMemoryPagesToOS.
197   Flags &fl = *flags();
198 
199   if (fl.max_free_fill_size > 0) {
200     // We have to skip the chunk header, it contains free_context_id.
201     uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
202     if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
203       uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
204       size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
205       REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
206     }
207   }
208 }
209 
210 struct QuarantineCallback {
211   QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
212       : cache_(cache),
213         stack_(stack) {
214   }
215 
216   void PreQuarantine(AsanChunk *m) const {
217     FillChunk(m);
218     // Poison the region.
219     PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
220                  kAsanHeapFreeMagic);
221   }
222 
223   void Recycle(AsanChunk *m) const {
224     void *p = get_allocator().GetBlockBegin(m);
225 
226     // The secondary will immediately unpoison and unmap the memory, so this
227     // branch is unnecessary.
228     if (get_allocator().FromPrimary(p)) {
229       if (p != m) {
230         // Clear the magic value, as allocator internals may overwrite the
231         // contents of deallocated chunk, confusing GetAsanChunk lookup.
232         reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
233       }
234 
235       u8 old_chunk_state = CHUNK_QUARANTINE;
236       if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
237                                           CHUNK_INVALID,
238                                           memory_order_acquire)) {
239         CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
240       }
241 
242       PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
243                    kAsanHeapLeftRedzoneMagic);
244     }
245 
246     // Statistics.
247     AsanStats &thread_stats = GetCurrentThreadStats();
248     thread_stats.real_frees++;
249     thread_stats.really_freed += m->UsedSize();
250 
251     get_allocator().Deallocate(cache_, p);
252   }
253 
254   void RecyclePassThrough(AsanChunk *m) const {
255     // Recycle for the secondary will immediately unpoison and unmap the
256     // memory, so quarantine preparation is unnecessary.
257     if (get_allocator().FromPrimary(m)) {
258       // The primary allocation may need pattern fill if enabled.
259       FillChunk(m);
260     }
261     Recycle(m);
262   }
263 
264   void *Allocate(uptr size) const {
265     void *res = get_allocator().Allocate(cache_, size, 1);
266     // TODO(alekseys): Consider making quarantine OOM-friendly.
267     if (UNLIKELY(!res))
268       ReportOutOfMemory(size, stack_);
269     return res;
270   }
271 
272   void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
273 
274  private:
275   AllocatorCache* const cache_;
276   BufferedStackTrace* const stack_;
277 };
278 
279 typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
280 typedef AsanQuarantine::Cache QuarantineCache;
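// When the quarantine is enabled, freed chunks are not handed back to the
// underlying allocator immediately: PreQuarantine() poisons them with
// kAsanHeapFreeMagic and they sit in the quarantine (bounded by the
// quarantine_size_mb flag) until Recycle() evicts them, so accesses to
// recently freed memory are reported as use-after-free.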
281 
282 void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
283   PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
284   // Statistics.
285   AsanStats &thread_stats = GetCurrentThreadStats();
286   thread_stats.mmaps++;
287   thread_stats.mmaped += size;
288 }
289 
290 void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
291                                           uptr user_size) const {
292   uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
293   user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
294   // The secondary mapping will be returned to the user immediately, so there
295   // is no value in poisoning it with non-zero just before Allocate() unpoisons
296   // it. Just poison the head/tail that is invisible to Allocate().
297   PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
298   PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
299   // Statistics.
300   AsanStats &thread_stats = GetCurrentThreadStats();
301   thread_stats.mmaps++;
302   thread_stats.mmaped += size;
303 }
304 
305 void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
306   PoisonShadow(p, size, 0);
307   // We are about to unmap a chunk of user memory.
308   // Mark the corresponding shadow memory as not needed.
309   FlushUnneededASanShadowMemory(p, size);
310   // Statistics.
311   AsanStats &thread_stats = GetCurrentThreadStats();
312   thread_stats.munmaps++;
313   thread_stats.munmaped += size;
314 }
315 
316 // We cannot use THREADLOCAL because it is not supported on some of the
317 // platforms we care about (OSX 10.6, Android).
318 // static THREADLOCAL AllocatorCache cache;
319 AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
320   CHECK(ms);
321   return &ms->allocator_cache;
322 }
323 
324 QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
325   CHECK(ms);
326   CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
327   return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
328 }
329 
330 void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
331   quarantine_size_mb = f->quarantine_size_mb;
332   thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
333   min_redzone = f->redzone;
334   max_redzone = f->max_redzone;
335   may_return_null = cf->allocator_may_return_null;
336   alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
337   release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
338 }
339 
340 void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
341   f->quarantine_size_mb = quarantine_size_mb;
342   f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
343   f->redzone = min_redzone;
344   f->max_redzone = max_redzone;
345   cf->allocator_may_return_null = may_return_null;
346   f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
347   cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
348 }
349 
350 struct Allocator {
351   static const uptr kMaxAllowedMallocSize =
352       FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
353 
354   AsanAllocator allocator;
355   AsanQuarantine quarantine;
356   StaticSpinMutex fallback_mutex;
357   AllocatorCache fallback_allocator_cache;
358   QuarantineCache fallback_quarantine_cache;
359 
360   uptr max_user_defined_malloc_size;
361 
362   // ------------------- Options --------------------------
363   atomic_uint16_t min_redzone;
364   atomic_uint16_t max_redzone;
365   atomic_uint8_t alloc_dealloc_mismatch;
366 
367   // ------------------- Initialization ------------------------
368   explicit Allocator(LinkerInitialized)
369       : quarantine(LINKER_INITIALIZED),
370         fallback_quarantine_cache(LINKER_INITIALIZED) {}
371 
372   void CheckOptions(const AllocatorOptions &options) const {
373     CHECK_GE(options.min_redzone, 16);
374     CHECK_GE(options.max_redzone, options.min_redzone);
375     CHECK_LE(options.max_redzone, 2048);
376     CHECK(IsPowerOfTwo(options.min_redzone));
377     CHECK(IsPowerOfTwo(options.max_redzone));
378   }
379 
380   void SharedInitCode(const AllocatorOptions &options) {
381     CheckOptions(options);
382     quarantine.Init((uptr)options.quarantine_size_mb << 20,
383                     (uptr)options.thread_local_quarantine_size_kb << 10);
384     atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
385                  memory_order_release);
386     atomic_store(&min_redzone, options.min_redzone, memory_order_release);
387     atomic_store(&max_redzone, options.max_redzone, memory_order_release);
388   }
389 
390   void InitLinkerInitialized(const AllocatorOptions &options) {
391     SetAllocatorMayReturnNull(options.may_return_null);
392     allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
393     SharedInitCode(options);
394     max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
395                                        ? common_flags()->max_allocation_size_mb
396                                              << 20
397                                        : kMaxAllowedMallocSize;
398   }
399 
400   void RePoisonChunk(uptr chunk) {
401     // This could be a user-facing chunk (with redzones), or some internal
402     // housekeeping chunk, like TransferBatch. Start by assuming the former.
403     AsanChunk *ac = GetAsanChunk((void *)chunk);
404     uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
405     if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
406                   CHUNK_ALLOCATED) {
407       uptr beg = ac->Beg();
408       uptr end = ac->Beg() + ac->UsedSize();
409       uptr chunk_end = chunk + allocated_size;
410       if (chunk < beg && beg < end && end <= chunk_end) {
411         // Looks like a valid AsanChunk in use, poison redzones only.
412         PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
413         uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
414         FastPoisonShadowPartialRightRedzone(
415             end_aligned_down, end - end_aligned_down,
416             chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
417         return;
418       }
419     }
420 
421     // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
422     // In either case, poison everything.
423     PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
424   }
425 
426   void ReInitialize(const AllocatorOptions &options) {
427     SetAllocatorMayReturnNull(options.may_return_null);
428     allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
429     SharedInitCode(options);
430 
431     // Poison all existing allocation's redzones.
432     if (CanPoisonMemory()) {
433       allocator.ForceLock();
434       allocator.ForEachChunk(
435           [](uptr chunk, void *alloc) {
436             ((Allocator *)alloc)->RePoisonChunk(chunk);
437           },
438           this);
439       allocator.ForceUnlock();
440     }
441   }
442 
443   void GetOptions(AllocatorOptions *options) const {
444     options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
445     options->thread_local_quarantine_size_kb =
446         quarantine.GetMaxCacheSize() >> 10;
447     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
448     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
449     options->may_return_null = AllocatorMayReturnNull();
450     options->alloc_dealloc_mismatch =
451         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
452     options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
453   }
454 
455   // -------------------- Helper methods. -------------------------
456   uptr ComputeRZLog(uptr user_requested_size) {
457     u32 rz_log = user_requested_size <= 64 - 16            ? 0
458                  : user_requested_size <= 128 - 32         ? 1
459                  : user_requested_size <= 512 - 64         ? 2
460                  : user_requested_size <= 4096 - 128       ? 3
461                  : user_requested_size <= (1 << 14) - 256  ? 4
462                  : user_requested_size <= (1 << 15) - 512  ? 5
463                  : user_requested_size <= (1 << 16) - 1024 ? 6
464                                                            : 7;
465     u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
466     u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
467     u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
468     return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
469   }
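  // Example with the default redzone flags (redzone=16, max_redzone=2048):
  // hdr_log == 0, min_log == 0 and max_log == 7 impose no extra constraint,
  // so a 100-byte request falls into the "<= 512 - 64" bucket and gets
  // rz_log == 2, i.e. 64-byte redzones.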
470 
471   static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
472     if (user_requested_alignment < 8)
473       return 0;
474     if (user_requested_alignment > 512)
475       user_requested_alignment = 512;
476     return Log2(user_requested_alignment) - 2;
477   }
478 
479   static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
480     if (user_requested_alignment_log == 0)
481       return 0;
482     return 1LL << (user_requested_alignment_log + 2);
483   }
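  // The two helpers above are inverses over the supported range: alignments
  // 8, 16, ..., 512 are encoded as logs 1..7 (anything above 512 is clamped
  // to 512), and log 0 means no alignment was recorded.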
484 
485   // We have an address between two chunks, and we want to report just one.
486   AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
487                          AsanChunk *right_chunk) {
488     if (!left_chunk)
489       return right_chunk;
490     if (!right_chunk)
491       return left_chunk;
492     // Prefer an allocated chunk over a freed chunk, and a freed chunk
493     // over an available chunk.
494     u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
495     u8 right_state =
496         atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
497     if (left_state != right_state) {
498       if (left_state == CHUNK_ALLOCATED)
499         return left_chunk;
500       if (right_state == CHUNK_ALLOCATED)
501         return right_chunk;
502       if (left_state == CHUNK_QUARANTINE)
503         return left_chunk;
504       if (right_state == CHUNK_QUARANTINE)
505         return right_chunk;
506     }
507     // Same chunk_state: choose based on offset.
508     sptr l_offset = 0, r_offset = 0;
509     CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
510     CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
511     if (l_offset < r_offset)
512       return left_chunk;
513     return right_chunk;
514   }
515 
516   bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
517     AsanChunk *m = GetAsanChunkByAddr(addr);
518     if (!m) return false;
519     if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
520       return false;
521     if (m->Beg() != addr) return false;
522     AsanThread *t = GetCurrentThread();
523     m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
524     return true;
525   }
526 
527   // -------------------- Allocation/Deallocation routines ---------------
528   void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
529                  AllocType alloc_type, bool can_fill) {
530     if (UNLIKELY(!AsanInited()))
531       AsanInitFromRtl();
532     if (UNLIKELY(IsRssLimitExceeded())) {
533       if (AllocatorMayReturnNull())
534         return nullptr;
535       ReportRssLimitExceeded(stack);
536     }
537     Flags &fl = *flags();
538     CHECK(stack);
539     const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
540     const uptr user_requested_alignment_log =
541         ComputeUserRequestedAlignmentLog(alignment);
542     if (alignment < min_alignment)
543       alignment = min_alignment;
544     if (size == 0) {
545       // We'd be happy to avoid allocating memory for zero-size requests, but
546       // some programs/tests depend on this behavior and assume that malloc
547       // would not return NULL even for zero-size allocations. Moreover, it
548       // looks like operator new should never return NULL, and results of
549       // consecutive "new" calls must be different even if the allocated size
550       // is zero.
551       size = 1;
552     }
553     CHECK(IsPowerOfTwo(alignment));
554     uptr rz_log = ComputeRZLog(size);
555     uptr rz_size = RZLog2Size(rz_log);
556     uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
557     uptr needed_size = rounded_size + rz_size;
558     if (alignment > min_alignment)
559       needed_size += alignment;
560     bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
561     // If we are allocating from the secondary allocator, there will be no
562     // automatic right redzone, so add the right redzone manually.
563     if (!from_primary)
564       needed_size += rz_size;
565     CHECK(IsAligned(needed_size, min_alignment));
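    // For example, with the default 8-byte shadow granularity and default
    // redzone flags, malloc(100) gives rz_log == 2 (64-byte redzones),
    // rounded_size == 104 and needed_size == 168; user_beg ends up 64 bytes
    // past alloc_beg and the ChunkHeader occupies the last 16 bytes of the
    // left redzone.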
566     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
567         size > max_user_defined_malloc_size) {
568       if (AllocatorMayReturnNull()) {
569         Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
570                size);
571         return nullptr;
572       }
573       uptr malloc_limit =
574           Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
575       ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
576     }
577 
578     AsanThread *t = GetCurrentThread();
579     void *allocated;
580     if (t) {
581       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
582       allocated = allocator.Allocate(cache, needed_size, 8);
583     } else {
584       SpinMutexLock l(&fallback_mutex);
585       AllocatorCache *cache = &fallback_allocator_cache;
586       allocated = allocator.Allocate(cache, needed_size, 8);
587     }
588     if (UNLIKELY(!allocated)) {
589       SetAllocatorOutOfMemory();
590       if (AllocatorMayReturnNull())
591         return nullptr;
592       ReportOutOfMemory(size, stack);
593     }
594 
595     uptr alloc_beg = reinterpret_cast<uptr>(allocated);
596     uptr alloc_end = alloc_beg + needed_size;
597     uptr user_beg = alloc_beg + rz_size;
598     if (!IsAligned(user_beg, alignment))
599       user_beg = RoundUpTo(user_beg, alignment);
600     uptr user_end = user_beg + size;
601     CHECK_LE(user_end, alloc_end);
602     uptr chunk_beg = user_beg - kChunkHeaderSize;
603     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
604     m->alloc_type = alloc_type;
605     CHECK(size);
606     m->SetUsedSize(size);
607     m->user_requested_alignment_log = user_requested_alignment_log;
608 
609     m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
610 
611     if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
612       // The allocator provides an unpoisoned chunk. This is possible for the
613       // secondary allocator, or if CanPoisonMemory() was false for some time,
614       // for example, due to flags()->start_disabled. Anyway, poison left and
615       // right of the block before using it for anything else.
616       uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
617       uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
618       PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
619       PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
620     }
621 
622     uptr size_rounded_down_to_granularity =
623         RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
624     // Unpoison the bulk of the memory region.
625     if (size_rounded_down_to_granularity)
626       PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
627     // Deal with the end of the region if size is not aligned to granularity.
628     if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
629       u8 *shadow =
630           (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
631       *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
632     }
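    // Example: with the default 8-byte granularity and poison_partial enabled,
    // malloc(13) leaves the first granule fully addressable and sets the shadow
    // of the second granule to 5 (13 & 7), so only its first 5 bytes are
    // addressable and an access to byte 13 is reported.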
633 
634     AsanStats &thread_stats = GetCurrentThreadStats();
635     thread_stats.mallocs++;
636     thread_stats.malloced += size;
637     thread_stats.malloced_redzones += needed_size - size;
638     if (needed_size > SizeClassMap::kMaxSize)
639       thread_stats.malloc_large++;
640     else
641       thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
642 
643     void *res = reinterpret_cast<void *>(user_beg);
644     if (can_fill && fl.max_malloc_fill_size) {
645       uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
646       REAL(memset)(res, fl.malloc_fill_byte, fill_size);
647     }
648 #if CAN_SANITIZE_LEAKS
649     m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
650                                                  : __lsan::kDirectlyLeaked;
651 #endif
652     // Must be the last mutation of metadata in this function.
653     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
654     if (alloc_beg != chunk_beg) {
655       CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
656       reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
657     }
658     RunMallocHooks(res, size);
659     return res;
660   }
661 
662   // Set quarantine flag if chunk is allocated, issue ASan error report on
663   // available and quarantined chunks. Return true on success, false otherwise.
664   bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
665                                               BufferedStackTrace *stack) {
666     u8 old_chunk_state = CHUNK_ALLOCATED;
667     // Flip the chunk_state atomically to avoid race on double-free.
668     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
669                                         CHUNK_QUARANTINE,
670                                         memory_order_acquire)) {
671       ReportInvalidFree(ptr, old_chunk_state, stack);
672       // It's not safe to push a chunk in quarantine on invalid free.
673       return false;
674     }
675     CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
676     // It was user data.
677     m->SetFreeContext(kInvalidTid, 0);
678     return true;
679   }
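  // The compare-exchange in AtomicallySetQuarantineFlagIfAllocated is also
  // what catches racing frees: if two threads free the same pointer, only one
  // flips CHUNK_ALLOCATED to CHUNK_QUARANTINE; the other observes
  // CHUNK_QUARANTINE and ReportInvalidFree() reports it as a double-free.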
680 
681   // Expects the chunk to already be marked as quarantined by using
682   // AtomicallySetQuarantineFlagIfAllocated.
683   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
684     CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
685              CHUNK_QUARANTINE);
686     AsanThread *t = GetCurrentThread();
687     m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
688 
689     // Push into quarantine.
690     if (t) {
691       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
692       AllocatorCache *ac = GetAllocatorCache(ms);
693       quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
694                      m->UsedSize());
695     } else {
696       SpinMutexLock l(&fallback_mutex);
697       AllocatorCache *ac = &fallback_allocator_cache;
698       quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
699                      m, m->UsedSize());
700     }
701   }
702 
703   void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
704                   BufferedStackTrace *stack, AllocType alloc_type) {
705     uptr p = reinterpret_cast<uptr>(ptr);
706     if (p == 0) return;
707 
708     uptr chunk_beg = p - kChunkHeaderSize;
709     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
710 
711     // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
712     // malloc. Don't report an invalid free in this case.
713     if (SANITIZER_WINDOWS &&
714         !get_allocator().PointerIsMine(ptr)) {
715       if (!IsSystemHeapAddress(p))
716         ReportFreeNotMalloced(p, stack);
717       return;
718     }
719 
720     if (RunFreeHooks(ptr)) {
721       // Someone used __sanitizer_ignore_free_hook() and decided that they
722     // didn't want the memory to be freed right now.
723       // When they call free() on this pointer again at a later time, we should
724       // ignore the alloc-type mismatch and allow them to deallocate the pointer
725       // through free(), rather than the initial alloc type.
726       m->alloc_type = FROM_MALLOC;
727       return;
728     }
729 
730     // Must mark the chunk as quarantined before any changes to its metadata.
731     // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
732     if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
733 
734     if (m->alloc_type != alloc_type) {
735       if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
736         ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
737                                 (AllocType)alloc_type);
738       }
739     } else {
740       if (flags()->new_delete_type_mismatch &&
741           (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
742           ((delete_size && delete_size != m->UsedSize()) ||
743            ComputeUserRequestedAlignmentLog(delete_alignment) !=
744                m->user_requested_alignment_log)) {
745         ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
746       }
747     }
748 
749     AsanStats &thread_stats = GetCurrentThreadStats();
750     thread_stats.frees++;
751     thread_stats.freed += m->UsedSize();
752 
753     QuarantineChunk(m, ptr, stack);
754   }
755 
756   void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
757     CHECK(old_ptr && new_size);
758     uptr p = reinterpret_cast<uptr>(old_ptr);
759     uptr chunk_beg = p - kChunkHeaderSize;
760     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
761 
762     AsanStats &thread_stats = GetCurrentThreadStats();
763     thread_stats.reallocs++;
764     thread_stats.realloced += new_size;
765 
766     void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
767     if (new_ptr) {
768       u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
769       if (chunk_state != CHUNK_ALLOCATED)
770         ReportInvalidFree(old_ptr, chunk_state, stack);
771       CHECK_NE(REAL(memcpy), nullptr);
772       uptr memcpy_size = Min(new_size, m->UsedSize());
773       // If realloc() races with free(), we may start copying freed memory.
774       // However, we will report racy double-free later anyway.
775       REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
776       Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
777     }
778     return new_ptr;
779   }
780 
781   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
782     if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
783       if (AllocatorMayReturnNull())
784         return nullptr;
785       ReportCallocOverflow(nmemb, size, stack);
786     }
787     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
788     // If the memory comes from the secondary allocator, there is no need to
789     // clear it, as it comes directly from mmap.
790     if (ptr && allocator.FromPrimary(ptr))
791       REAL(memset)(ptr, 0, nmemb * size);
792     return ptr;
793   }
794 
795   void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
796     if (chunk_state == CHUNK_QUARANTINE)
797       ReportDoubleFree((uptr)ptr, stack);
798     else
799       ReportFreeNotMalloced((uptr)ptr, stack);
800   }
801 
802   void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
803     AllocatorCache *ac = GetAllocatorCache(ms);
804     quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
805     allocator.SwallowCache(ac);
806   }
807 
808   // -------------------------- Chunk lookup ----------------------
809 
810   // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
811   // Returns nullptr if AsanChunk is not yet initialized just after
812   // get_allocator().Allocate(), or is being destroyed just before
813   // get_allocator().Deallocate().
814   AsanChunk *GetAsanChunk(void *alloc_beg) {
815     if (!alloc_beg)
816       return nullptr;
817     AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
818     if (!p) {
819       if (!allocator.FromPrimary(alloc_beg))
820         return nullptr;
821       p = reinterpret_cast<AsanChunk *>(alloc_beg);
822     }
823     u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
824     // This does not guarantee that the chunk is initialized, but for any
825     // other state value it definitely is not.
826     if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
827       return p;
828     return nullptr;
829   }
830 
831   AsanChunk *GetAsanChunkByAddr(uptr p) {
832     void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
833     return GetAsanChunk(alloc_beg);
834   }
835 
836   // Allocator must be locked when this function is called.
837   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
838     void *alloc_beg =
839         allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
840     return GetAsanChunk(alloc_beg);
841   }
842 
843   uptr AllocationSize(uptr p) {
844     AsanChunk *m = GetAsanChunkByAddr(p);
845     if (!m) return 0;
846     if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
847       return 0;
848     if (m->Beg() != p) return 0;
849     return m->UsedSize();
850   }
851 
852   uptr AllocationSizeFast(uptr p) {
853     return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
854   }
855 
856   AsanChunkView FindHeapChunkByAddress(uptr addr) {
857     AsanChunk *m1 = GetAsanChunkByAddr(addr);
858     sptr offset = 0;
859     if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
860       // The address is in the chunk's left redzone, so maybe it is actually
861       // a right buffer overflow from the other chunk before.
862       // Search a bit before to see if there is another chunk.
863       AsanChunk *m2 = nullptr;
864       for (uptr l = 1; l < GetPageSizeCached(); l++) {
865         m2 = GetAsanChunkByAddr(addr - l);
866         if (m2 == m1) continue;  // Still the same chunk.
867         break;
868       }
869       if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
870         m1 = ChooseChunk(addr, m2, m1);
871     }
872     return AsanChunkView(m1);
873   }
874 
875   void Purge(BufferedStackTrace *stack) {
876     AsanThread *t = GetCurrentThread();
877     if (t) {
878       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
879       quarantine.DrainAndRecycle(GetQuarantineCache(ms),
880                                  QuarantineCallback(GetAllocatorCache(ms),
881                                                     stack));
882     }
883     {
884       SpinMutexLock l(&fallback_mutex);
885       quarantine.DrainAndRecycle(&fallback_quarantine_cache,
886                                  QuarantineCallback(&fallback_allocator_cache,
887                                                     stack));
888     }
889 
890     allocator.ForceReleaseToOS();
891   }
892 
893   void PrintStats() {
894     allocator.PrintStats();
895     quarantine.PrintStats();
896   }
897 
898   void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
899     allocator.ForceLock();
900     fallback_mutex.Lock();
901   }
902 
903   void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
904     fallback_mutex.Unlock();
905     allocator.ForceUnlock();
906   }
907 };
908 
909 static Allocator instance(LINKER_INITIALIZED);
910 
911 static AsanAllocator &get_allocator() {
912   return instance.allocator;
913 }
914 
915 bool AsanChunkView::IsValid() const {
916   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
917                        CHUNK_INVALID;
918 }
919 bool AsanChunkView::IsAllocated() const {
920   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
921                        CHUNK_ALLOCATED;
922 }
923 bool AsanChunkView::IsQuarantined() const {
924   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
925                        CHUNK_QUARANTINE;
926 }
927 uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
928 uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
929 uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
930 u32 AsanChunkView::UserRequestedAlignment() const {
931   return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
932 }
933 
934 uptr AsanChunkView::AllocTid() const {
935   u32 tid = 0;
936   u32 stack = 0;
937   chunk_->GetAllocContext(tid, stack);
938   return tid;
939 }
940 
941 uptr AsanChunkView::FreeTid() const {
942   if (!IsQuarantined())
943     return kInvalidTid;
944   u32 tid = 0;
945   u32 stack = 0;
946   chunk_->GetFreeContext(tid, stack);
947   return tid;
948 }
949 
950 AllocType AsanChunkView::GetAllocType() const {
951   return (AllocType)chunk_->alloc_type;
952 }
953 
954 u32 AsanChunkView::GetAllocStackId() const {
955   u32 tid = 0;
956   u32 stack = 0;
957   chunk_->GetAllocContext(tid, stack);
958   return stack;
959 }
960 
961 u32 AsanChunkView::GetFreeStackId() const {
962   if (!IsQuarantined())
963     return 0;
964   u32 tid = 0;
965   u32 stack = 0;
966   chunk_->GetFreeContext(tid, stack);
967   return stack;
968 }
969 
970 void InitializeAllocator(const AllocatorOptions &options) {
971   instance.InitLinkerInitialized(options);
972 }
973 
974 void ReInitializeAllocator(const AllocatorOptions &options) {
975   instance.ReInitialize(options);
976 }
977 
978 void GetAllocatorOptions(AllocatorOptions *options) {
979   instance.GetOptions(options);
980 }
981 
982 AsanChunkView FindHeapChunkByAddress(uptr addr) {
983   return instance.FindHeapChunkByAddress(addr);
984 }
985 AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
986   return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
987 }
988 
989 void AsanThreadLocalMallocStorage::CommitBack() {
990   GET_STACK_TRACE_MALLOC;
991   instance.CommitBack(this, &stack);
992 }
993 
994 void PrintInternalAllocatorStats() {
995   instance.PrintStats();
996 }
997 
998 void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
999   instance.Deallocate(ptr, 0, 0, stack, alloc_type);
1000 }
1001 
1002 void asan_delete(void *ptr, uptr size, uptr alignment,
1003                  BufferedStackTrace *stack, AllocType alloc_type) {
1004   instance.Deallocate(ptr, size, alignment, stack, alloc_type);
1005 }
1006 
1007 void *asan_malloc(uptr size, BufferedStackTrace *stack) {
1008   return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
1009 }
1010 
1011 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
1012   return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
1013 }
1014 
1015 void *asan_reallocarray(void *p, uptr nmemb, uptr size,
1016                         BufferedStackTrace *stack) {
1017   if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
1018     errno = errno_ENOMEM;
1019     if (AllocatorMayReturnNull())
1020       return nullptr;
1021     ReportReallocArrayOverflow(nmemb, size, stack);
1022   }
1023   return asan_realloc(p, nmemb * size, stack);
1024 }
1025 
1026 void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
1027   if (!p)
1028     return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
1029   if (size == 0) {
1030     if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
1031       instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
1032       return nullptr;
1033     }
1034     // Allocate a size of 1 if we shouldn't free() on Realloc to 0
1035     size = 1;
1036   }
1037   return SetErrnoOnNull(instance.Reallocate(p, size, stack));
1038 }
1039 
1040 void *asan_valloc(uptr size, BufferedStackTrace *stack) {
1041   return SetErrnoOnNull(
1042       instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
1043 }
1044 
1045 void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
1046   uptr PageSize = GetPageSizeCached();
1047   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
1048     errno = errno_ENOMEM;
1049     if (AllocatorMayReturnNull())
1050       return nullptr;
1051     ReportPvallocOverflow(size, stack);
1052   }
1053   // pvalloc(0) should allocate one page.
1054   size = size ? RoundUpTo(size, PageSize) : PageSize;
1055   return SetErrnoOnNull(
1056       instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
1057 }
1058 
1059 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
1060                     AllocType alloc_type) {
1061   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
1062     errno = errno_EINVAL;
1063     if (AllocatorMayReturnNull())
1064       return nullptr;
1065     ReportInvalidAllocationAlignment(alignment, stack);
1066   }
1067   return SetErrnoOnNull(
1068       instance.Allocate(size, alignment, stack, alloc_type, true));
1069 }
1070 
1071 void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
1072   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
1073     errno = errno_EINVAL;
1074     if (AllocatorMayReturnNull())
1075       return nullptr;
1076     ReportInvalidAlignedAllocAlignment(size, alignment, stack);
1077   }
1078   return SetErrnoOnNull(
1079       instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
1080 }
1081 
1082 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
1083                         BufferedStackTrace *stack) {
1084   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
1085     if (AllocatorMayReturnNull())
1086       return errno_EINVAL;
1087     ReportInvalidPosixMemalignAlignment(alignment, stack);
1088   }
1089   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
1090   if (UNLIKELY(!ptr))
1091     // OOM error is already taken care of by Allocate.
1092     return errno_ENOMEM;
1093   CHECK(IsAligned((uptr)ptr, alignment));
1094   *memptr = ptr;
1095   return 0;
1096 }
1097 
1098 uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
1099   if (!ptr) return 0;
1100   uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1101   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
1102     GET_STACK_TRACE_FATAL(pc, bp);
1103     ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
1104   }
1105   return usable_size;
1106 }
1107 
1108 uptr asan_mz_size(const void *ptr) {
1109   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1110 }
1111 
1112 void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1113   instance.ForceLock();
1114 }
1115 
1116 void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1117   instance.ForceUnlock();
1118 }
1119 
1120 }  // namespace __asan
1121 
1122 // --- Implementation of LSan-specific functions --- {{{1
1123 namespace __lsan {
1124 void LockAllocator() {
1125   __asan::get_allocator().ForceLock();
1126 }
1127 
1128 void UnlockAllocator() {
1129   __asan::get_allocator().ForceUnlock();
1130 }
1131 
1132 void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1133   *begin = (uptr)&__asan::get_allocator();
1134   *end = *begin + sizeof(__asan::get_allocator());
1135 }
1136 
1137 uptr PointsIntoChunk(void *p) {
1138   uptr addr = reinterpret_cast<uptr>(p);
1139   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1140   if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
1141                 __asan::CHUNK_ALLOCATED)
1142     return 0;
1143   uptr chunk = m->Beg();
1144   if (m->AddrIsInside(addr))
1145     return chunk;
1146   if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
1147     return chunk;
1148   return 0;
1149 }
1150 
1151 uptr GetUserBegin(uptr chunk) {
1152   // FIXME: All use cases provide the chunk address, so
1153   // GetAsanChunkByAddrFastLocked is not needed.
1154   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1155   return m ? m->Beg() : 0;
1156 }
1157 
1158 uptr GetUserAddr(uptr chunk) {
1159   return chunk;
1160 }
1161 
1162 LsanMetadata::LsanMetadata(uptr chunk) {
1163   metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
1164                     : nullptr;
1165 }
1166 
1167 bool LsanMetadata::allocated() const {
1168   if (!metadata_)
1169     return false;
1170   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1171   return atomic_load(&m->chunk_state, memory_order_relaxed) ==
1172          __asan::CHUNK_ALLOCATED;
1173 }
1174 
1175 ChunkTag LsanMetadata::tag() const {
1176   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1177   return static_cast<ChunkTag>(m->lsan_tag);
1178 }
1179 
1180 void LsanMetadata::set_tag(ChunkTag value) {
1181   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1182   m->lsan_tag = value;
1183 }
1184 
1185 uptr LsanMetadata::requested_size() const {
1186   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1187   return m->UsedSize();
1188 }
1189 
1190 u32 LsanMetadata::stack_trace_id() const {
1191   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1192   u32 tid = 0;
1193   u32 stack = 0;
1194   m->GetAllocContext(tid, stack);
1195   return stack;
1196 }
1197 
1198 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1199   __asan::get_allocator().ForEachChunk(callback, arg);
1200 }
1201 
1202 IgnoreObjectResult IgnoreObject(const void *p) {
1203   uptr addr = reinterpret_cast<uptr>(p);
1204   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1205   if (!m ||
1206       (atomic_load(&m->chunk_state, memory_order_acquire) !=
1207        __asan::CHUNK_ALLOCATED) ||
1208       !m->AddrIsInside(addr)) {
1209     return kIgnoreObjectInvalid;
1210   }
1211   if (m->lsan_tag == kIgnored)
1212     return kIgnoreObjectAlreadyIgnored;
1213   m->lsan_tag = __lsan::kIgnored;
1214   return kIgnoreObjectSuccess;
1215 }
1216 
1217 }  // namespace __lsan
1218 
1219 // ---------------------- Interface ---------------- {{{1
1220 using namespace __asan;
1221 
1222 static const void *AllocationBegin(const void *p) {
1223   AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
1224   if (!m)
1225     return nullptr;
1226   if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
1227     return nullptr;
1228   if (m->UsedSize() == 0)
1229     return nullptr;
1230   return (const void *)(m->Beg());
1231 }
1232 
1233 // ASan allocator doesn't reserve extra bytes, so normally we would
1234 // just return "size". We don't want to expose our redzone sizes, etc here.
1235 uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1236   return size;
1237 }
1238 
1239 int __sanitizer_get_ownership(const void *p) {
1240   uptr ptr = reinterpret_cast<uptr>(p);
1241   return instance.AllocationSize(ptr) > 0;
1242 }
1243 
1244 uptr __sanitizer_get_allocated_size(const void *p) {
1245   if (!p) return 0;
1246   uptr ptr = reinterpret_cast<uptr>(p);
1247   uptr allocated_size = instance.AllocationSize(ptr);
1248   // Die if p is not malloced or if it is already freed.
1249   if (allocated_size == 0) {
1250     GET_STACK_TRACE_FATAL_HERE;
1251     ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1252   }
1253   return allocated_size;
1254 }
1255 
1256 uptr __sanitizer_get_allocated_size_fast(const void *p) {
1257   DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
1258   uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
1259   DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
1260   return ret;
1261 }
1262 
1263 const void *__sanitizer_get_allocated_begin(const void *p) {
1264   return AllocationBegin(p);
1265 }
1266 
1267 void __sanitizer_purge_allocator() {
1268   GET_STACK_TRACE_MALLOC;
1269   instance.Purge(&stack);
1270 }
1271 
1272 int __asan_update_allocation_context(void* addr) {
1273   GET_STACK_TRACE_MALLOC;
1274   return instance.UpdateAllocationStack((uptr)addr, &stack);
1275 }
1276