//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
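
// Illustrative sketch (not used by the allocator): how a (tid, stack id) pair
// round-trips through the 64-bit context word handled by the helpers above.
// The concrete values are made up for the example.
//
//   atomic_uint64_t ctx;
//   AtomicContextStore(&ctx, /*tid=*/7, /*stack=*/0x1234);
//   // ctx now holds 0x0000000700001234: tid in the high 32 bits,
//   // stack id in the low 32 bits.
//   u32 tid, stack;
//   AtomicContextLoad(&ctx, tid, stack);
//   // tid == 7, stack == 0x1234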

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but AsanChunk is not
  // yet ready, or almost returned to the underlying allocator and AsanChunk
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
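
// For reference, the chunk_state values above form a small state machine
// (all transitions are implemented further down in this file):
//   CHUNK_INVALID    -> CHUNK_ALLOCATED   in Allocator::Allocate()
//   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  in AtomicallySetQuarantineFlagIfAllocated()
//   CHUNK_QUARANTINE -> CHUNK_INVALID     in QuarantineCallback::Recycle()
// Each transition uses an atomic store or compare-exchange, so e.g. a second
// free() of the same pointer fails the ALLOCATED -> QUARANTINE exchange and
// is reported as an invalid free instead of corrupting the quarantine.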

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    void *p = get_allocator().GetBlockBegin(m);
    if (p != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
    }

    u8 old_chunk_state = CHUNK_QUARANTINE;
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_INVALID, memory_order_acquire)) {
      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
    }

    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
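
// Note on the two callbacks above (illustrative; assumes the default
// ASAN_SHADOW_GRANULARITY of 8): PoisonShadow(p, size, magic) writes one
// shadow byte per 8 application bytes, so mapping a fresh 1 MiB region in
// OnMap() sets 128 KiB of shadow to kAsanHeapLeftRedzoneMagic; the region
// stays poisoned until Allocate() carves chunks out of it and unpoisons the
// user portions. OnUnmap() does the reverse: it clears the shadow and then
// releases the now-unneeded shadow pages back to the OS.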

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
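
  // Worked example for the helpers above (illustrative; assumes the default
  // flags redzone=16 and max_redzone=2048):
  //   ComputeRZLog(100) -> 2, i.e. RZLog2Size(2) == 64-byte redzones,
  //   because 100 <= 512 - 64 but 100 > 128 - 32.
  //   ComputeUserRequestedAlignmentLog(32) -> Log2(32) - 2 == 3, and
  //   ComputeUserAlignment(3) -> 1 << (3 + 2) == 32, so a requested alignment
  //   round-trips through the 3-bit field in ChunkHeader.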

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
    // available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }
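
  // Worked example for Allocate() above (illustrative; assumes default flags,
  // ASAN_SHADOW_GRANULARITY == 8 and an 8-byte-aligned malloc(100)):
  //   rz_log = ComputeRZLog(100) == 2, so rz_size == 64;
  //   rounded_size = RoundUpTo(Max(100, kChunkHeader2Size), 8) == 104;
  //   needed_size  = 104 + 64 == 168 (no extra padding since alignment == 8);
  //   user_beg = alloc_beg + 64 and chunk_beg = user_beg - 16,
  //   so the 16-byte ChunkHeader sits at the tail of the left redzone and
  //   alloc_beg != chunk_beg, which is why the LargeChunkHeader magic is
  //   written at alloc_beg at the end of Allocate().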

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine the given chunk if we failed to set the
    // CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
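
  // For reference, a free() goes through the methods above in this order:
  // Deallocate() flips the chunk state via
  // AtomicallySetQuarantineFlagIfAllocated(), then QuarantineChunk() scribbles
  // the user memory, poisons its shadow with kAsanHeapFreeMagic and hands the
  // chunk to the quarantine; only when the quarantine evicts it does
  // QuarantineCallback::Recycle() return the block to the underlying
  // allocator. Until then the memory is neither reused nor unpoisoned, which
  // is what makes use-after-free on recently freed memory detectable.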

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but for any
    // other state value the chunk is definitely invalid.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the chunk before it.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc(p, 0).
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  // FIXME: All use cases provide the chunk address;
  // GetAsanChunkByAddrFastLocked is not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}
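
// Illustrative usage of the public interface above (hypothetical client code,
// not part of this file): for a live heap pointer p returned by malloc(100),
//   __sanitizer_get_ownership(p)       -> 1
//   __sanitizer_get_allocated_size(p)  -> 100
// For a pointer the ASan allocator does not own, ownership is 0, while
// __sanitizer_get_allocated_size() reports a fatal error instead of returning.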