//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
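  // Chunks served by the secondary (LargeMmapAllocator) come from freshly
  // mmap'ed pages, which the kernel already zero-fills, so an explicit
  // memset is only needed for primary-allocator chunks.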
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
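    // Per POSIX, posix_memalign reports failure through its return value and
    // leaves errno untouched, hence the error code is returned directly here.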
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
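  // The checks below only accept addresses that fall inside the user-visible
  // region of a live chunk (plus the IsSpecialCaseOfOperatorNew0 exception
  // handled at the end).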
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"