//===-- hwasan_allocator.cpp ------------------------ ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

#if HWASAN_WITH_INTERCEPTORS
DEFINE_REAL(void *, realloc, void *ptr, uptr size)
DEFINE_REAL(void, free, void *ptr)
#endif

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
}

// Aligns the 'addr' right to the granule boundary.
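// Illustrative note (assuming kShadowAlignment == 16, the HWASan granule
// size): a 13-byte request leaves tail_size == 13, so AlignRight returns
// addr + 3, which makes the end of the 13 usable bytes coincide with the
// next granule boundary.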
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
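  // Illustrative note (assuming kShadowAlignment == 16): for a 20-byte
  // request, the first 16-byte granule is tagged with `tag`, while the
  // trailing short granule's shadow byte records the used size (20 % 16 == 4)
  // and the real tag is stashed in the granule's last byte; this is the
  // encoding that PossiblyShortTagMatches() later checks on access.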
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
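  // The record pushed below feeds the per-thread heap allocations ring
  // buffer, which the reporting code uses to attribute use-after-free
  // accesses to their allocation and deallocation stacks.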
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->requested_size;
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));

#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to realloc it.
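  // (Interceptor builds can observe pointers that were allocated before
  // HWASan took over, e.g. during early process startup; those carry no
  // pointer tag, which is what the check below detects.)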
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(realloc)(ptr, size);
#endif

  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to free it.
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(free)(ptr);
#endif

  return HwasanDeallocate(stack, ptr);
}

} // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
#if HWASAN_WITH_INTERCEPTORS
  // Allocator tagging must be enabled for the system allocator fallback to
  // work correctly. This means that we can't disable it at runtime if it was
  // enabled at startup since that might result in our deallocations going to
  // the system allocator. If tagging was disabled at startup we avoid this
  // problem by disabling the fallback altogether.
  CHECK(flags()->disable_allocator_tagging);
#endif

  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

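// Note: the __sanitizer_* statistics below come from the combined allocator's
// counters; queries that HWASan does not track (free bytes, unmapped bytes)
// return constant placeholder values.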
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }