//===-- msan_allocator.cpp -------------------------- ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                            uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }