//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

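// Allocator parameters are selected per target architecture below; each
// variant places the primary allocator region at an address range that fits
// that target's MemorySanitizer memory layout.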
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

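// Common allocation path: enforces the size and RSS limits, then allocates
// from the per-thread cache (or the global fallback cache when no MsanThread
// is available), records the requested size in the chunk metadata, and zeroes
// or poisons the new memory depending on `zeroise` and `poison_in_malloc`.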
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                            uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

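// Returns the user-requested size of a live chunk, or 0 when p is null or
// does not point to the beginning of a chunk owned by this allocator.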
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }