//===-- dfsan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataflowSanitizer.
//
// DataflowSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "dfsan_allocator.h"

#include "dfsan.h"
#include "dfsan_flags.h"
#include "dfsan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __dfsan {

struct Metadata {
  uptr requested_size;
};

struct DFsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {
    OnMap(p, size);
  }
  void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
};

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in dfsan_platform.h.
#if defined(__aarch64__)
const uptr kAllocatorSpace = 0xE00000000000ULL;
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef DFsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void dfsan_allocator_init() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void DFsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}
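
// Common allocation path for all dfsan_* allocation entry points: enforces the
// size and RSS limits, allocates from the current thread's cache (or from the
// global fallback cache when no DFsanThread is available), records the
// requested size in the block metadata, and clears the shadow labels of the
// returned memory when zero-initializing or when flags().zero_in_malloc is on.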
static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    BufferedStackTrace stack;
    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportRssLimitExceeded(&stack);
  }
  DFsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportOutOfMemory(size, &stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    internal_memset(allocated, 0, size);
    dfsan_set_label(0, allocated, size);
  } else if (flags().zero_in_malloc) {
    dfsan_set_label(0, allocated, size);
  }
  return allocated;
}

void dfsan_deallocate(void *p) {
  CHECK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  if (flags().zero_in_free)
    dfsan_set_label(0, p, size);
  DFsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
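
// Grows or shrinks an existing allocation. If the new size still fits in the
// block backing old_p, the block is reused in place: only the recorded
// requested_size is updated and, with flags().zero_in_malloc, the labels of
// the newly exposed tail are cleared. Otherwise a new block is allocated, the
// data and its labels are copied via dfsan_copy_memory, and old_p is freed.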
void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size && flags().zero_in_malloc)
      dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
  if (new_p) {
    dfsan_copy_memory(new_p, old_p, memcpy_size);
    dfsan_deallocate(old_p);
  }
  return new_p;
}

void *DFsanCalloc(uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}

static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

void *dfsan_malloc(uptr size) {
  return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
}

void *dfsan_calloc(uptr nmemb, uptr size) {
  return SetErrnoOnNull(DFsanCalloc(nmemb, size));
}

void *dfsan_realloc(void *ptr, uptr size) {
  if (!ptr)
    return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
  if (size == 0) {
    dfsan_deallocate(ptr);
    return nullptr;
  }
  return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
}

void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return dfsan_realloc(ptr, nmemb * size);
}

void *dfsan_valloc(uptr size) {
  return SetErrnoOnNull(
      DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
}

void *dfsan_pvalloc(uptr size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
}

void *dfsan_aligned_alloc(uptr alignment, uptr size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

void *dfsan_memalign(uptr alignment, uptr size) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    BufferedStackTrace stack;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by DFsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __dfsan

using namespace __dfsan;
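
// Public __sanitizer_* allocator introspection interface, implemented on top
// of the dfsan allocator and its per-block metadata.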
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}