//===-- dfsan_allocator.cpp -------------------------- --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataflowSanitizer.
//
// DataflowSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "dfsan_allocator.h"

#include "dfsan.h"
#include "dfsan_flags.h"
#include "dfsan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __dfsan {

struct Metadata {
  uptr requested_size;
};

struct DFsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {
    OnMap(p, size);
  }
  void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
};

#if defined(__aarch64__)
const uptr kAllocatorSpace = 0xE00000000000ULL;
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef DFsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void dfsan_allocator_init() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void DFsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    BufferedStackTrace stack;
    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportRssLimitExceeded(&stack);
  }
  DFsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportOutOfMemory(size, &stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    internal_memset(allocated, 0, size);
    dfsan_set_label(0, allocated, size);
  } else if (flags().zero_in_malloc) {
    dfsan_set_label(0, allocated, size);
  }
  return allocated;
}

void dfsan_deallocate(void *p) {
  CHECK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  if (flags().zero_in_free)
    dfsan_set_label(0, p, size);
  DFsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
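    // The existing chunk already has room for new_size bytes, so it is reused
    // in place: only the bookkeeping and, when growing, the shadow labels of
    // the newly exposed bytes need updating.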
    meta->requested_size = new_size;
    if (new_size > old_size && flags().zero_in_malloc)
      dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
  if (new_p) {
    dfsan_copy_memory(new_p, old_p, memcpy_size);
    dfsan_deallocate(old_p);
  }
  return new_p;
}

void *DFsanCalloc(uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}

static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

void *dfsan_malloc(uptr size) {
  return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
}

void *dfsan_calloc(uptr nmemb, uptr size) {
  return SetErrnoOnNull(DFsanCalloc(nmemb, size));
}

void *dfsan_realloc(void *ptr, uptr size) {
  if (!ptr)
    return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
  if (size == 0) {
    dfsan_deallocate(ptr);
    return nullptr;
  }
  return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
}

void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return dfsan_realloc(ptr, nmemb * size);
}

void *dfsan_valloc(uptr size) {
  return SetErrnoOnNull(
      DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
}

void *dfsan_pvalloc(uptr size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
}

void *dfsan_aligned_alloc(uptr alignment, uptr size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

void *dfsan_memalign(uptr alignment, uptr size) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    BufferedStackTrace stack;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by DFsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __dfsan

using namespace __dfsan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}