//===-- dfsan_allocator.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataflowSanitizer.
//
// DataflowSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "dfsan_allocator.h"

#include "dfsan.h"
#include "dfsan_flags.h"
#include "dfsan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __dfsan {

struct Metadata {
  uptr requested_size;
};

// Clear shadow (set the zero label) whenever the allocator maps or unmaps a
// region, so stale labels never leak into fresh allocations.
struct DFsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
  void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
};

#if defined(__aarch64__)
const uptr kAllocatorSpace = 0xE00000000000ULL;
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 8UL << 30;  // 8GB.

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef DFsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
// Allocations made before the current DFsanThread is set up go through this
// global cache, guarded by fallback_mutex.
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void dfsan_allocator_init() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void DFsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    BufferedStackTrace stack;
    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportRssLimitExceeded(&stack);
  }
  DFsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportOutOfMemory(size, &stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    internal_memset(allocated, 0, size);
    dfsan_set_label(0, allocated, size);
  } else if (flags().zero_in_malloc) {
    dfsan_set_label(0, allocated, size);
  }
  return allocated;
}

void dfsan_deallocate(void *p) {
  CHECK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  if (flags().zero_in_free)
    dfsan_set_label(0, p, size);
  DFsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size && flags().zero_in_malloc)
      dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
  if (new_p) {
    dfsan_copy_memory(new_p, old_p, memcpy_size);
    dfsan_deallocate(old_p);
  }
  return new_p;
}

void *DFsanCalloc(uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}

// Returns the user-requested size of the live heap block starting at p, or 0
// if p is null or does not point at the beginning of a heap block.
static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

void *dfsan_malloc(uptr size) {
  return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
}

void *dfsan_calloc(uptr nmemb, uptr size) {
  return SetErrnoOnNull(DFsanCalloc(nmemb, size));
}

void *dfsan_realloc(void *ptr, uptr size) {
  if (!ptr)
    return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
  if (size == 0) {
    dfsan_deallocate(ptr);
    return nullptr;
  }
  return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
}

void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return dfsan_realloc(ptr, nmemb * size);
}

void *dfsan_valloc(uptr size) {
  return SetErrnoOnNull(
      DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
}

void *dfsan_pvalloc(uptr size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
}

void *dfsan_aligned_alloc(uptr alignment, uptr size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

void *dfsan_memalign(uptr alignment, uptr size) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
}

int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    BufferedStackTrace stack;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by DFsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __dfsan

using namespace __dfsan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }