//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif

// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif

static void reportAllocation(void *ptr, size_t size) {
  if (SCUDO_ENABLE_HOOKS)
    if (__scudo_allocate_hook && ptr)
      __scudo_allocate_hook(ptr, size);
}
static void reportDeallocation(void *ptr) {
  if (SCUDO_ENABLE_HOOKS)
    if (__scudo_deallocate_hook)
      __scudo_deallocate_hook(ptr);
}
static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
  DCHECK_NE(new_ptr, nullptr);

  if (SCUDO_ENABLE_HOOKS) {
    if (__scudo_realloc_allocate_hook)
      __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
    else if (__scudo_allocate_hook)
      __scudo_allocate_hook(new_ptr, size);
  }
}
static void reportReallocDeallocation(void *old_ptr) {
  if (SCUDO_ENABLE_HOOKS) {
    if (__scudo_realloc_deallocate_hook)
      __scudo_realloc_deallocate_hook(old_ptr);
    else if (__scudo_deallocate_hook)
      __scudo_deallocate_hook(old_ptr);
  }
}

extern "C" {

INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
                                       SCUDO_MALLOC_ALIGNMENT, true);
  reportAllocation(Ptr, Product);
  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  reportDeallocation(ptr);
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}

INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
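// Unlike mallinfo above, the mallinfo2 fields are wide enough to hold the
// stat counters, so they are assigned without narrowing casts.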
#if !SCUDO_ANDROID
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
  struct __scudo_mallinfo2 Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = Stats[scudo::StatMapped];
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = Stats[scudo::StatFree];
  // Total allocated space (bytes)
  Info.uordblks = Stats[scudo::StatAllocated];
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}
#endif

INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
                                       SCUDO_MALLOC_ALIGNMENT);
  reportAllocation(Ptr, size);
  return scudo::setErrnoOnNull(Ptr);
}

#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  reportAllocation(Ptr, size);
  return Ptr;
}

INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  reportAllocation(Ptr, size);

  *memptr = Ptr;
  return 0;
}

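// pvalloc is a legacy page-aligned allocation function: unlike valloc, the
// requested size is rounded up to a multiple of the page size.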
INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
                               scudo::Chunk::Origin::Memalign, PageSize);
  reportAllocation(Ptr, scudo::roundUp(size, PageSize));

  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr) {
    void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
                                         SCUDO_MALLOC_ALIGNMENT);
    reportAllocation(Ptr, size);
    return scudo::setErrnoOnNull(Ptr);
  }
  if (size == 0) {
    reportDeallocation(ptr);
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }

  // Given that the reporting of deallocation and allocation are not atomic, we
  // always pretend the old pointer will be released so that the user doesn't
  // need to worry about the false double-use case from the view of hooks.
  //
  // For example, assume that `realloc` releases the old pointer and allocates a
  // new pointer. Before the reporting of both operations has been done, another
  // thread may get the old pointer from `malloc`. It may be misinterpreted as
  // double-use if it's not handled properly on the hook side.
  reportReallocDeallocation(ptr);
  void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
  if (NewPtr != nullptr) {
    // Note that even if NewPtr == ptr, the size has changed. We still need to
    // report the new size.
    reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
  } else {
    // If `realloc` fails, the old pointer is not released. Report the old
    // pointer as allocated again.
    reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
                            SCUDO_ALLOCATOR.getAllocSize(ptr));
  }

  return scudo::setErrnoOnNull(NewPtr);
}

INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                       scudo::getPageSizeCached());
  reportAllocation(Ptr, size);

  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}

INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}

void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}

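// mallopt follows the usual convention: a non-zero return value indicates
// success, and 0 indicates failure or an unrecognized parameter.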
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      if (value == 0) {
        // Will set the release values to their minimum values.
        value = INT32_MIN;
      } else {
        // Will set the release values to their maximum values.
        value = INT32_MAX;
      }
    }

    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
    return 1;
  } else if (param == M_PURGE_ALL) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
    return 1;
  } else if (param == M_LOG_STATS) {
    SCUDO_ALLOCATOR.printStats();
    SCUDO_ALLOCATOR.printFragmentationInfo();
    return 1;
  } else {
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
  }
}

INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }

  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
  reportAllocation(Ptr, size);

  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };

  SCUDO_ALLOCATOR.disable();
  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
  SCUDO_ALLOCATOR.enable();

  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}

// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  SCUDO_ALLOCATOR.disableMemoryTagging();
}

// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}

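// The two setters below configure the same fill-contents mode (NoFill,
// ZeroFill, or PatternOrZeroFill), so the most recent call takes effect.
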
// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                : scudo::NoFill);
}

// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  SCUDO_ALLOCATOR.setFillContents(
      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}

// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}

} // extern "C"