//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif

// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif

extern "C" {

INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
}

INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}

INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
#if !SCUDO_ANDROID
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
  struct __scudo_mallinfo2 Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = Stats[scudo::StatMapped];
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = Stats[scudo::StatFree];
  // Total allocated space (bytes)
  Info.uordblks = Stats[scudo::StatAllocated];
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}
#endif

INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
}

#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                  alignment);
}

INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *memptr = Ptr;
  return 0;
}

INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
                               scudo::Chunk::Origin::Memalign, PageSize));
}

INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr)
    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  if (size == 0) {
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
}

INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
}

INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}

INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}

void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}

INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      if (value == 0) {
        // Will set the release values to their minimum values.
        value = INT32_MIN;
      } else {
        // Will set the release values to their maximum values.
        value = INT32_MAX;
      }
    }

    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
    return 1;
  } else if (param == M_PURGE_ALL) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
    return 1;
  } else if (param == M_LOG_STATS) {
    SCUDO_ALLOCATOR.printStats();
    return 1;
  } else {
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
  }
}

INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
}

INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };

  SCUDO_ALLOCATOR.disable();
  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
  SCUDO_ALLOCATOR.enable();

  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}

// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  SCUDO_ALLOCATOR.disableMemoryTagging();
}

// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}

// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                : scudo::NoFill);
}

// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  SCUDO_ALLOCATOR.setFillContents(
      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}

// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}

} // extern "C"
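
// A minimal sketch of how a wrapper translation unit is expected to consume
// this file, given the SCUDO_PREFIX / SCUDO_ALLOCATOR requirements checked at
// the top. This is illustrative only: the allocator config and post-init hook
// shown here are assumptions and will differ per platform wrapper.
//
//   #define SCUDO_PREFIX(name) name
//   #define SCUDO_ALLOCATOR Allocator
//   // The static allocator instance that backs the wrappers above.
//   static scudo::Allocator<scudo::DefaultConfig,
//                           SCUDO_PREFIX(malloc_postinit)>
//       SCUDO_ALLOCATOR;
//   #include "wrappers_c.inc"
//   #undef SCUDO_ALLOCATOR
//   #undef SCUDO_PREFIX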