//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif

// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif

extern "C" {

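// Checks the nmemb * size product for overflow, then returns a
// zero-initialized allocation of that many bytes.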
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
}

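// Returns a pointer previously obtained from one of the malloc-type functions
// to the allocator.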
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}

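// Fills a mallinfo structure from the allocator's global statistics.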
INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

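// Standard allocation; errno is set to ENOMEM whenever null is returned
// because the request cannot be satisfied.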
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
}

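// Bionic declares malloc_usable_size() with a const pointer argument, hence
// the Android-specific prototype.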
#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpToPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                  alignment);
}

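// POSIX-style aligned allocation: the result is stored through memptr, and
// errors are returned directly (EINVAL for an invalid alignment, ENOMEM when
// the allocation fails).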
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *memptr = Ptr;
  return 0;
}

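// Allocates size bytes rounded up to a multiple of the page size, with the
// result aligned on a page boundary.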
INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size ? scudo::roundUpTo(size, PageSize) : PageSize,
      scudo::Chunk::Origin::Memalign, PageSize));
}

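// realloc(nullptr, size) behaves like malloc(size), and realloc(ptr, 0) frees
// ptr and returns null.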
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr)
    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  if (size == 0) {
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
}

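// Allocates size bytes aligned on a page boundary.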
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
}

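// Invokes callback on each allocated chunk within the [base, base + size)
// address range.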
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}

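// malloc_disable() locks the allocator until a matching malloc_enable() call;
// see malloc_postinit() below for their use around fork().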
INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}

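// Initializes GWP-ASan and registers fork handlers that keep the allocator
// disabled while fork() is in flight.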
void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}

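// Maps the legacy mallopt() parameters onto scudo options. M_DECAY_TIME
// adjusts the release-to-OS interval, M_PURGE releases free memory
// immediately, and the remaining parameters translate directly to allocator
// options.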
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      if (value == 0) {
        // Will set the release values to their minimum values.
        value = INT32_MIN;
      } else {
        // Will set the release values to their maximum values.
        value = INT32_MAX;
      }
    }

    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS();
    return 1;
  } else {
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
  }
}

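// C11 aligned_alloc(): the alignment and size combination is validated first,
// with invalid requests either setting errno to EINVAL or reporting an error,
// depending on whether the allocator may return null.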
INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
}

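// Writes an XML summary of the live allocations to stream, counting chunks by
// size up to the primary allocator's maximum size class.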
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };
  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);

  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%lu\" count=\"%lu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}

// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  SCUDO_ALLOCATOR.disableMemoryTagging();
}

// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}

// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                : scudo::NoFill);
}

// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  SCUDO_ALLOCATOR.setFillContents(
      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}

} // extern "C"