//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_vector.h"

#include <sched.h>
#include <time.h>

namespace __memprof {
namespace {
using ::llvm::memprof::MemInfoBlock;

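// Prints a single MemInfoBlock, either as one compact slash-separated MIB
// record (terse mode) or as a multi-line human-readable record. The helper
// variable p holds averages scaled by 100 so they can be printed with two
// decimal places using integer-only Printf formatting.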
void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100, p % 100,
           M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
  }
}
} // namespace

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will seg fault as the address of __vdso_getcpu will be null.
  if (!memprof_init_done)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // clock_gettime will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, this will be effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.

// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32 bytes.
struct ChunkHeader {
  // 1-st 4 bytes.
  u32 alloc_context_id;
  // 2-nd 4 bytes
  u32 cpu_id;
  // 3-rd 4 bytes
  u32 timestamp_ms;
  // 4-th 4 bytes
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future for
  // more fields.
  atomic_uint64_t user_requested_size;
  // 23 bits available
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

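// View of a chunk that overlays the ChunkHeader stored immediately before the
// user memory. Beg() returns the start of the user region, UsedSize() the
// currently recorded user-requested size (0 once deallocated), and AllocBeg()
// the start of the block actually returned by the underlying allocator.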
struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

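// Header written at the very beginning of the allocator block when the
// MemprofChunk does not start there (the memalign case in the diagram above).
// It holds the kAllocBegMagic marker and a back-pointer to the real
// ChunkHeader so the chunk can be located from the block start.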
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

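// Map/unmap callbacks invoked by the underlying sanitizer allocator. On map we
// only update statistics; on unmap we additionally release the corresponding
// shadow pages back to the OS.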
void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Print(Value->mib, Key, bool(Arg));
  }

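  // Dumps all recorded MIBs, including those for blocks that are still live,
  // either as human-readable text or as a serialized raw profile written to
  // the report file, depending on the print_text flag.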
  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      MemoryMappingLayout Layout(/*cache_enabled=*/true);
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

  // Inserts any blocks which have been allocated but not yet deallocated.
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          u64 c = GetShadowCount(user_beg, user_requested_size);
          long curtime = GetTimestamp();
          MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                              m->cpu_id, GetCpuId());
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    CHECK(stack);
    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

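    // Carve the chunk out of the raw allocation: the ChunkHeader occupies the
    // kChunkHeaderSize bytes immediately preceding the user region, and
    // user_beg may be pushed past alloc_beg + kChunkHeaderSize when the
    // requested alignment exceeds MEMPROF_ALIGNMENT (the memalign case).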
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;
    CHECK(size);

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    RunFreeHooks(ptr);

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

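    // Reset the recorded size to 0 to mark the chunk as no longer live for
    // GetMemprofChunk and InsertLiveBlocks; the exchange returns the size so
    // it can be used for the MIB update and statistics below.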
    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && memprof_init_done &&
        atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      u64 c = GetShadowCount(p, user_requested_size);
      long curtime = GetTimestamp();

      MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                          m->cpu_id, GetCpuId());
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms,
                  BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

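  // Returns the user-requested size if p points at the exact start of a live
  // chunk, and 0 otherwise. Backs malloc_usable_size and the
  // __sanitizer_get_ownership/__sanitizer_get_allocated_size interface below.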
  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m)
      return 0;
    if (m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

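  // Acquires both the underlying allocator's internal locks and the fallback
  // cache mutex; ForceUnlock releases them in the reverse order.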
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0);
}

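// Exported entry point that lets a client dump the profile on demand, in
// addition to the dump performed when the Allocator instance is destroyed.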
int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}