xref: /freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11 
12 #include "chunk.h"
13 #include "common.h"
14 #include "flags.h"
15 #include "flags_parser.h"
16 #include "local_cache.h"
17 #include "memtag.h"
18 #include "options.h"
19 #include "quarantine.h"
20 #include "report.h"
21 #include "rss_limit_checker.h"
22 #include "secondary.h"
23 #include "stack_depot.h"
24 #include "string_utils.h"
25 #include "tsd.h"
26 
27 #include "scudo/interface.h"
28 
29 #ifdef GWP_ASAN_HOOKS
30 #include "gwp_asan/guarded_pool_allocator.h"
31 #include "gwp_asan/optional/backtrace.h"
32 #include "gwp_asan/optional/segv_handler.h"
33 #endif // GWP_ASAN_HOOKS
34 
35 extern "C" inline void EmptyCallback() {}
36 
37 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
38 // This function is not part of the NDK so it does not appear in any public
39 // header files. We only declare/use it when targeting the platform.
40 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
41                                                      size_t num_entries);
42 #endif
43 
44 namespace scudo {
45 
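// The combined allocator is Scudo's front end: it dispatches requests between
// the Primary and Secondary backends, maintains the per-chunk headers and the
// quarantine, and coordinates per-thread caches through the TSD registry. The
// Config type selects the backends and their parameters.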
46 template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
47 class Allocator {
48 public:
49   using PrimaryT = typename Config::template PrimaryT<Config>;
50   using SecondaryT = typename Config::template SecondaryT<Config>;
51   using CacheT = typename PrimaryT::CacheT;
52   typedef Allocator<Config, PostInitCallback> ThisT;
53   typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
54 
55   void callPostInitCallback() {
56     pthread_once(&PostInitNonce, PostInitCallback);
57   }
58 
59   struct QuarantineCallback {
60     explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
61         : Allocator(Instance), Cache(LocalCache) {}
62 
63     // Chunk recycling function: returns a quarantined chunk to the backend,
64     // first making sure it hasn't been tampered with.
65     void recycle(void *Ptr) {
66       Chunk::UnpackedHeader Header;
67       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
68       if (UNLIKELY(Header.State != Chunk::State::Quarantined))
69         reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
70 
71       Chunk::UnpackedHeader NewHeader = Header;
72       NewHeader.State = Chunk::State::Available;
73       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
74 
75       if (allocatorSupportsMemoryTagging<Config>())
76         Ptr = untagPointer(Ptr);
77       void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
78       Cache.deallocate(NewHeader.ClassId, BlockBegin);
79     }
80 
81     // We take a shortcut when allocating a quarantine batch by working with the
82     // appropriate class ID instead of using Size. The compiler should optimize
83     // the class ID computation and work with the associated cache directly.
84     void *allocate(UNUSED uptr Size) {
85       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
86           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
87       void *Ptr = Cache.allocate(QuarantineClassId);
88       // Quarantine batch allocation failure is fatal.
89       if (UNLIKELY(!Ptr))
90         reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
91 
92       Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
93                                      Chunk::getHeaderSize());
94       Chunk::UnpackedHeader Header = {};
95       Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
96       Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
97       Header.State = Chunk::State::Allocated;
98       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
99 
100       // Reset tag to 0 as this chunk may have been previously used for a tagged
101       // user allocation.
102       if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
103         storeTags(reinterpret_cast<uptr>(Ptr),
104                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
105 
106       return Ptr;
107     }
108 
109     void deallocate(void *Ptr) {
110       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
111           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
112       Chunk::UnpackedHeader Header;
113       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
114 
115       if (UNLIKELY(Header.State != Chunk::State::Allocated))
116         reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
117       DCHECK_EQ(Header.ClassId, QuarantineClassId);
118       DCHECK_EQ(Header.Offset, 0);
119       DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
120 
121       Chunk::UnpackedHeader NewHeader = Header;
122       NewHeader.State = Chunk::State::Available;
123       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
124       Cache.deallocate(QuarantineClassId,
125                        reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
126                                                 Chunk::getHeaderSize()));
127     }
128 
129   private:
130     ThisT &Allocator;
131     CacheT &Cache;
132   };
133 
134   typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
135   typedef typename QuarantineT::CacheT QuarantineCacheT;
136 
137   void init() {
138     performSanityChecks();
139 
140     // Check if hardware CRC32 is supported in the binary and by the platform;
141     // if so, opt for the hardware version of the CRC32 checksum.
142     if (&computeHardwareCRC32 && hasHardwareCRC32())
143       HashAlgorithm = Checksum::HardwareCRC32;
144 
145     if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
146       Cookie = static_cast<u32>(getMonotonicTime() ^
147                                 (reinterpret_cast<uptr>(this) >> 4));
148 
149     initFlags();
150     reportUnrecognizedFlags();
151 
152     RssChecker.init(scudo::getFlags()->soft_rss_limit_mb,
153                     scudo::getFlags()->hard_rss_limit_mb);
154 
155     // Store some flags locally.
156     if (getFlags()->may_return_null)
157       Primary.Options.set(OptionBit::MayReturnNull);
158     if (getFlags()->zero_contents)
159       Primary.Options.setFillContentsMode(ZeroFill);
160     else if (getFlags()->pattern_fill_contents)
161       Primary.Options.setFillContentsMode(PatternOrZeroFill);
162     if (getFlags()->dealloc_type_mismatch)
163       Primary.Options.set(OptionBit::DeallocTypeMismatch);
164     if (getFlags()->delete_size_mismatch)
165       Primary.Options.set(OptionBit::DeleteSizeMismatch);
166     if (allocatorSupportsMemoryTagging<Config>() &&
167         systemSupportsMemoryTagging())
168       Primary.Options.set(OptionBit::UseMemoryTagging);
169 
170     QuarantineMaxChunkSize =
171         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
172 
173     Stats.init();
174     const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
175     Primary.init(ReleaseToOsIntervalMs);
176     Secondary.init(&Stats, ReleaseToOsIntervalMs);
177     Quarantine.init(
178         static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
179         static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
180 
181     mapAndInitializeRingBuffer();
182   }
183 
184   // Initialize the embedded GWP-ASan instance. Requires the main allocator to
185   // be functional, best called from PostInitCallback.
186   void initGwpAsan() {
187 #ifdef GWP_ASAN_HOOKS
188     gwp_asan::options::Options Opt;
189     Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
190     Opt.MaxSimultaneousAllocations =
191         getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
192     Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
193     Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
194     Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
195     // Embedded GWP-ASan is locked through the Scudo atfork handler (via
196     // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
197     // handler.
198     Opt.InstallForkHandlers = false;
199     Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
200     GuardedAlloc.init(Opt);
201 
202     if (Opt.InstallSignalHandlers)
203       gwp_asan::segv_handler::installSignalHandlers(
204           &GuardedAlloc, Printf,
205           gwp_asan::backtrace::getPrintBacktraceFunction(),
206           gwp_asan::backtrace::getSegvBacktraceFunction(),
207           Opt.Recoverable);
208 
209     GuardedAllocSlotSize =
210         GuardedAlloc.getAllocatorState()->maximumAllocationSize();
211     Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
212                             GuardedAllocSlotSize);
213 #endif // GWP_ASAN_HOOKS
214   }
215 
216 #ifdef GWP_ASAN_HOOKS
217   const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
218     return GuardedAlloc.getMetadataRegion();
219   }
220 
221   const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
222     return GuardedAlloc.getAllocatorState();
223   }
224 #endif // GWP_ASAN_HOOKS
225 
226   ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
227     TSDRegistry.initThreadMaybe(this, MinimalInit);
228   }
229 
230   void unmapTestOnly() {
231     unmapRingBuffer();
232     TSDRegistry.unmapTestOnly(this);
233     Primary.unmapTestOnly();
234     Secondary.unmapTestOnly();
235 #ifdef GWP_ASAN_HOOKS
236     if (getFlags()->GWP_ASAN_InstallSignalHandlers)
237       gwp_asan::segv_handler::uninstallSignalHandlers();
238     GuardedAlloc.uninitTestOnly();
239 #endif // GWP_ASAN_HOOKS
240   }
241 
242   TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
243   QuarantineT *getQuarantine() { return &Quarantine; }
244 
245   // The Cache must be provided zero-initialized.
246   void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
247 
248   // Release the resources used by a TSD, which involves:
249   // - draining the local quarantine cache to the global quarantine;
250   // - releasing the cached pointers back to the Primary;
251   // - unlinking the local stats from the global ones (destroying the cache does
252   //   the last two items).
253   void commitBack(TSD<ThisT> *TSD) {
254     Quarantine.drain(&TSD->getQuarantineCache(),
255                      QuarantineCallback(*this, TSD->getCache()));
256     TSD->getCache().destroy(&Stats);
257   }
258 
259   void drainCache(TSD<ThisT> *TSD) {
260     Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
261                                QuarantineCallback(*this, TSD->getCache()));
262     TSD->getCache().drain();
263   }
264   void drainCaches() { TSDRegistry.drainCaches(this); }
265 
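  // Return the pointer that should be used to access a chunk's header: tagged
  // Primary chunks keep a zero tag on the header granule, so the untagged
  // pointer is used; all other chunks use the fixed header tag.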
266   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
267     if (!allocatorSupportsMemoryTagging<Config>())
268       return Ptr;
269     auto UntaggedPtr = untagPointer(Ptr);
270     if (UntaggedPtr != Ptr)
271       return UntaggedPtr;
272     // Secondary, or pointer allocated while memory tagging is unsupported or
273     // disabled. The tag mismatch is okay in the latter case because tags will
274     // not be checked.
275     return addHeaderTag(Ptr);
276   }
277 
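  // Apply the fixed tag used when accessing the headers of Secondary-backed
  // chunks and of Primary chunks that were not memory tagged.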
278   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
279     if (!allocatorSupportsMemoryTagging<Config>())
280       return Ptr;
281     return addFixedTag(Ptr, 2);
282   }
283 
284   ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
285     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
286   }
287 
288   NOINLINE u32 collectStackTrace() {
289 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
290     // Discard collectStackTrace() frame and allocator function frame.
291     constexpr uptr DiscardFrames = 2;
292     uptr Stack[MaxTraceSize + DiscardFrames];
293     uptr Size =
294         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
295     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
296     return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
297 #else
298     return 0;
299 #endif
300   }
301 
302   uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
303                                          uptr ClassId) {
304     if (!Options.get(OptionBit::UseOddEvenTags))
305       return 0;
306 
307     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
308     // even, and vice versa. Blocks are laid out Size bytes apart, and adding
309     // Size to Ptr will flip the least significant set bit of Size in Ptr, so
310     // that bit will have the pattern 010101... for consecutive blocks, which we
311     // can use to determine which tag mask to use.
312     return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
313   }
314 
315   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
316                           uptr Alignment = MinAlignment,
317                           bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
318     initThreadMaybe();
319 
320     const Options Options = Primary.Options.load();
321     if (UNLIKELY(Alignment > MaxAlignment)) {
322       if (Options.get(OptionBit::MayReturnNull))
323         return nullptr;
324       reportAlignmentTooBig(Alignment, MaxAlignment);
325     }
326     if (Alignment < MinAlignment)
327       Alignment = MinAlignment;
328 
329 #ifdef GWP_ASAN_HOOKS
330     if (UNLIKELY(GuardedAlloc.shouldSample())) {
331       if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
332         if (UNLIKELY(&__scudo_allocate_hook))
333           __scudo_allocate_hook(Ptr, Size);
334         Stats.lock();
335         Stats.add(StatAllocated, GuardedAllocSlotSize);
336         Stats.sub(StatFree, GuardedAllocSlotSize);
337         Stats.unlock();
338         return Ptr;
339       }
340     }
341 #endif // GWP_ASAN_HOOKS
342 
343     const FillContentsMode FillContents = ZeroContents ? ZeroFill
344                                           : TSDRegistry.getDisableMemInit()
345                                               ? NoFill
346                                               : Options.getFillContentsMode();
347 
348     // If the requested size happens to be 0 (more common than you might think),
349     // allocate MinAlignment bytes on top of the header. Then add the extra
350     // bytes required to fulfill the alignment requirements: we allocate enough
351     // to be sure that there will be an address in the block that will satisfy
352     // the alignment.
353     const uptr NeededSize =
354         roundUp(Size, MinAlignment) +
355         ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
356 
357     // Takes care of extravagantly large sizes as well as integer overflows.
358     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
359     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
360       if (Options.get(OptionBit::MayReturnNull))
361         return nullptr;
362       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
363     }
364     DCHECK_LE(Size, NeededSize);
365 
366     switch (RssChecker.getRssLimitExceeded()) {
367     case RssLimitChecker::Neither:
368       break;
369     case RssLimitChecker::Soft:
370       if (Options.get(OptionBit::MayReturnNull))
371         return nullptr;
372       reportSoftRSSLimit(RssChecker.getSoftRssLimit());
373       break;
374     case RssLimitChecker::Hard:
375       reportHardRSSLimit(RssChecker.getHardRssLimit());
376       break;
377     }
378 
379     void *Block = nullptr;
380     uptr ClassId = 0;
381     uptr SecondaryBlockEnd = 0;
382     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
383       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
384       DCHECK_NE(ClassId, 0U);
385       bool UnlockRequired;
386       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
387       Block = TSD->getCache().allocate(ClassId);
388       // If the allocation failed, the most likely reason with a 32-bit primary
389       // is the region being full. In that event, retry in each successively
390       // larger class until it fits. If it fails to fit in the largest class,
391       // fall back to the Secondary.
392       if (UNLIKELY(!Block)) {
393         while (ClassId < SizeClassMap::LargestClassId && !Block)
394           Block = TSD->getCache().allocate(++ClassId);
395         if (!Block)
396           ClassId = 0;
397       }
398       if (UnlockRequired)
399         TSD->unlock();
400     }
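    // A ClassId of 0 at this point means the request is either too large for
    // the Primary or the Primary failed to serve it; use the Secondary.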
401     if (UNLIKELY(ClassId == 0)) {
402       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
403                                  FillContents);
404     }
405 
406     if (UNLIKELY(!Block)) {
407       if (Options.get(OptionBit::MayReturnNull))
408         return nullptr;
409       reportOutOfMemory(NeededSize);
410     }
411 
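    // The user pointer sits just past the header, rounded up to the requested
    // alignment.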
412     const uptr BlockUptr = reinterpret_cast<uptr>(Block);
413     const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
414     const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
415 
416     void *Ptr = reinterpret_cast<void *>(UserPtr);
417     void *TaggedPtr = Ptr;
418     if (LIKELY(ClassId)) {
419       // We only need to zero or tag the contents for Primary backed
420       // allocations. We only set tags for primary allocations in order to avoid
421       // faulting potentially large numbers of pages for large secondary
422       // allocations. We assume that guard pages are enough to protect these
423       // allocations.
424       //
425       // FIXME: When the kernel provides a way to set the background tag of a
426       // mapping, we should be able to tag secondary allocations as well.
427       //
428       // When memory tagging is enabled, zeroing the contents is done as part of
429       // setting the tag.
430       if (UNLIKELY(useMemoryTagging<Config>(Options))) {
431         uptr PrevUserPtr;
432         Chunk::UnpackedHeader Header;
433         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
434         const uptr BlockEnd = BlockUptr + BlockSize;
435         // If possible, try to reuse the UAF tag that was set by deallocate().
436         // For simplicity, only reuse tags if we have the same start address as
437         // the previous allocation. This handles the majority of cases since
438         // most allocations will not be more aligned than the minimum alignment.
439         //
440         // We need to handle situations involving reclaimed chunks, and retag
441         // the reclaimed portions if necessary. In the case where the chunk is
442         // fully reclaimed, the chunk's header will be zero, which will trigger
443         // the code path for new mappings and invalid chunks that prepares the
444         // chunk from scratch. There are three possibilities for partial
445         // reclaiming:
446         //
447         // (1) Header was reclaimed, data was partially reclaimed.
448         // (2) Header was not reclaimed, all data was reclaimed (e.g. because
449         //     data started on a page boundary).
450         // (3) Header was not reclaimed, data was partially reclaimed.
451         //
452         // Case (1) will be handled in the same way as for full reclaiming,
453         // since the header will be zero.
454         //
455         // We can detect case (2) by loading the tag from the start
456         // of the chunk. If it is zero, it means that either all data was
457         // reclaimed (since we never use zero as the chunk tag), or that the
458         // previous allocation was of size zero. Either way, we need to prepare
459         // a new chunk from scratch.
460         //
461         // We can detect case (3) by moving to the next page (if covered by the
462         // chunk) and loading the tag of its first granule. If it is zero, it
463         // means that all following pages may need to be retagged. On the other
464         // hand, if it is nonzero, we can assume that all following pages are
465         // still tagged, according to the logic that if any of the pages
466         // following the next page were reclaimed, the next page would have been
467         // reclaimed as well.
468         uptr TaggedUserPtr;
469         if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
470             PrevUserPtr == UserPtr &&
471             (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
472           uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
473           const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
474           if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
475             PrevEnd = NextPage;
476           TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
477           resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
478           if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
479             // If an allocation needs to be zeroed (i.e. calloc) we can normally
480             // avoid zeroing the memory now since we can rely on memory having
481             // been zeroed on free, as this is normally done while setting the
482             // UAF tag. But if tagging was disabled per-thread when the memory
483             // was freed, it would not have been retagged and thus zeroed, and
484             // therefore it needs to be zeroed now.
485             memset(TaggedPtr, 0,
486                    Min(Size, roundUp(PrevEnd - TaggedUserPtr,
487                                      archMemoryTagGranuleSize())));
488           } else if (Size) {
489             // Clear any stack metadata that may have previously been stored in
490             // the chunk data.
491             memset(TaggedPtr, 0, archMemoryTagGranuleSize());
492           }
493         } else {
494           const uptr OddEvenMask =
495               computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
496           TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
497         }
498         storePrimaryAllocationStackMaybe(Options, Ptr);
499       } else {
500         Block = addHeaderTag(Block);
501         Ptr = addHeaderTag(Ptr);
502         if (UNLIKELY(FillContents != NoFill)) {
503           // This condition is not necessarily unlikely, but since memset is
504           // costly, we might as well mark it as such.
505           memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
506                  PrimaryT::getSizeByClassId(ClassId));
507         }
508       }
509     } else {
510       Block = addHeaderTag(Block);
511       Ptr = addHeaderTag(Ptr);
512       if (UNLIKELY(useMemoryTagging<Config>(Options))) {
513         storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
514         storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
515       }
516     }
517 
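    // Fill in and store the chunk header, located just before the user
    // pointer.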
518     Chunk::UnpackedHeader Header = {};
519     if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
520       const uptr Offset = UserPtr - UnalignedUserPtr;
521       DCHECK_GE(Offset, 2 * sizeof(u32));
522       // The BlockMarker has no security purpose, but is specifically meant for
523       // the chunk iteration function that can be used in debugging situations.
524       // It is the only situation where we have to locate the start of a chunk
525       // based on its block address.
526       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
527       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
528       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
529     }
530     Header.ClassId = ClassId & Chunk::ClassIdMask;
531     Header.State = Chunk::State::Allocated;
532     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
533     Header.SizeOrUnusedBytes =
534         (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
535         Chunk::SizeOrUnusedBytesMask;
536     Chunk::storeHeader(Cookie, Ptr, &Header);
537 
538     if (UNLIKELY(&__scudo_allocate_hook))
539       __scudo_allocate_hook(TaggedPtr, Size);
540 
541     return TaggedPtr;
542   }
543 
544   NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
545                            UNUSED uptr Alignment = MinAlignment) {
546     // For a deallocation, we only ensure minimal initialization, meaning thread
547     // local data will be left uninitialized for now (when using ELF TLS). The
548     // fallback cache will be used instead. This is a workaround for a situation
549     // where the only heap operation performed in a thread would be a free past
550     // the TLS destructors, which would end up initializing thread-specific data
551     // that is never destroyed properly. Any other heap operation does a full init.
552     initThreadMaybe(/*MinimalInit=*/true);
553 
554     if (UNLIKELY(&__scudo_deallocate_hook))
555       __scudo_deallocate_hook(Ptr);
556 
557     if (UNLIKELY(!Ptr))
558       return;
559 
560 #ifdef GWP_ASAN_HOOKS
561     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
562       GuardedAlloc.deallocate(Ptr);
563       Stats.lock();
564       Stats.add(StatFree, GuardedAllocSlotSize);
565       Stats.sub(StatAllocated, GuardedAllocSlotSize);
566       Stats.unlock();
567       return;
568     }
569 #endif // GWP_ASAN_HOOKS
570 
571     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
572       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
573 
574     void *TaggedPtr = Ptr;
575     Ptr = getHeaderTaggedPointer(Ptr);
576 
577     Chunk::UnpackedHeader Header;
578     Chunk::loadHeader(Cookie, Ptr, &Header);
579 
580     if (UNLIKELY(Header.State != Chunk::State::Allocated))
581       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
582 
583     const Options Options = Primary.Options.load();
584     if (Options.get(OptionBit::DeallocTypeMismatch)) {
585       if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
586         // With the exception of memalign'd chunks, which can still be free'd.
587         if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
588             Origin != Chunk::Origin::Malloc)
589           reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
590                                     Header.OriginOrWasZeroed, Origin);
591       }
592     }
593 
594     const uptr Size = getSize(Ptr, &Header);
595     if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
596       if (UNLIKELY(DeleteSize != Size))
597         reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
598     }
599 
600     quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
601   }
602 
603   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
604     initThreadMaybe();
605 
606     const Options Options = Primary.Options.load();
607     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
608       if (Options.get(OptionBit::MayReturnNull))
609         return nullptr;
610       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
611     }
612 
613     // The following cases are handled by the C wrappers.
614     DCHECK_NE(OldPtr, nullptr);
615     DCHECK_NE(NewSize, 0);
616 
617 #ifdef GWP_ASAN_HOOKS
618     if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
619       uptr OldSize = GuardedAlloc.getSize(OldPtr);
620       void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
621       if (NewPtr)
622         memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
623       GuardedAlloc.deallocate(OldPtr);
624       Stats.lock();
625       Stats.add(StatFree, GuardedAllocSlotSize);
626       Stats.sub(StatAllocated, GuardedAllocSlotSize);
627       Stats.unlock();
628       return NewPtr;
629     }
630 #endif // GWP_ASAN_HOOKS
631 
632     void *OldTaggedPtr = OldPtr;
633     OldPtr = getHeaderTaggedPointer(OldPtr);
634 
635     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
636       reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
637 
638     Chunk::UnpackedHeader OldHeader;
639     Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
640 
641     if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
642       reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
643 
644     // Pointer has to be allocated with a malloc-type function. Some
645     // applications think that it is OK to realloc a memalign'ed pointer, which
646     // will trigger this check. It really isn't.
647     if (Options.get(OptionBit::DeallocTypeMismatch)) {
648       if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
649         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
650                                   OldHeader.OriginOrWasZeroed,
651                                   Chunk::Origin::Malloc);
652     }
653 
654     void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
655     uptr BlockEnd;
656     uptr OldSize;
657     const uptr ClassId = OldHeader.ClassId;
658     if (LIKELY(ClassId)) {
659       BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
660                  SizeClassMap::getSizeByClassId(ClassId);
661       OldSize = OldHeader.SizeOrUnusedBytes;
662     } else {
663       BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
664       OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
665                             OldHeader.SizeOrUnusedBytes);
666     }
667     // If the new chunk still fits in the previously allocated block (with a
668     // reasonable delta), we just keep the old block, and update the chunk
669     // header to reflect the size change.
670     if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
671       if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
672         Chunk::UnpackedHeader NewHeader = OldHeader;
673         NewHeader.SizeOrUnusedBytes =
674             (ClassId ? NewSize
675                      : BlockEnd -
676                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
677             Chunk::SizeOrUnusedBytesMask;
678         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
679         if (UNLIKELY(useMemoryTagging<Config>(Options))) {
680           if (ClassId) {
681             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
682                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
683                               NewSize, untagPointer(BlockEnd));
684             storePrimaryAllocationStackMaybe(Options, OldPtr);
685           } else {
686             storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
687           }
688         }
689         return OldTaggedPtr;
690       }
691     }
692 
693     // Otherwise we allocate a new one, and deallocate the old one. Some
694     // allocators will allocate an even larger chunk (by a fixed factor) to
695     // allow for potential further in-place realloc. The gains of such a trick
696     // are currently unclear.
697     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
698     if (LIKELY(NewPtr)) {
699       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
700       if (UNLIKELY(&__scudo_deallocate_hook))
701         __scudo_deallocate_hook(OldTaggedPtr);
702       quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
703     }
704     return NewPtr;
705   }
706 
707   // TODO(kostyak): disable() is currently best-effort. There are some small
708   //                windows of time when an allocation could still succeed after
709   //                this function finishes. We will revisit that later.
710   void disable() NO_THREAD_SAFETY_ANALYSIS {
711     initThreadMaybe();
712 #ifdef GWP_ASAN_HOOKS
713     GuardedAlloc.disable();
714 #endif
715     TSDRegistry.disable();
716     Stats.disable();
717     Quarantine.disable();
718     Primary.disable();
719     Secondary.disable();
720   }
721 
722   void enable() NO_THREAD_SAFETY_ANALYSIS {
723     initThreadMaybe();
724     Secondary.enable();
725     Primary.enable();
726     Quarantine.enable();
727     Stats.enable();
728     TSDRegistry.enable();
729 #ifdef GWP_ASAN_HOOKS
730     GuardedAlloc.enable();
731 #endif
732   }
733 
734   // The function returns the number of bytes required to store the statistics,
735   // which might be larger than the number of bytes provided. Note that the
736   // statistics buffer is not necessarily constant between calls to this
737   // function. This can be called with a null buffer or zero size for buffer
738   // sizing purposes.
739   uptr getStats(char *Buffer, uptr Size) {
740     ScopedString Str;
741     const uptr Length = getStats(&Str) + 1;
742     if (Length < Size)
743       Size = Length;
744     if (Buffer && Size) {
745       memcpy(Buffer, Str.data(), Size);
746       Buffer[Size - 1] = '\0';
747     }
748     return Length;
749   }
750 
751   void printStats() {
752     ScopedString Str;
753     getStats(&Str);
754     Str.output();
755   }
756 
757   void releaseToOS(ReleaseToOS ReleaseType) {
758     initThreadMaybe();
759     if (ReleaseType == ReleaseToOS::ForceAll)
760       drainCaches();
761     Primary.releaseToOS(ReleaseType);
762     Secondary.releaseToOS();
763   }
764 
765   // Iterate over all chunks and call a callback for all busy chunks located
766   // within the provided memory range. Said callback must not use this allocator
767   // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
768   void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
769                          void *Arg) {
770     initThreadMaybe();
771     if (archSupportsMemoryTagging())
772       Base = untagPointer(Base);
773     const uptr From = Base;
774     const uptr To = Base + Size;
775     bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
776                                 systemSupportsMemoryTagging();
777     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
778                    Arg](uptr Block) {
779       if (Block < From || Block >= To)
780         return;
781       uptr Chunk;
782       Chunk::UnpackedHeader Header;
783       if (MayHaveTaggedPrimary) {
784         // A chunk header can either have a zero tag (tagged primary) or the
785         // header tag (secondary, or untagged primary). We don't know which so
786         // try both.
787         ScopedDisableMemoryTagChecks x;
788         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
789             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
790           return;
791       } else {
792         if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
793           return;
794       }
795       if (Header.State == Chunk::State::Allocated) {
796         uptr TaggedChunk = Chunk;
797         if (allocatorSupportsMemoryTagging<Config>())
798           TaggedChunk = untagPointer(TaggedChunk);
799         if (useMemoryTagging<Config>(Primary.Options.load()))
800           TaggedChunk = loadTag(Chunk);
801         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
802                  Arg);
803       }
804     };
805     Primary.iterateOverBlocks(Lambda);
806     Secondary.iterateOverBlocks(Lambda);
807 #ifdef GWP_ASAN_HOOKS
808     GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
809 #endif
810   }
811 
812   bool canReturnNull() {
813     initThreadMaybe();
814     return Primary.Options.load().get(OptionBit::MayReturnNull);
815   }
816 
817   bool setOption(Option O, sptr Value) {
818     initThreadMaybe();
819     if (O == Option::MemtagTuning) {
820       // Enabling odd/even tags involves a tradeoff between use-after-free
821       // detection and buffer overflow detection. Odd/even tags make it more
822       // likely for buffer overflows to be detected by increasing the size of
823       // the guaranteed "red zone" around the allocation, but on the other hand
824       // use-after-free is less likely to be detected because the tag space for
825       // any particular chunk is cut in half. Therefore we use this tuning
826       // setting to control whether odd/even tags are enabled.
827       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
828         Primary.Options.set(OptionBit::UseOddEvenTags);
829       else if (Value == M_MEMTAG_TUNING_UAF)
830         Primary.Options.clear(OptionBit::UseOddEvenTags);
831       return true;
832     } else {
833       // We leave it to the various sub-components to decide whether or not they
834       // want to handle the option, but we do not want to short-circuit
835       // execution if one of the setOption calls were to return false.
836       const bool PrimaryResult = Primary.setOption(O, Value);
837       const bool SecondaryResult = Secondary.setOption(O, Value);
838       const bool RegistryResult = TSDRegistry.setOption(O, Value);
839       return PrimaryResult && SecondaryResult && RegistryResult;
840     }
841     return false;
842   }
843 
844   // Return the usable size for a given chunk. Technically we lie, as we just
845   // report the actual size of a chunk. This is done to counteract code actively
846   // writing past the end of a chunk (like sqlite3) when the usable size allows
847   // for it, which then forces realloc to copy the usable size of a chunk as
848   // opposed to its actual size.
849   uptr getUsableSize(const void *Ptr) {
850     initThreadMaybe();
851     if (UNLIKELY(!Ptr))
852       return 0;
853 
854 #ifdef GWP_ASAN_HOOKS
855     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
856       return GuardedAlloc.getSize(Ptr);
857 #endif // GWP_ASAN_HOOKS
858 
859     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
860     Chunk::UnpackedHeader Header;
861     Chunk::loadHeader(Cookie, Ptr, &Header);
862     // Getting the usable size of a chunk only makes sense if it's allocated.
863     if (UNLIKELY(Header.State != Chunk::State::Allocated))
864       reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
865     return getSize(Ptr, &Header);
866   }
867 
868   void getStats(StatCounters S) {
869     initThreadMaybe();
870     Stats.get(S);
871   }
872 
873   // Returns true if the pointer provided was allocated by the current
874   // allocator instance, which is compliant with tcmalloc's ownership concept.
875   // A corrupted chunk will not be reported as owned, which is WAI.
876   bool isOwned(const void *Ptr) {
877     initThreadMaybe();
878 #ifdef GWP_ASAN_HOOKS
879     if (GuardedAlloc.pointerIsMine(Ptr))
880       return true;
881 #endif // GWP_ASAN_HOOKS
882     if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
883       return false;
884     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
885     Chunk::UnpackedHeader Header;
886     return Chunk::isValid(Cookie, Ptr, &Header) &&
887            Header.State == Chunk::State::Allocated;
888   }
889 
890   void setRssLimitsTestOnly(int SoftRssLimitMb, int HardRssLimitMb,
891                             bool MayReturnNull) {
892     RssChecker.init(SoftRssLimitMb, HardRssLimitMb);
893     if (MayReturnNull)
894       Primary.Options.set(OptionBit::MayReturnNull);
895   }
896 
897   bool useMemoryTaggingTestOnly() const {
898     return useMemoryTagging<Config>(Primary.Options.load());
899   }
900   void disableMemoryTagging() {
901     // If we haven't been initialized yet, we need to initialize now in order to
902     // prevent a future call to initThreadMaybe() from enabling memory tagging
903     // based on feature detection. But don't call initThreadMaybe() because it
904     // may end up calling the allocator (via pthread_atfork, via the post-init
905     // callback), which may cause mappings to be created with memory tagging
906     // enabled.
907     TSDRegistry.initOnceMaybe(this);
908     if (allocatorSupportsMemoryTagging<Config>()) {
909       Secondary.disableMemoryTagging();
910       Primary.Options.clear(OptionBit::UseMemoryTagging);
911     }
912   }
913 
914   void setTrackAllocationStacks(bool Track) {
915     initThreadMaybe();
916     if (getFlags()->allocation_ring_buffer_size == 0) {
917       DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
918       return;
919     }
920     if (Track)
921       Primary.Options.set(OptionBit::TrackAllocationStacks);
922     else
923       Primary.Options.clear(OptionBit::TrackAllocationStacks);
924   }
925 
926   void setFillContents(FillContentsMode FillContents) {
927     initThreadMaybe();
928     Primary.Options.setFillContentsMode(FillContents);
929   }
930 
931   void setAddLargeAllocationSlack(bool AddSlack) {
932     initThreadMaybe();
933     if (AddSlack)
934       Primary.Options.set(OptionBit::AddLargeAllocationSlack);
935     else
936       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
937   }
938 
939   const char *getStackDepotAddress() const {
940     return reinterpret_cast<const char *>(&Depot);
941   }
942 
943   const char *getRegionInfoArrayAddress() const {
944     return Primary.getRegionInfoArrayAddress();
945   }
946 
947   static uptr getRegionInfoArraySize() {
948     return PrimaryT::getRegionInfoArraySize();
949   }
950 
951   const char *getRingBufferAddress() {
952     initThreadMaybe();
953     return RawRingBuffer;
954   }
955 
956   uptr getRingBufferSize() {
957     initThreadMaybe();
958     auto *RingBuffer = getRingBuffer();
959     return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
960   }
961 
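  // Set up an externally provided buffer as an allocation ring buffer,
  // recording how many entries fit after the header. Returns false if the
  // buffer cannot hold at least one entry.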
962   static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
963     // Need at least one entry.
964     if (Size < sizeof(AllocationRingBuffer) +
965                    sizeof(typename AllocationRingBuffer::Entry)) {
966       return false;
967     }
968     AllocationRingBuffer *RingBuffer =
969         reinterpret_cast<AllocationRingBuffer *>(Buffer);
970     RingBuffer->Size = (Size - sizeof(AllocationRingBuffer)) /
971                        sizeof(typename AllocationRingBuffer::Entry);
972     return true;
973   }
974 
975   static const uptr MaxTraceSize = 64;
976 
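  // Copy the stack trace identified by Hash from the depot into Trace, up to
  // MaxTraceSize frames. Does nothing if the hash is not found.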
977   static void collectTraceMaybe(const StackDepot *Depot,
978                                 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
979     uptr RingPos, Size;
980     if (!Depot->find(Hash, &RingPos, &Size))
981       return;
982     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
983       Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
984   }
985 
986   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
987                            uintptr_t FaultAddr, const char *DepotPtr,
988                            const char *RegionInfoPtr, const char *RingBufferPtr,
989                            const char *Memory, const char *MemoryTags,
990                            uintptr_t MemoryAddr, size_t MemorySize) {
991     *ErrorInfo = {};
992     if (!allocatorSupportsMemoryTagging<Config>() ||
993         MemoryAddr + MemorySize < MemoryAddr)
994       return;
995 
996     auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
997     size_t NextErrorReport = 0;
998 
999     // Check for OOB in the current block and the two surrounding blocks. Beyond
1000     // that, UAF is more likely.
1001     if (extractTag(FaultAddr) != 0)
1002       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
1003                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
1004                          MemorySize, 0, 2);
1005 
1006     // Check the ring buffer. For primary allocations this will only find UAF;
1007     // for secondary allocations we can find either UAF or OOB.
1008     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
1009                            RingBufferPtr);
1010 
1011     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
1012     // Beyond that we are likely to hit false positives.
1013     if (extractTag(FaultAddr) != 0)
1014       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
1015                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
1016                          MemorySize, 2, 16);
1017   }
1018 
1019 private:
1020   typedef typename PrimaryT::SizeClassMap SizeClassMap;
1021 
1022   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
1023   static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
1024   static const uptr MinAlignment = 1UL << MinAlignmentLog;
1025   static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
1026   static const uptr MaxAllowedMallocSize =
1027       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
1028 
1029   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
1030                 "Minimal alignment must at least cover a chunk header.");
1031   static_assert(!allocatorSupportsMemoryTagging<Config>() ||
1032                     MinAlignment >= archMemoryTagGranuleSize(),
1033                 "");
1034 
1035   static const u32 BlockMarker = 0x44554353U;
1036 
1037   // These are indexes into an "array" of 32-bit values that store information
1038   // inline with a chunk that is relevant to diagnosing memory tag faults, where
1039   // 0 corresponds to the address of the user memory. This means that only
1040   // negative indexes may be used. The smallest index that may be used is -2,
1041   // which corresponds to 8 bytes before the user memory, because the chunk
1042   // header size is 8 bytes and in allocators that support memory tagging the
1043   // minimum alignment is at least the tag granule size (16 on aarch64).
1044   static const sptr MemTagAllocationTraceIndex = -2;
1045   static const sptr MemTagAllocationTidIndex = -1;
1046 
1047   u32 Cookie = 0;
1048   u32 QuarantineMaxChunkSize = 0;
1049 
1050   GlobalStats Stats;
1051   PrimaryT Primary;
1052   SecondaryT Secondary;
1053   QuarantineT Quarantine;
1054   TSDRegistryT TSDRegistry;
1055   pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
1056   RssLimitChecker RssChecker;
1057 
1058 #ifdef GWP_ASAN_HOOKS
1059   gwp_asan::GuardedPoolAllocator GuardedAlloc;
1060   uptr GuardedAllocSlotSize = 0;
1061 #endif // GWP_ASAN_HOOKS
1062 
1063   StackDepot Depot;
1064 
1065   struct AllocationRingBuffer {
1066     struct Entry {
1067       atomic_uptr Ptr;
1068       atomic_uptr AllocationSize;
1069       atomic_u32 AllocationTrace;
1070       atomic_u32 AllocationTid;
1071       atomic_u32 DeallocationTrace;
1072       atomic_u32 DeallocationTid;
1073     };
1074 
1075     atomic_uptr Pos;
1076     u32 Size;
1077     // An array of Size (at least one) elements of type Entry immediately
1078     // follows this struct.
1079   };
1080   // Pointer to memory mapped area starting with AllocationRingBuffer struct,
1081   // and immediately followed by Size elements of type Entry.
1082   char *RawRingBuffer = {};
1083 
1084   // The following might get optimized out by the compiler.
1085   NOINLINE void performSanityChecks() {
1086     // Verify that the header offset field can hold the maximum offset. In the
1087     // case of the Secondary allocator, it takes care of alignment and the
1088     // offset will always be small. In the case of the Primary, the worst case
1089     // scenario happens in the last size class, when the backend allocation
1090     // would already be aligned on the requested alignment, which would happen
1091     // to be the maximum alignment that would fit in that size class. As a
1092     // result, the maximum offset will be at most the maximum alignment for the
1093     // last size class minus the header size, in multiples of MinAlignment.
1094     Chunk::UnpackedHeader Header = {};
1095     const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1096                                          SizeClassMap::MaxSize - MinAlignment);
1097     const uptr MaxOffset =
1098         (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1099     Header.Offset = MaxOffset & Chunk::OffsetMask;
1100     if (UNLIKELY(Header.Offset != MaxOffset))
1101       reportSanityCheckError("offset");
1102 
1103     // Verify that we can fit the maximum size or amount of unused bytes in the
1104     // header. Given that the Secondary fits the allocation to a page, the worst
1105     // case scenario happens in the Primary. It will depend on the second to
1106     // last and last class sizes, as well as the dynamic base for the Primary.
1107     // The following is an over-approximation that works for our needs.
1108     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1109     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1110     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1111       reportSanityCheckError("size (or unused bytes)");
1112 
1113     const uptr LargestClassId = SizeClassMap::LargestClassId;
1114     Header.ClassId = LargestClassId;
1115     if (UNLIKELY(Header.ClassId != LargestClassId))
1116       reportSanityCheckError("class ID");
1117   }
1118 
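  // Walk back from a chunk pointer to the beginning of its block, undoing the
  // header size and the alignment offset recorded in the header.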
1119   static inline void *getBlockBegin(const void *Ptr,
1120                                     Chunk::UnpackedHeader *Header) {
1121     return reinterpret_cast<void *>(
1122         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1123         (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1124   }
1125 
1126   // Return the size of a chunk as requested during its allocation.
1127   inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1128     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1129     if (LIKELY(Header->ClassId))
1130       return SizeOrUnusedBytes;
1131     if (allocatorSupportsMemoryTagging<Config>())
1132       Ptr = untagPointer(const_cast<void *>(Ptr));
1133     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1134            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1135   }
1136 
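  // Either place the chunk in the quarantine or return it to the backend
  // (Primary cache or Secondary), retagging the user memory as needed when
  // memory tagging is in use.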
1137   void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
1138                                    Chunk::UnpackedHeader *Header,
1139                                    uptr Size) NO_THREAD_SAFETY_ANALYSIS {
1140     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
1141     Chunk::UnpackedHeader NewHeader = *Header;
1142     // If the quarantine is disabled, or the actual size of a chunk is 0 or
1143     // larger than the maximum allowed, return the chunk directly to the backend.
1144     // This purposefully underflows for Size == 0.
1145     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1146                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
1147                                   !NewHeader.ClassId;
1148     if (BypassQuarantine)
1149       NewHeader.State = Chunk::State::Available;
1150     else
1151       NewHeader.State = Chunk::State::Quarantined;
1152     NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
1153                                   NewHeader.ClassId &&
1154                                   !TSDRegistry.getDisableMemInit();
1155     Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
1156 
1157     if (UNLIKELY(useMemoryTagging<Config>(Options))) {
1158       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1159       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1160       if (NewHeader.ClassId) {
1161         if (!TSDRegistry.getDisableMemInit()) {
1162           uptr TaggedBegin, TaggedEnd;
1163           const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1164               Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
1165               NewHeader.ClassId);
1166           // Exclude the previous tag so that immediate use after free is
1167           // detected 100% of the time.
1168           setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1169                        &TaggedEnd);
1170         }
1171       }
1172     }
1173     if (BypassQuarantine) {
1174       if (allocatorSupportsMemoryTagging<Config>())
1175         Ptr = untagPointer(Ptr);
1176       void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
1177       const uptr ClassId = NewHeader.ClassId;
1178       if (LIKELY(ClassId)) {
1179         bool UnlockRequired;
1180         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1181         const bool CacheDrained =
1182             TSD->getCache().deallocate(ClassId, BlockBegin);
1183         if (UnlockRequired)
1184           TSD->unlock();
1185         // When we have drained some blocks back to the Primary from TSD, that
1186         // implies that we may have the chance to release some pages as well.
1187         // Note that in order not to block other threads from accessing the TSD,
1188         // release the TSD first and then try the page release.
1189         if (CacheDrained)
1190           Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
1191       } else {
1192         if (UNLIKELY(useMemoryTagging<Config>(Options)))
1193           storeTags(reinterpret_cast<uptr>(BlockBegin),
1194                     reinterpret_cast<uptr>(Ptr));
1195         Secondary.deallocate(Options, BlockBegin);
1196       }
1197     } else {
1198       bool UnlockRequired;
1199       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1200       Quarantine.put(&TSD->getQuarantineCache(),
1201                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
1202       if (UnlockRequired)
1203         TSD->unlock();
1204     }
1205   }
1206 
1207   bool getChunkFromBlock(uptr Block, uptr *Chunk,
1208                          Chunk::UnpackedHeader *Header) {
1209     *Chunk =
1210         Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1211     return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1212   }
1213 
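  // The chunk starts Chunk::getHeaderSize() bytes into the block, plus the
  // alignment offset that allocate() stored after the BlockMarker, if any.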
1214   static uptr getChunkOffsetFromBlock(const char *Block) {
1215     u32 Offset = 0;
1216     if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1217       Offset = reinterpret_cast<const u32 *>(Block)[1];
1218     return Offset + Chunk::getHeaderSize();
1219   }
1220 
1221   // Set the tag of the granule past the end of the allocation to 0, to catch
1222   // linear overflows even if a previous larger allocation used the same block
1223   // and tag. Only do this if the granule past the end is in our block, because
1224   // this would otherwise lead to a SEGV if the allocation covers the entire
1225   // block and our block is at the end of a mapping. The tag of the next block's
1226   // header granule will be set to 0, so it will serve the purpose of catching
1227   // linear overflows in this case.
1228   //
1229   // For allocations of size 0 we do not end up storing the address tag to the
1230   // memory tag space, which getInlineErrorInfo() normally relies on to match
1231   // address tags against chunks. To allow matching in this case we store the
1232   // address tag in the first byte of the chunk.
1233   void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1234     DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1235     uptr UntaggedEnd = untagPointer(End);
1236     if (UntaggedEnd != BlockEnd) {
1237       storeTag(UntaggedEnd);
1238       if (Size == 0)
1239         *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1240     }
1241   }
1242 
1243   void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
1244                            uptr BlockEnd) {
1245     // Prepare the granule before the chunk to store the chunk header by setting
1246     // its tag to 0. Normally its tag will already be 0, but in the case where a
1247     // chunk holding a low alignment allocation is reused for a higher alignment
1248     // allocation, the chunk may already have a non-zero tag from the previous
1249     // allocation.
1250     storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1251 
1252     uptr TaggedBegin, TaggedEnd;
1253     setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
1254 
1255     storeEndMarker(TaggedEnd, Size, BlockEnd);
1256     return reinterpret_cast<void *>(TaggedBegin);
1257   }
1258 
1259   void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1260                          uptr BlockEnd) {
1261     uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
1262     uptr RoundNewPtr;
1263     if (RoundOldPtr >= NewPtr) {
1264       // If the allocation is shrinking we just need to set the tag past the end
1265       // of the allocation to 0. See explanation in storeEndMarker() above.
1266       RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
1267     } else {
1268       // Set the memory tag of the region
1269       // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
1270       // to the pointer tag stored in OldPtr.
1271       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
1272     }
1273     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
1274   }
1275 
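  // If stack trace tracking is enabled, stash the allocation trace and thread
  // ID in the first two 32-bit words of the chunk, where getInlineErrorInfo()
  // looks them up.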
1276   void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
1277     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1278       return;
1279     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1280     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
1281     Ptr32[MemTagAllocationTidIndex] = getThreadID();
1282   }
1283 
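  // Record an allocation or deallocation event in the ring buffer. Slots are
  // claimed with a relaxed fetch-add on Pos, so the buffer wraps around and
  // older entries get overwritten.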
1284   void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1285                             uptr AllocationSize, u32 DeallocationTrace,
1286                             u32 DeallocationTid) {
1287     uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
1288     typename AllocationRingBuffer::Entry *Entry =
1289         getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
1290 
1291     // First invalidate our entry so that we don't attempt to interpret a
1292     // partially written state in getRingBufferErrorInfo(). The fences below
1293     // ensure that the compiler does not move the stores to Ptr in between the
1294     // stores to the other fields.
1295     atomic_store_relaxed(&Entry->Ptr, 0);
1296 
1297     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1298     atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1299     atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1300     atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1301     atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1302     atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1303     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1304 
1305     atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1306   }
1307 
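  // Secondary allocations get the same inline trace/thread ID treatment, and
  // additionally an allocation entry in the ring buffer.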
1308   void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
1309                                           uptr Size) {
1310     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1311       return;
1312 
1313     u32 Trace = collectStackTrace();
1314     u32 Tid = getThreadID();
1315 
1316     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1317     Ptr32[MemTagAllocationTraceIndex] = Trace;
1318     Ptr32[MemTagAllocationTidIndex] = Tid;
1319 
1320     storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1321   }
1322 
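  // On deallocation, pair the inline allocation trace/thread ID with freshly
  // collected deallocation ones and record them in the ring buffer. The entry
  // keeps the tag the chunk carried while it was live (PrevTag).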
1323   void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
1324                                    uptr Size) {
1325     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1326       return;
1327 
1328     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1329     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
1330     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
1331 
1332     u32 DeallocationTrace = collectStackTrace();
1333     u32 DeallocationTid = getThreadID();
1334 
1335     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1336                          AllocationTrace, AllocationTid, Size,
1337                          DeallocationTrace, DeallocationTid);
1338   }
1339 
1340   static const size_t NumErrorReports =
1341       sizeof(((scudo_error_info *)nullptr)->reports) /
1342       sizeof(((scudo_error_info *)nullptr)->reports[0]);
1343 
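  // Scan the blocks around the faulting address for an allocated chunk whose
  // memory tag matches the tag of the fault address, and report it as a
  // buffer overflow or underflow candidate. MinDistance and MaxDistance bound
  // the search, in blocks, on either side of the faulting block.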
1344   static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
1345                                  size_t &NextErrorReport, uintptr_t FaultAddr,
1346                                  const StackDepot *Depot,
1347                                  const char *RegionInfoPtr, const char *Memory,
1348                                  const char *MemoryTags, uintptr_t MemoryAddr,
1349                                  size_t MemorySize, size_t MinDistance,
1350                                  size_t MaxDistance) {
1351     uptr UntaggedFaultAddr = untagPointer(FaultAddr);
1352     u8 FaultAddrTag = extractTag(FaultAddr);
1353     BlockInfo Info =
1354         PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
1355 
1356     auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1357       if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
1358           Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
1359         return false;
1360       *Data = &Memory[Addr - MemoryAddr];
1361       *Tag = static_cast<u8>(
1362           MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1363       return true;
1364     };
1365 
1366     auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
1367                          Chunk::UnpackedHeader *Header, const u32 **Data,
1368                          u8 *Tag) {
1369       const char *BlockBegin;
1370       u8 BlockBeginTag;
1371       if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
1372         return false;
1373       uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
1374       *ChunkAddr = Addr + ChunkOffset;
1375 
1376       const char *ChunkBegin;
1377       if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
1378         return false;
1379       *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
1380           ChunkBegin - Chunk::getHeaderSize());
1381       *Data = reinterpret_cast<const u32 *>(ChunkBegin);
1382 
1383       // Allocations of size 0 will have stashed the tag in the first byte of
1384       // the chunk, see storeEndMarker().
1385       if (Header->SizeOrUnusedBytes == 0)
1386         *Tag = static_cast<u8>(*ChunkBegin);
1387 
1388       return true;
1389     };
1390 
1391     if (NextErrorReport == NumErrorReports)
1392       return;
1393 
1394     auto CheckOOB = [&](uptr BlockAddr) {
1395       if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
1396         return false;
1397 
1398       uptr ChunkAddr;
1399       Chunk::UnpackedHeader Header;
1400       const u32 *Data;
1401       uint8_t Tag;
1402       if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
1403           Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
1404         return false;
1405 
1406       auto *R = &ErrorInfo->reports[NextErrorReport++];
1407       R->error_type =
1408           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
1409       R->allocation_address = ChunkAddr;
1410       R->allocation_size = Header.SizeOrUnusedBytes;
1411       collectTraceMaybe(Depot, R->allocation_trace,
1412                         Data[MemTagAllocationTraceIndex]);
1413       R->allocation_tid = Data[MemTagAllocationTidIndex];
1414       return NextErrorReport == NumErrorReports;
1415     };
1416 
1417     if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
1418       return;
1419 
1420     for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
1421       if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
1422           CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1423         return;
1424   }
1425 
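  // Walk the allocation ring buffer from the most recent entry backwards,
  // matching the fault address against recorded allocations: entries with a
  // recorded deallocation produce use-after-free reports, the others produce
  // overflow/underflow reports.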
1426   static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
1427                                      size_t &NextErrorReport,
1428                                      uintptr_t FaultAddr,
1429                                      const StackDepot *Depot,
1430                                      const char *RingBufferPtr) {
1431     auto *RingBuffer =
1432         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
1433     if (!RingBuffer || RingBuffer->Size == 0)
1434       return;
1435     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1436 
1437     for (uptr I = Pos - 1;
1438          I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
1439          --I) {
1440       auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
1441       uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1442       if (!EntryPtr)
1443         continue;
1444 
1445       uptr UntaggedEntryPtr = untagPointer(EntryPtr);
1446       uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1447       u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1448       u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1449       u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1450       u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1451 
1452       if (DeallocationTid) {
1453         // For UAF we only consider in-bounds fault addresses because
1454         // out-of-bounds UAF is rare and attempting to detect it is very likely
1455         // to result in false positives.
1456         if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
1457           continue;
1458       } else {
1459         // Ring buffer OOB is only possible with secondary allocations. In this
1460         // case we are guaranteed a guard region of at least a page on either
1461         // side of the allocation (guard page on the right, guard page + tagged
1462         // region on the left), so ignore any faults outside of that range.
1463         if (FaultAddr < EntryPtr - getPageSizeCached() ||
1464             FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
1465           continue;
1466 
1467         // For UAF the ring buffer will contain two entries, one for the
1468         // allocation and another for the deallocation. Don't report buffer
1469         // overflow/underflow using the allocation entry if we have already
1470         // collected a report from the deallocation entry.
1471         bool Found = false;
1472         for (uptr J = 0; J != NextErrorReport; ++J) {
1473           if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1474             Found = true;
1475             break;
1476           }
1477         }
1478         if (Found)
1479           continue;
1480       }
1481 
1482       auto *R = &ErrorInfo->reports[NextErrorReport++];
1483       if (DeallocationTid)
1484         R->error_type = USE_AFTER_FREE;
1485       else if (FaultAddr < EntryPtr)
1486         R->error_type = BUFFER_UNDERFLOW;
1487       else
1488         R->error_type = BUFFER_OVERFLOW;
1489 
1490       R->allocation_address = UntaggedEntryPtr;
1491       R->allocation_size = EntrySize;
1492       collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1493       R->allocation_tid = AllocationTid;
1494       collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1495       R->deallocation_tid = DeallocationTid;
1496     }
1497   }
1498 
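  // Append the statistics of the primary, the secondary, the quarantine and
  // the TSD registry to Str, and return its resulting length.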
1499   uptr getStats(ScopedString *Str) {
1500     Primary.getStats(Str);
1501     Secondary.getStats(Str);
1502     Quarantine.getStats(Str);
1503     TSDRegistry.getStats(Str);
1504     return Str->length();
1505   }
1506 
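  // Ring buffer entries are laid out immediately after the
  // AllocationRingBuffer header in the raw mapping.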
1507   static typename AllocationRingBuffer::Entry *
1508   getRingBufferEntry(char *RawRingBuffer, uptr N) {
1509     return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
1510         &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1511   }
1512   static const typename AllocationRingBuffer::Entry *
1513   getRingBufferEntry(const char *RawRingBuffer, uptr N) {
1514     return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
1515         &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1516   }
1517 
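  // Map a page-aligned region sized for the ring buffer header plus the
  // number of entries requested via the allocation_ring_buffer_size flag; a
  // value below 1 leaves the ring buffer disabled.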
1518   void mapAndInitializeRingBuffer() {
1519     u32 AllocationRingBufferSize =
1520         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
1521     if (AllocationRingBufferSize < 1)
1522       return;
1523     RawRingBuffer = static_cast<char *>(
1524         map(/*Addr=*/nullptr,
1525             roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
1526                     getPageSizeCached()),
1527             "AllocatorRingBuffer"));
1528     auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1529     RingBuffer->Size = AllocationRingBufferSize;
1530     static_assert(sizeof(AllocationRingBuffer) %
1531                           alignof(typename AllocationRingBuffer::Entry) ==
1532                       0,
1533                   "invalid alignment");
1534   }
1535 
1536   void unmapRingBuffer() {
1537     unmap(RawRingBuffer, roundUp(getRingBufferSize(), getPageSizeCached()));
1538     RawRingBuffer = nullptr;
1539   }
1540 
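  // Total byte size of the ring buffer mapping: the header followed by
  // AllocationRingBufferSize entries.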
1541   static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
1542     return sizeof(AllocationRingBuffer) +
1543            AllocationRingBufferSize *
1544                sizeof(typename AllocationRingBuffer::Entry);
1545   }
1546 
1547   inline AllocationRingBuffer *getRingBuffer() {
1548     return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1549   }
1550 };
1551 
1552 } // namespace scudo
1553 
1554 #endif // SCUDO_COMBINED_H_
1555