xref: /freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h (revision 8ddb146abcdf061be9f2c0db7e391697dafad85c)
1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11 
12 #include "chunk.h"
13 #include "common.h"
14 #include "flags.h"
15 #include "flags_parser.h"
16 #include "local_cache.h"
17 #include "memtag.h"
18 #include "options.h"
19 #include "quarantine.h"
20 #include "report.h"
21 #include "secondary.h"
22 #include "stack_depot.h"
23 #include "string_utils.h"
24 #include "tsd.h"
25 
26 #include "scudo/interface.h"
27 
28 #ifdef GWP_ASAN_HOOKS
29 #include "gwp_asan/guarded_pool_allocator.h"
30 #include "gwp_asan/optional/backtrace.h"
31 #include "gwp_asan/optional/segv_handler.h"
32 #endif // GWP_ASAN_HOOKS
33 
34 extern "C" inline void EmptyCallback() {}
35 
36 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
37 // This function is not part of the NDK so it does not appear in any public
38 // header files. We only declare/use it when targeting the platform.
39 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
40                                                      size_t num_entries);
41 #endif
42 
43 namespace scudo {
44 
45 template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
46 class Allocator {
47 public:
48   using PrimaryT = typename Params::Primary;
49   using CacheT = typename PrimaryT::CacheT;
50   typedef Allocator<Params, PostInitCallback> ThisT;
51   typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
52 
53   void callPostInitCallback() {
54     pthread_once(&PostInitNonce, PostInitCallback);
55   }
56 
57   struct QuarantineCallback {
58     explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
59         : Allocator(Instance), Cache(LocalCache) {}
60 
61     // Chunk recycling function, returns a quarantined chunk to the backend,
62     // first making sure it hasn't been tampered with.
63     void recycle(void *Ptr) {
64       Chunk::UnpackedHeader Header;
65       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
66       if (UNLIKELY(Header.State != Chunk::State::Quarantined))
67         reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
68 
69       Chunk::UnpackedHeader NewHeader = Header;
70       NewHeader.State = Chunk::State::Available;
71       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
72 
73       if (allocatorSupportsMemoryTagging<Params>())
74         Ptr = untagPointer(Ptr);
75       void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
76       Cache.deallocate(NewHeader.ClassId, BlockBegin);
77     }
78 
79     // We take a shortcut when allocating a quarantine batch by working with the
80     // appropriate class ID instead of using Size. The compiler should optimize
81     // the class ID computation and work with the associated cache directly.
82     void *allocate(UNUSED uptr Size) {
83       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
84           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
85       void *Ptr = Cache.allocate(QuarantineClassId);
86       // Quarantine batch allocation failure is fatal.
87       if (UNLIKELY(!Ptr))
88         reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
89 
90       Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
91                                      Chunk::getHeaderSize());
92       Chunk::UnpackedHeader Header = {};
93       Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
94       Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
95       Header.State = Chunk::State::Allocated;
96       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
97 
98       // Reset tag to 0 as this chunk may have been previously used for a tagged
99       // user allocation.
100       if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
101         storeTags(reinterpret_cast<uptr>(Ptr),
102                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
103 
104       return Ptr;
105     }
106 
107     void deallocate(void *Ptr) {
108       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
109           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
110       Chunk::UnpackedHeader Header;
111       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
112 
113       if (UNLIKELY(Header.State != Chunk::State::Allocated))
114         reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
115       DCHECK_EQ(Header.ClassId, QuarantineClassId);
116       DCHECK_EQ(Header.Offset, 0);
117       DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
118 
119       Chunk::UnpackedHeader NewHeader = Header;
120       NewHeader.State = Chunk::State::Available;
121       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
122       Cache.deallocate(QuarantineClassId,
123                        reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
124                                                 Chunk::getHeaderSize()));
125     }
126 
127   private:
128     ThisT &Allocator;
129     CacheT &Cache;
130   };
131 
132   typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
133   typedef typename QuarantineT::CacheT QuarantineCacheT;
134 
135   void init() {
136     performSanityChecks();
137 
138     // Check if hardware CRC32 is supported in the binary and by the platform;
139     // if so, opt for the CRC32 hardware version of the checksum.
140     if (&computeHardwareCRC32 && hasHardwareCRC32())
141       HashAlgorithm = Checksum::HardwareCRC32;
142 
143     if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
144       Cookie = static_cast<u32>(getMonotonicTime() ^
145                                 (reinterpret_cast<uptr>(this) >> 4));
146 
147     initFlags();
148     reportUnrecognizedFlags();
149 
150     // Store some flags locally.
151     if (getFlags()->may_return_null)
152       Primary.Options.set(OptionBit::MayReturnNull);
153     if (getFlags()->zero_contents)
154       Primary.Options.setFillContentsMode(ZeroFill);
155     else if (getFlags()->pattern_fill_contents)
156       Primary.Options.setFillContentsMode(PatternOrZeroFill);
157     if (getFlags()->dealloc_type_mismatch)
158       Primary.Options.set(OptionBit::DeallocTypeMismatch);
159     if (getFlags()->delete_size_mismatch)
160       Primary.Options.set(OptionBit::DeleteSizeMismatch);
161     if (allocatorSupportsMemoryTagging<Params>() &&
162         systemSupportsMemoryTagging())
163       Primary.Options.set(OptionBit::UseMemoryTagging);
164     Primary.Options.set(OptionBit::UseOddEvenTags);
165 
166     QuarantineMaxChunkSize =
167         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
168 
169     Stats.init();
170     const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
171     Primary.init(ReleaseToOsIntervalMs);
172     Secondary.init(&Stats, ReleaseToOsIntervalMs);
173     Quarantine.init(
174         static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
175         static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
176   }
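  // Illustrative note (not part of the upstream sources): the flags read
  // above come from the flag parser, typically seeded from the SCUDO_OPTIONS
  // environment variable, so a deployment could use e.g.
  //   SCUDO_OPTIONS=quarantine_size_kb=256:thread_local_quarantine_size_kb=64:quarantine_max_chunk_size=2048:dealloc_type_mismatch=true
  // to enable a 256 KB quarantine with 64 KB thread-local caches.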
177 
178   // Initialize the embedded GWP-ASan instance. Requires the main allocator to
179   // be functional, best called from PostInitCallback.
180   void initGwpAsan() {
181 #ifdef GWP_ASAN_HOOKS
182     gwp_asan::options::Options Opt;
183     Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
184     Opt.MaxSimultaneousAllocations =
185         getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
186     Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
187     Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
188     // Embedded GWP-ASan is locked through the Scudo atfork handler (via
189     // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
190     // handler.
191     Opt.InstallForkHandlers = false;
192     Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
193     GuardedAlloc.init(Opt);
194 
195     if (Opt.InstallSignalHandlers)
196       gwp_asan::segv_handler::installSignalHandlers(
197           &GuardedAlloc, Printf,
198           gwp_asan::backtrace::getPrintBacktraceFunction(),
199           gwp_asan::backtrace::getSegvBacktraceFunction());
200 
201     GuardedAllocSlotSize =
202         GuardedAlloc.getAllocatorState()->maximumAllocationSize();
203     Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
204                             GuardedAllocSlotSize);
205 #endif // GWP_ASAN_HOOKS
206   }
207 
208 #ifdef GWP_ASAN_HOOKS
209   const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
210     return GuardedAlloc.getMetadataRegion();
211   }
212 
213   const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
214     return GuardedAlloc.getAllocatorState();
215   }
216 #endif // GWP_ASAN_HOOKS
217 
218   ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
219     TSDRegistry.initThreadMaybe(this, MinimalInit);
220   }
221 
222   void unmapTestOnly() {
223     TSDRegistry.unmapTestOnly(this);
224     Primary.unmapTestOnly();
225     Secondary.unmapTestOnly();
226 #ifdef GWP_ASAN_HOOKS
227     if (getFlags()->GWP_ASAN_InstallSignalHandlers)
228       gwp_asan::segv_handler::uninstallSignalHandlers();
229     GuardedAlloc.uninitTestOnly();
230 #endif // GWP_ASAN_HOOKS
231   }
232 
233   TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
234 
235   // The Cache must be provided zero-initialized.
236   void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
237 
238   // Release the resources used by a TSD, which involves:
239   // - draining the local quarantine cache to the global quarantine;
240   // - releasing the cached pointers back to the Primary;
241   // - unlinking the local stats from the global ones (destroying the cache does
242   //   the last two items).
243   void commitBack(TSD<ThisT> *TSD) {
244     Quarantine.drain(&TSD->QuarantineCache,
245                      QuarantineCallback(*this, TSD->Cache));
246     TSD->Cache.destroy(&Stats);
247   }
248 
249   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
250     if (!allocatorSupportsMemoryTagging<Params>())
251       return Ptr;
252     auto UntaggedPtr = untagPointer(Ptr);
253     if (UntaggedPtr != Ptr)
254       return UntaggedPtr;
255     // Secondary, or pointer allocated while memory tagging is unsupported or
256     // disabled. The tag mismatch is okay in the latter case because tags will
257     // not be checked.
258     return addHeaderTag(Ptr);
259   }
260 
261   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
262     if (!allocatorSupportsMemoryTagging<Params>())
263       return Ptr;
264     return addFixedTag(Ptr, 2);
265   }
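  // Illustrative note: chunk headers are accessed either with tag 0 (tagged
  // Primary chunks) or with this fixed tag 2 (Secondary chunks, or Primary
  // chunks while tagging is disabled); iterateOverChunks() below has to try
  // both because it cannot tell which case applies to a given block.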
266 
267   ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
268     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
269   }
270 
271   NOINLINE u32 collectStackTrace() {
272 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
273     // Discard collectStackTrace() frame and allocator function frame.
274     constexpr uptr DiscardFrames = 2;
275     uptr Stack[MaxTraceSize + DiscardFrames];
276     uptr Size =
277         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
278     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
279     return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
280 #else
281     return 0;
282 #endif
283   }
284 
285   uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
286                                          uptr ClassId) {
287     if (!Options.get(OptionBit::UseOddEvenTags))
288       return 0;
289 
290     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
291     // even, and vice versa. Blocks are laid out Size bytes apart, and adding
292     // Size to Ptr will flip the least significant set bit of Size in Ptr, so
293     // that bit will have the pattern 010101... for consecutive blocks, which we
294     // can use to determine which tag mask to use.
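    // Worked example (illustrative): for a size class of 0x40 bytes,
    // getSizeLSBByClassId() is 6, so two adjacent blocks at 0x1000 and 0x1040
    // differ in bit 6 of their address; one gets the exclude mask 0x5555
    // (even tags excluded, an odd tag is chosen) and its neighbor 0xAAAA
    // (odd tags excluded, an even tag is chosen).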
295     return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
296   }
297 
298   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
299                           uptr Alignment = MinAlignment,
300                           bool ZeroContents = false) {
301     initThreadMaybe();
302 
303     const Options Options = Primary.Options.load();
304     if (UNLIKELY(Alignment > MaxAlignment)) {
305       if (Options.get(OptionBit::MayReturnNull))
306         return nullptr;
307       reportAlignmentTooBig(Alignment, MaxAlignment);
308     }
309     if (Alignment < MinAlignment)
310       Alignment = MinAlignment;
311 
312 #ifdef GWP_ASAN_HOOKS
313     if (UNLIKELY(GuardedAlloc.shouldSample())) {
314       if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
315         if (UNLIKELY(&__scudo_allocate_hook))
316           __scudo_allocate_hook(Ptr, Size);
317         Stats.lock();
318         Stats.add(StatAllocated, GuardedAllocSlotSize);
319         Stats.sub(StatFree, GuardedAllocSlotSize);
320         Stats.unlock();
321         return Ptr;
322       }
323     }
324 #endif // GWP_ASAN_HOOKS
325 
326     const FillContentsMode FillContents = ZeroContents ? ZeroFill
327                                           : TSDRegistry.getDisableMemInit()
328                                               ? NoFill
329                                               : Options.getFillContentsMode();
330 
331     // If the requested size happens to be 0 (more common than you might think),
332     // allocate MinAlignment bytes on top of the header. Then add the extra
333     // bytes required to fulfill the alignment requirements: we allocate enough
334     // to be sure that there will be an address in the block that will satisfy
335     // the alignment.
336     const uptr NeededSize =
337         roundUpTo(Size, MinAlignment) +
338         ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
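    // Worked example (illustrative, assuming 16-byte MinAlignment and the
    // 8-byte chunk header described further down): malloc(20) gives
    // NeededSize = 32 + 8 = 40, while a 100-byte request with 64-byte
    // alignment gives NeededSize = 112 + 64 = 176, enough to guarantee a
    // 64-byte aligned address for the user pointer after the header.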
339 
340     // Takes care of extravagantly large sizes as well as integer overflows.
341     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
342     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
343       if (Options.get(OptionBit::MayReturnNull))
344         return nullptr;
345       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
346     }
347     DCHECK_LE(Size, NeededSize);
348 
349     void *Block = nullptr;
350     uptr ClassId = 0;
351     uptr SecondaryBlockEnd = 0;
352     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
353       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
354       DCHECK_NE(ClassId, 0U);
355       bool UnlockRequired;
356       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
357       Block = TSD->Cache.allocate(ClassId);
358       // If the allocation failed, the most likely reason with a 32-bit primary
359       // is the region being full. In that event, retry in each successively
360       // larger class until it fits. If it fails to fit in the largest class,
361       // fall back to the Secondary.
362       if (UNLIKELY(!Block)) {
363         while (ClassId < SizeClassMap::LargestClassId && !Block)
364           Block = TSD->Cache.allocate(++ClassId);
365         if (!Block)
366           ClassId = 0;
367       }
368       if (UnlockRequired)
369         TSD->unlock();
370     }
371     if (UNLIKELY(ClassId == 0))
372       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
373                                  FillContents);
374 
375     if (UNLIKELY(!Block)) {
376       if (Options.get(OptionBit::MayReturnNull))
377         return nullptr;
378       reportOutOfMemory(NeededSize);
379     }
380 
381     const uptr BlockUptr = reinterpret_cast<uptr>(Block);
382     const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
383     const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
384 
385     void *Ptr = reinterpret_cast<void *>(UserPtr);
386     void *TaggedPtr = Ptr;
387     if (LIKELY(ClassId)) {
388       // We only need to zero or tag the contents for Primary backed
389       // allocations. We only set tags for primary allocations in order to avoid
390       // faulting potentially large numbers of pages for large secondary
391       // allocations. We assume that guard pages are enough to protect these
392       // allocations.
393       //
394       // FIXME: When the kernel provides a way to set the background tag of a
395       // mapping, we should be able to tag secondary allocations as well.
396       //
397       // When memory tagging is enabled, zeroing the contents is done as part of
398       // setting the tag.
399       if (UNLIKELY(useMemoryTagging<Params>(Options))) {
400         uptr PrevUserPtr;
401         Chunk::UnpackedHeader Header;
402         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
403         const uptr BlockEnd = BlockUptr + BlockSize;
404         // If possible, try to reuse the UAF tag that was set by deallocate().
405         // For simplicity, only reuse tags if we have the same start address as
406         // the previous allocation. This handles the majority of cases since
407         // most allocations will not be more aligned than the minimum alignment.
408         //
409         // We need to handle situations involving reclaimed chunks, and retag
410         // the reclaimed portions if necessary. In the case where the chunk is
411         // fully reclaimed, the chunk's header will be zero, which will trigger
412         // the code path for new mappings and invalid chunks that prepares the
413         // chunk from scratch. There are three possibilities for partial
414         // reclaiming:
415         //
416         // (1) Header was reclaimed, data was partially reclaimed.
417         // (2) Header was not reclaimed, all data was reclaimed (e.g. because
418         //     data started on a page boundary).
419         // (3) Header was not reclaimed, data was partially reclaimed.
420         //
421         // Case (1) will be handled in the same way as for full reclaiming,
422         // since the header will be zero.
423         //
424         // We can detect case (2) by loading the tag from the start
425         // of the chunk. If it is zero, it means that either all data was
426         // reclaimed (since we never use zero as the chunk tag), or that the
427         // previous allocation was of size zero. Either way, we need to prepare
428         // a new chunk from scratch.
429         //
430         // We can detect case (3) by moving to the next page (if covered by the
431         // chunk) and loading the tag of its first granule. If it is zero, it
432         // means that all following pages may need to be retagged. On the other
433         // hand, if it is nonzero, we can assume that all following pages are
434         // still tagged, according to the logic that if any of the pages
435         // following the next page were reclaimed, the next page would have been
436         // reclaimed as well.
437         uptr TaggedUserPtr;
438         if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
439             PrevUserPtr == UserPtr &&
440             (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
441           uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
442           const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
443           if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
444             PrevEnd = NextPage;
445           TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
446           resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
447           if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
448             // If an allocation needs to be zeroed (i.e. calloc) we can normally
449             // avoid zeroing the memory now since we can rely on memory having
450             // been zeroed on free, as this is normally done while setting the
451             // UAF tag. But if tagging was disabled per-thread when the memory
452             // was freed, it would not have been retagged and thus zeroed, and
453             // therefore it needs to be zeroed now.
454             memset(TaggedPtr, 0,
455                    Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
456                                        archMemoryTagGranuleSize())));
457           } else if (Size) {
458             // Clear any stack metadata that may have previously been stored in
459             // the chunk data.
460             memset(TaggedPtr, 0, archMemoryTagGranuleSize());
461           }
462         } else {
463           const uptr OddEvenMask =
464               computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
465           TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
466         }
467         storePrimaryAllocationStackMaybe(Options, Ptr);
468       } else {
469         Block = addHeaderTag(Block);
470         Ptr = addHeaderTag(Ptr);
471         if (UNLIKELY(FillContents != NoFill)) {
472           // This condition is not necessarily unlikely, but since memset is
473           // costly, we might as well mark it as such.
474           memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
475                  PrimaryT::getSizeByClassId(ClassId));
476         }
477       }
478     } else {
479       Block = addHeaderTag(Block);
480       Ptr = addHeaderTag(Ptr);
481       if (UNLIKELY(useMemoryTagging<Params>(Options))) {
482         storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
483         storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
484       }
485     }
486 
487     Chunk::UnpackedHeader Header = {};
488     if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
489       const uptr Offset = UserPtr - UnalignedUserPtr;
490       DCHECK_GE(Offset, 2 * sizeof(u32));
491       // The BlockMarker has no security purpose, but is specifically meant for
492       // the chunk iteration function that can be used in debugging situations.
493       // It is the only situation where we have to locate the start of a chunk
494       // based on its block address.
495       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
496       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
497       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
498     }
499     Header.ClassId = ClassId & Chunk::ClassIdMask;
500     Header.State = Chunk::State::Allocated;
501     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
502     Header.SizeOrUnusedBytes =
503         (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
504         Chunk::SizeOrUnusedBytesMask;
505     Chunk::storeHeader(Cookie, Ptr, &Header);
506 
507     if (UNLIKELY(&__scudo_allocate_hook))
508       __scudo_allocate_hook(TaggedPtr, Size);
509 
510     return TaggedPtr;
511   }
512 
513   NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
514                            UNUSED uptr Alignment = MinAlignment) {
515     // For a deallocation, we only ensure minimal initialization, meaning thread
516     // local data will be left uninitialized for now (when using ELF TLS). The
517     // fallback cache will be used instead. This is a workaround for a situation
518     // where the only heap operation performed in a thread would be a free past
519     // the TLS destructors, which would leave initialized thread-specific data
520     // never properly destroyed. Any other heap operation will do a full init.
521     initThreadMaybe(/*MinimalInit=*/true);
522 
523     if (UNLIKELY(&__scudo_deallocate_hook))
524       __scudo_deallocate_hook(Ptr);
525 
526     if (UNLIKELY(!Ptr))
527       return;
528 
529 #ifdef GWP_ASAN_HOOKS
530     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
531       GuardedAlloc.deallocate(Ptr);
532       Stats.lock();
533       Stats.add(StatFree, GuardedAllocSlotSize);
534       Stats.sub(StatAllocated, GuardedAllocSlotSize);
535       Stats.unlock();
536       return;
537     }
538 #endif // GWP_ASAN_HOOKS
539 
540     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
541       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
542 
543     void *TaggedPtr = Ptr;
544     Ptr = getHeaderTaggedPointer(Ptr);
545 
546     Chunk::UnpackedHeader Header;
547     Chunk::loadHeader(Cookie, Ptr, &Header);
548 
549     if (UNLIKELY(Header.State != Chunk::State::Allocated))
550       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
551 
552     const Options Options = Primary.Options.load();
553     if (Options.get(OptionBit::DeallocTypeMismatch)) {
554       if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
555         // With the exception of memalign'd chunks, which can still be free'd.
556         if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
557             Origin != Chunk::Origin::Malloc)
558           reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
559                                     Header.OriginOrWasZeroed, Origin);
560       }
561     }
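    // Example of the policy above (illustrative): with dealloc_type_mismatch
    // set, calling delete on a pointer obtained from malloc() is reported as
    // a mismatch, while free() on a memalign()-style allocation is tolerated
    // thanks to the exception just above.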
562 
563     const uptr Size = getSize(Ptr, &Header);
564     if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
565       if (UNLIKELY(DeleteSize != Size))
566         reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
567     }
568 
569     quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
570   }
571 
572   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
573     initThreadMaybe();
574 
575     const Options Options = Primary.Options.load();
576     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
577       if (Options.get(OptionBit::MayReturnNull))
578         return nullptr;
579       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
580     }
581 
582     // The following cases are handled by the C wrappers.
583     DCHECK_NE(OldPtr, nullptr);
584     DCHECK_NE(NewSize, 0);
585 
586 #ifdef GWP_ASAN_HOOKS
587     if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
588       uptr OldSize = GuardedAlloc.getSize(OldPtr);
589       void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
590       if (NewPtr)
591         memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
592       GuardedAlloc.deallocate(OldPtr);
593       Stats.lock();
594       Stats.add(StatFree, GuardedAllocSlotSize);
595       Stats.sub(StatAllocated, GuardedAllocSlotSize);
596       Stats.unlock();
597       return NewPtr;
598     }
599 #endif // GWP_ASAN_HOOKS
600 
601     void *OldTaggedPtr = OldPtr;
602     OldPtr = getHeaderTaggedPointer(OldPtr);
603 
604     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
605       reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
606 
607     Chunk::UnpackedHeader OldHeader;
608     Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
609 
610     if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
611       reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
612 
613     // Pointer has to be allocated with a malloc-type function. Some
614     // applications think that it is OK to realloc a memalign'ed pointer, which
615     // will trigger this check. It really isn't.
616     if (Options.get(OptionBit::DeallocTypeMismatch)) {
617       if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
618         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
619                                   OldHeader.OriginOrWasZeroed,
620                                   Chunk::Origin::Malloc);
621     }
622 
623     void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
624     uptr BlockEnd;
625     uptr OldSize;
626     const uptr ClassId = OldHeader.ClassId;
627     if (LIKELY(ClassId)) {
628       BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
629                  SizeClassMap::getSizeByClassId(ClassId);
630       OldSize = OldHeader.SizeOrUnusedBytes;
631     } else {
632       BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
633       OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
634                             OldHeader.SizeOrUnusedBytes);
635     }
636     // If the new chunk still fits in the previously allocated block (with a
637     // reasonable delta), we just keep the old block, and update the chunk
638     // header to reflect the size change.
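    // For instance (illustrative, assuming a 4 KiB page size): shrinking a
    // 10000-byte chunk to 8000 bytes keeps the block, since only 2000 bytes
    // would go unused, whereas shrinking it to 2000 bytes falls through to
    // the allocate/copy/deallocate path below so the backend can reclaim the
    // difference.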
639     if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
640       if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
641         Chunk::UnpackedHeader NewHeader = OldHeader;
642         NewHeader.SizeOrUnusedBytes =
643             (ClassId ? NewSize
644                      : BlockEnd -
645                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
646             Chunk::SizeOrUnusedBytesMask;
647         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
648         if (UNLIKELY(useMemoryTagging<Params>(Options))) {
649           if (ClassId) {
650             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
651                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
652                               NewSize, untagPointer(BlockEnd));
653             storePrimaryAllocationStackMaybe(Options, OldPtr);
654           } else {
655             storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
656           }
657         }
658         return OldTaggedPtr;
659       }
660     }
661 
662     // Otherwise we allocate a new one, and deallocate the old one. Some
663     // allocators will allocate an even larger chunk (by a fixed factor) to
664     // allow for potential further in-place realloc. The gains of such a trick
665     // are currently unclear.
666     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
667     if (LIKELY(NewPtr)) {
668       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
669       quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
670     }
671     return NewPtr;
672   }
673 
674   // TODO(kostyak): disable() is currently best-effort. There are some small
675   //                windows of time when an allocation could still succeed after
676   //                this function finishes. We will revisit that later.
677   void disable() {
678     initThreadMaybe();
679 #ifdef GWP_ASAN_HOOKS
680     GuardedAlloc.disable();
681 #endif
682     TSDRegistry.disable();
683     Stats.disable();
684     Quarantine.disable();
685     Primary.disable();
686     Secondary.disable();
687   }
688 
689   void enable() {
690     initThreadMaybe();
691     Secondary.enable();
692     Primary.enable();
693     Quarantine.enable();
694     Stats.enable();
695     TSDRegistry.enable();
696 #ifdef GWP_ASAN_HOOKS
697     GuardedAlloc.enable();
698 #endif
699   }
700 
701   // The function returns the number of bytes required to store the statistics,
702   // which might be larger than the number of bytes provided. Note that the
703   // statistics buffer is not necessarily constant between calls to this
704   // function. This can be called with a null buffer or zero size for buffer
705   // sizing purposes.
706   uptr getStats(char *Buffer, uptr Size) {
707     ScopedString Str;
708     disable();
709     const uptr Length = getStats(&Str) + 1;
710     enable();
711     if (Length < Size)
712       Size = Length;
713     if (Buffer && Size) {
714       memcpy(Buffer, Str.data(), Size);
715       Buffer[Size - 1] = '\0';
716     }
717     return Length;
718   }
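  // Usage sketch (illustrative): size the buffer with a first call, then
  // fetch the text with a second one; the required length may change in
  // between since the statistics are live.
  //
  //   const uptr Needed = Allocator.getStats(nullptr, 0);
  //   char *Buf = static_cast<char *>(malloc(Needed));
  //   if (Buf) {
  //     Allocator.getStats(Buf, Needed);
  //     fputs(Buf, stderr);
  //     free(Buf);
  //   }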
719 
720   void printStats() {
721     ScopedString Str;
722     disable();
723     getStats(&Str);
724     enable();
725     Str.output();
726   }
727 
728   void releaseToOS() {
729     initThreadMaybe();
730     Primary.releaseToOS();
731     Secondary.releaseToOS();
732   }
733 
734   // Iterate over all chunks and call a callback for all busy chunks located
735   // within the provided memory range. Said callback must not use this allocator
736   // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
737   void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
738                          void *Arg) {
739     initThreadMaybe();
740     if (archSupportsMemoryTagging())
741       Base = untagPointer(Base);
742     const uptr From = Base;
743     const uptr To = Base + Size;
744     bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
745                                 systemSupportsMemoryTagging();
746     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
747                    Arg](uptr Block) {
748       if (Block < From || Block >= To)
749         return;
750       uptr Chunk;
751       Chunk::UnpackedHeader Header;
752       if (MayHaveTaggedPrimary) {
753         // A chunk header can either have a zero tag (tagged primary) or the
754         // header tag (secondary, or untagged primary). We don't know which so
755         // try both.
756         ScopedDisableMemoryTagChecks x;
757         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
758             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
759           return;
760       } else {
761         if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
762           return;
763       }
764       if (Header.State == Chunk::State::Allocated) {
765         uptr TaggedChunk = Chunk;
766         if (allocatorSupportsMemoryTagging<Params>())
767           TaggedChunk = untagPointer(TaggedChunk);
768         if (useMemoryTagging<Params>(Primary.Options.load()))
769           TaggedChunk = loadTag(Chunk);
770         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
771                  Arg);
772       }
773     };
774     Primary.iterateOverBlocks(Lambda);
775     Secondary.iterateOverBlocks(Lambda);
776 #ifdef GWP_ASAN_HOOKS
777     GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
778 #endif
779   }
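  // Usage sketch (illustrative; the exact callback typedef lives in
  // scudo/interface.h): the callback receives the chunk address and its
  // requested size, and must not touch the allocator itself. For example:
  //
  //   static void tallyChunk(uintptr_t Base, size_t Size, void *Arg) {
  //     *static_cast<size_t *>(Arg) += Size;  // accumulate live bytes
  //   }
  //   ...
  //   size_t LiveBytes = 0;
  //   Allocator.iterateOverChunks(RangeBase, RangeSize, tallyChunk, &LiveBytes);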
780 
781   bool canReturnNull() {
782     initThreadMaybe();
783     return Primary.Options.load().get(OptionBit::MayReturnNull);
784   }
785 
786   bool setOption(Option O, sptr Value) {
787     initThreadMaybe();
788     if (O == Option::MemtagTuning) {
789       // Enabling odd/even tags involves a tradeoff between use-after-free
790       // detection and buffer overflow detection. Odd/even tags make it more
791       // likely for buffer overflows to be detected by increasing the size of
792       // the guaranteed "red zone" around the allocation, but on the other hand
793       // use-after-free is less likely to be detected because the tag space for
794       // any particular chunk is cut in half. Therefore we use this tuning
795       // setting to control whether odd/even tags are enabled.
796       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
797         Primary.Options.set(OptionBit::UseOddEvenTags);
798       else if (Value == M_MEMTAG_TUNING_UAF)
799         Primary.Options.clear(OptionBit::UseOddEvenTags);
800       return true;
801     } else {
802       // We leave it to the various sub-components to decide whether or not they
803       // want to handle the option, but we do not want to short-circuit
804       // execution if one of the setOption calls were to return false.
805       const bool PrimaryResult = Primary.setOption(O, Value);
806       const bool SecondaryResult = Secondary.setOption(O, Value);
807       const bool RegistryResult = TSDRegistry.setOption(O, Value);
808       return PrimaryResult && SecondaryResult && RegistryResult;
809     }
810     return false;
811   }
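  // Illustrative note: the M_MEMTAG_TUNING_* values checked above mirror the
  // Android mallopt() constants of the same name, so a caller tuning for
  // use-after-free detection would end up clearing OptionBit::UseOddEvenTags
  // through this path.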
812 
813   // Return the usable size for a given chunk. Technically we lie, as we just
814   // report the actual size of a chunk. This is done to counteract code actively
815   // writing past the end of a chunk (like sqlite3) when the usable size allows
816   // for it, which then forces realloc to copy the usable size of a chunk as
817   // opposed to its actual size.
818   uptr getUsableSize(const void *Ptr) {
819     initThreadMaybe();
820     if (UNLIKELY(!Ptr))
821       return 0;
822 
823 #ifdef GWP_ASAN_HOOKS
824     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
825       return GuardedAlloc.getSize(Ptr);
826 #endif // GWP_ASAN_HOOKS
827 
828     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
829     Chunk::UnpackedHeader Header;
830     Chunk::loadHeader(Cookie, Ptr, &Header);
831     // Getting the usable size of a chunk only makes sense if it's allocated.
832     if (UNLIKELY(Header.State != Chunk::State::Allocated))
833       reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
834     return getSize(Ptr, &Header);
835   }
836 
837   void getStats(StatCounters S) {
838     initThreadMaybe();
839     Stats.get(S);
840   }
841 
842   // Returns true if the pointer provided was allocated by the current
843   // allocator instance, which is compliant with tcmalloc's ownership concept.
844   // A corrupted chunk will not be reported as owned, which is WAI.
845   bool isOwned(const void *Ptr) {
846     initThreadMaybe();
847 #ifdef GWP_ASAN_HOOKS
848     if (GuardedAlloc.pointerIsMine(Ptr))
849       return true;
850 #endif // GWP_ASAN_HOOKS
851     if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
852       return false;
853     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
854     Chunk::UnpackedHeader Header;
855     return Chunk::isValid(Cookie, Ptr, &Header) &&
856            Header.State == Chunk::State::Allocated;
857   }
858 
859   bool useMemoryTaggingTestOnly() const {
860     return useMemoryTagging<Params>(Primary.Options.load());
861   }
862   void disableMemoryTagging() {
863     // If we haven't been initialized yet, we need to initialize now in order to
864     // prevent a future call to initThreadMaybe() from enabling memory tagging
865     // based on feature detection. But don't call initThreadMaybe() because it
866     // may end up calling the allocator (via pthread_atfork, via the post-init
867     // callback), which may cause mappings to be created with memory tagging
868     // enabled.
869     TSDRegistry.initOnceMaybe(this);
870     if (allocatorSupportsMemoryTagging<Params>()) {
871       Secondary.disableMemoryTagging();
872       Primary.Options.clear(OptionBit::UseMemoryTagging);
873     }
874   }
875 
876   void setTrackAllocationStacks(bool Track) {
877     initThreadMaybe();
878     if (Track)
879       Primary.Options.set(OptionBit::TrackAllocationStacks);
880     else
881       Primary.Options.clear(OptionBit::TrackAllocationStacks);
882   }
883 
884   void setFillContents(FillContentsMode FillContents) {
885     initThreadMaybe();
886     Primary.Options.setFillContentsMode(FillContents);
887   }
888 
889   void setAddLargeAllocationSlack(bool AddSlack) {
890     initThreadMaybe();
891     if (AddSlack)
892       Primary.Options.set(OptionBit::AddLargeAllocationSlack);
893     else
894       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
895   }
896 
897   const char *getStackDepotAddress() const {
898     return reinterpret_cast<const char *>(&Depot);
899   }
900 
901   const char *getRegionInfoArrayAddress() const {
902     return Primary.getRegionInfoArrayAddress();
903   }
904 
905   static uptr getRegionInfoArraySize() {
906     return PrimaryT::getRegionInfoArraySize();
907   }
908 
909   const char *getRingBufferAddress() const {
910     return reinterpret_cast<const char *>(&RingBuffer);
911   }
912 
913   static uptr getRingBufferSize() { return sizeof(RingBuffer); }
914 
915   static const uptr MaxTraceSize = 64;
916 
917   static void collectTraceMaybe(const StackDepot *Depot,
918                                 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
919     uptr RingPos, Size;
920     if (!Depot->find(Hash, &RingPos, &Size))
921       return;
922     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
923       Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
924   }
925 
926   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
927                            uintptr_t FaultAddr, const char *DepotPtr,
928                            const char *RegionInfoPtr, const char *RingBufferPtr,
929                            const char *Memory, const char *MemoryTags,
930                            uintptr_t MemoryAddr, size_t MemorySize) {
931     *ErrorInfo = {};
932     if (!allocatorSupportsMemoryTagging<Params>() ||
933         MemoryAddr + MemorySize < MemoryAddr)
934       return;
935 
936     auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
937     size_t NextErrorReport = 0;
938 
939     // Check for OOB in the current block and the two surrounding blocks. Beyond
940     // that, UAF is more likely.
941     if (extractTag(FaultAddr) != 0)
942       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
943                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
944                          MemorySize, 0, 2);
945 
946     // Check the ring buffer. For primary allocations this will only find UAF;
947     // for secondary allocations we can find either UAF or OOB.
948     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
949                            RingBufferPtr);
950 
951     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
952     // Beyond that we are likely to hit false positives.
953     if (extractTag(FaultAddr) != 0)
954       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
955                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
956                          MemorySize, 2, 16);
957   }
958 
959 private:
960   using SecondaryT = MapAllocator<Params>;
961   typedef typename PrimaryT::SizeClassMap SizeClassMap;
962 
963   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
964   static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
965   static const uptr MinAlignment = 1UL << MinAlignmentLog;
966   static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
967   static const uptr MaxAllowedMallocSize =
968       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
969 
970   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
971                 "Minimal alignment must at least cover a chunk header.");
972   static_assert(!allocatorSupportsMemoryTagging<Params>() ||
973                     MinAlignment >= archMemoryTagGranuleSize(),
974                 "");
975 
976   static const u32 BlockMarker = 0x44554353U;
977 
978   // These are indexes into an "array" of 32-bit values that store information
979   // inline with a chunk that is relevant to diagnosing memory tag faults, where
980   // 0 corresponds to the address of the user memory. This means that only
981   // negative indexes may be used. The smallest index that may be used is -2,
982   // which corresponds to 8 bytes before the user memory, because the chunk
983   // header size is 8 bytes and in allocators that support memory tagging the
984   // minimum alignment is at least the tag granule size (16 on aarch64).
985   static const sptr MemTagAllocationTraceIndex = -2;
986   static const sptr MemTagAllocationTidIndex = -1;
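  // Resulting layout relative to the user pointer, viewed as an array of u32
  // (illustrative summary of the description above):
  //   Ptr32[-2]  (bytes -8..-5): allocation stack trace hash
  //   Ptr32[-1]  (bytes -4..-1): allocation thread ID
  //   Ptr32[0]   (byte 0 on)   : start of the user allocation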
987 
988   u32 Cookie = 0;
989   u32 QuarantineMaxChunkSize = 0;
990 
991   GlobalStats Stats;
992   PrimaryT Primary;
993   SecondaryT Secondary;
994   QuarantineT Quarantine;
995   TSDRegistryT TSDRegistry;
996   pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
997 
998 #ifdef GWP_ASAN_HOOKS
999   gwp_asan::GuardedPoolAllocator GuardedAlloc;
1000   uptr GuardedAllocSlotSize = 0;
1001 #endif // GWP_ASAN_HOOKS
1002 
1003   StackDepot Depot;
1004 
1005   struct AllocationRingBuffer {
1006     struct Entry {
1007       atomic_uptr Ptr;
1008       atomic_uptr AllocationSize;
1009       atomic_u32 AllocationTrace;
1010       atomic_u32 AllocationTid;
1011       atomic_u32 DeallocationTrace;
1012       atomic_u32 DeallocationTid;
1013     };
1014 
1015     atomic_uptr Pos;
1016 #ifdef SCUDO_FUZZ
1017     static const uptr NumEntries = 2;
1018 #else
1019     static const uptr NumEntries = 32768;
1020 #endif
1021     Entry Entries[NumEntries];
1022   };
1023   AllocationRingBuffer RingBuffer = {};
1024 
1025   // The following might get optimized out by the compiler.
1026   NOINLINE void performSanityChecks() {
1027     // Verify that the header offset field can hold the maximum offset. In the
1028     // case of the Secondary allocator, it takes care of alignment and the
1029     // offset will always be small. In the case of the Primary, the worst case
1030     // scenario happens in the last size class, when the backend allocation
1031     // would already be aligned on the requested alignment, which would happen
1032     // to be the maximum alignment that would fit in that size class. As a
1033     // result, the maximum offset will be at most the maximum alignment for the
1034     // last size class minus the header size, in multiples of MinAlignment.
1035     Chunk::UnpackedHeader Header = {};
1036     const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1037                                          SizeClassMap::MaxSize - MinAlignment);
1038     const uptr MaxOffset =
1039         (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1040     Header.Offset = MaxOffset & Chunk::OffsetMask;
1041     if (UNLIKELY(Header.Offset != MaxOffset))
1042       reportSanityCheckError("offset");
1043 
1044     // Verify that we can fit the maximum size or amount of unused bytes in the
1045     // header. Given that the Secondary fits the allocation to a page, the worst
1046     // case scenario happens in the Primary. It will depend on the second to
1047     // last and last class sizes, as well as the dynamic base for the Primary.
1048     // The following is an over-approximation that works for our needs.
1049     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1050     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1051     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1052       reportSanityCheckError("size (or unused bytes)");
1053 
1054     const uptr LargestClassId = SizeClassMap::LargestClassId;
1055     Header.ClassId = LargestClassId;
1056     if (UNLIKELY(Header.ClassId != LargestClassId))
1057       reportSanityCheckError("class ID");
1058   }
1059 
1060   static inline void *getBlockBegin(const void *Ptr,
1061                                     Chunk::UnpackedHeader *Header) {
1062     return reinterpret_cast<void *>(
1063         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1064         (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1065   }
1066 
1067   // Return the size of a chunk as requested during its allocation.
1068   inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1069     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1070     if (LIKELY(Header->ClassId))
1071       return SizeOrUnusedBytes;
1072     if (allocatorSupportsMemoryTagging<Params>())
1073       Ptr = untagPointer(const_cast<void *>(Ptr));
1074     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1075            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1076   }
1077 
1078   void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
1079                                    Chunk::UnpackedHeader *Header, uptr Size) {
1080     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
1081     Chunk::UnpackedHeader NewHeader = *Header;
1082     // If the quarantine is disabled, or if the actual size of a chunk is 0 or
1083     // larger than the maximum allowed, we return the chunk directly to the
1084     // backend. The size check purposefully underflows for Size == 0.
1085     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1086                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
1087                                   !NewHeader.ClassId;
1088     if (BypassQuarantine)
1089       NewHeader.State = Chunk::State::Available;
1090     else
1091       NewHeader.State = Chunk::State::Quarantined;
1092     NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
1093                                   NewHeader.ClassId &&
1094                                   !TSDRegistry.getDisableMemInit();
1095     Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
1096 
1097     if (UNLIKELY(useMemoryTagging<Params>(Options))) {
1098       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1099       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1100       if (NewHeader.ClassId) {
1101         if (!TSDRegistry.getDisableMemInit()) {
1102           uptr TaggedBegin, TaggedEnd;
1103           const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1104               Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
1105               NewHeader.ClassId);
1106           // Exclude the previous tag so that immediate use after free is
1107           // detected 100% of the time.
1108           setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1109                        &TaggedEnd);
1110         }
1111       }
1112     }
1113     if (BypassQuarantine) {
1114       if (allocatorSupportsMemoryTagging<Params>())
1115         Ptr = untagPointer(Ptr);
1116       void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
1117       const uptr ClassId = NewHeader.ClassId;
1118       if (LIKELY(ClassId)) {
1119         bool UnlockRequired;
1120         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1121         TSD->Cache.deallocate(ClassId, BlockBegin);
1122         if (UnlockRequired)
1123           TSD->unlock();
1124       } else {
1125         if (UNLIKELY(useMemoryTagging<Params>(Options)))
1126           storeTags(reinterpret_cast<uptr>(BlockBegin),
1127                     reinterpret_cast<uptr>(Ptr));
1128         Secondary.deallocate(Options, BlockBegin);
1129       }
1130     } else {
1131       bool UnlockRequired;
1132       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1133       Quarantine.put(&TSD->QuarantineCache,
1134                      QuarantineCallback(*this, TSD->Cache), Ptr, Size);
1135       if (UnlockRequired)
1136         TSD->unlock();
1137     }
1138   }
1139 
1140   bool getChunkFromBlock(uptr Block, uptr *Chunk,
1141                          Chunk::UnpackedHeader *Header) {
1142     *Chunk =
1143         Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1144     return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1145   }
1146 
1147   static uptr getChunkOffsetFromBlock(const char *Block) {
1148     u32 Offset = 0;
1149     if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1150       Offset = reinterpret_cast<const u32 *>(Block)[1];
1151     return Offset + Chunk::getHeaderSize();
1152   }
1153 
1154   // Set the tag of the granule past the end of the allocation to 0, to catch
1155   // linear overflows even if a previous larger allocation used the same block
1156   // and tag. Only do this if the granule past the end is in our block, because
1157   // this would otherwise lead to a SEGV if the allocation covers the entire
1158   // block and our block is at the end of a mapping. The tag of the next block's
1159   // header granule will be set to 0, so it will serve the purpose of catching
1160   // linear overflows in this case.
1161   //
1162   // For allocations of size 0 we do not end up storing the address tag to the
1163   // memory tag space, which getInlineErrorInfo() normally relies on to match
1164   // address tags against chunks. To allow matching in this case we store the
1165   // address tag in the first byte of the chunk.
1166   void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1167     DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1168     uptr UntaggedEnd = untagPointer(End);
1169     if (UntaggedEnd != BlockEnd) {
1170       storeTag(UntaggedEnd);
1171       if (Size == 0)
1172         *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1173     }
1174   }
1175 
1176   void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
1177                            uptr BlockEnd) {
1178     // Prepare the granule before the chunk to store the chunk header by setting
1179     // its tag to 0. Normally its tag will already be 0, but in the case where a
1180     // chunk holding a low alignment allocation is reused for a higher alignment
1181     // allocation, the chunk may already have a non-zero tag from the previous
1182     // allocation.
1183     storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1184 
1185     uptr TaggedBegin, TaggedEnd;
1186     setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
1187 
1188     storeEndMarker(TaggedEnd, Size, BlockEnd);
1189     return reinterpret_cast<void *>(TaggedBegin);
1190   }
1191 
1192   void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1193                          uptr BlockEnd) {
1194     uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
1195     uptr RoundNewPtr;
1196     if (RoundOldPtr >= NewPtr) {
1197       // If the allocation is shrinking we just need to set the tag past the end
1198       // of the allocation to 0. See explanation in storeEndMarker() above.
1199       RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
1200     } else {
1201       // Set the memory tag of the region
1202       // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
1203       // to the pointer tag stored in OldPtr.
1204       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
1205     }
1206     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
1207   }
1208 
1209   void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
1210     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1211       return;
1212     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1213     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
1214     Ptr32[MemTagAllocationTidIndex] = getThreadID();
1215   }
1216 
1217   void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1218                             uptr AllocationSize, u32 DeallocationTrace,
1219                             u32 DeallocationTid) {
1220     uptr Pos = atomic_fetch_add(&RingBuffer.Pos, 1, memory_order_relaxed);
1221     typename AllocationRingBuffer::Entry *Entry =
1222         &RingBuffer.Entries[Pos % AllocationRingBuffer::NumEntries];
1223 
1224     // First invalidate our entry so that we don't attempt to interpret a
1225     // partially written state in getSecondaryErrorInfo(). The fences below
1226     // ensure that the compiler does not move the stores to Ptr in between the
1227     // stores to the other fields.
1228     atomic_store_relaxed(&Entry->Ptr, 0);
1229 
1230     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1231     atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1232     atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1233     atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1234     atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1235     atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1236     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1237 
1238     atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1239   }
1240 
1241   void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
1242                                           uptr Size) {
1243     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1244       return;
1245 
1246     u32 Trace = collectStackTrace();
1247     u32 Tid = getThreadID();
1248 
1249     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1250     Ptr32[MemTagAllocationTraceIndex] = Trace;
1251     Ptr32[MemTagAllocationTidIndex] = Tid;
1252 
1253     storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1254   }
1255 
  void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
                                   uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;

    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

    u32 DeallocationTrace = collectStackTrace();
    u32 DeallocationTid = getThreadID();

    storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }

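  // Number of report slots available in scudo_error_info::reports.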
  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)0)->reports) /
      sizeof(((scudo_error_info *)0)->reports[0]);

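  // Scans the primary blocks surrounding the fault address for allocated
  // chunks whose memory tag matches the tag of the fault address, and reports
  // them as buffer overflows or underflows. Works on caller-provided copies of
  // the faulting process' memory, memory tags and primary region info.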
  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

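    // Retrieves the data and memory tag of the granule at Addr from the
    // provided memory snapshot; fails if the granule is not fully contained in
    // [MemoryAddr, MemoryAddr + MemorySize).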
    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

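    // Reconstructs the chunk contained in the block at Addr from the memory
    // snapshot: its address, unpacked header, inline trace/TID words and
    // memory tag.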
    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

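    // If the block at BlockAddr holds an allocated chunk whose tag matches the
    // fault address tag, records a buffer overflow/underflow report for it.
    // Returns true once the report array is full, which stops the search.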
    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      collectTraceMaybe(Depot, R->allocation_trace,
                        Data[MemTagAllocationTraceIndex]);
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }

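  // Walks the allocation ring buffer backwards from the most recent entry and
  // reports use-after-free (for entries with deallocation information) or
  // buffer overflow/underflow (secondary allocations only) against the fault
  // address.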
  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

    for (uptr I = Pos - 1; I != Pos - 1 - AllocationRingBuffer::NumEntries &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = &RingBuffer->Entries[I % AllocationRingBuffer::NumEntries];
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }

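  // Appends statistics for the Primary, Secondary and Quarantine to Str and
  // returns the resulting string length.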
  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    return Str->length();
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_