//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "interface.h"
#include "local_cache.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "tsd.h"

namespace scudo {

template <class Params> class Allocator {
public:
  using PrimaryT = typename Params::Primary;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Params> ThisT;
  typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;

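  // Callbacks handed to the quarantine: they let it recycle quarantined
  // chunks back to the backend, and allocate/deallocate the QuarantineBatch
  // arrays it needs, using the Primary size class that fits a batch.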
  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);

      void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
      const uptr ClassId = Header.ClassId;
      if (ClassId)
        Cache.deallocate(ClassId, BlockBegin);
      else
        Allocator.Secondary.deallocate(BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };

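  // The global quarantine works through the callbacks above; each TSD owns a
  // local QuarantineCacheT that gets drained into it (see commitBack below).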
  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

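  // One-time initialization of a zero-initialized allocator: pick the checksum
  // implementation, seed the header cookie, parse the flags, then initialize
  // the stats, both backends and the quarantine.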
  void initLinkerInitialized() {
    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    Options.MayReturnNull = getFlags()->may_return_null;
    Options.ZeroContents = getFlags()->zero_contents;
    Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
    Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
    Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;

    Stats.initLinkerInitialized();
    Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
    Secondary.initLinkerInitialized(&Stats);

    Quarantine.init(getFlags()->quarantine_size_kb << 10,
                    getFlags()->thread_local_quarantine_size_kb << 10);
  }

  void reset() { memset(this, 0, sizeof(*this)); }

  void unmapTestOnly() {
    TSDRegistry.unmapTestOnly();
    Primary.unmapTestOnly();
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }

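  // Hook a thread-local cache up to the global stats and the Primary backend.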
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    Quarantine.drain(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache));
    TSD->Cache.destroy(&Stats);
  }

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) {
    initThreadMaybe();

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;

    // If the requested size happens to be 0 (more common than you might think),
    // allocate 1 byte on top of the header. Then add the extra bytes required
    // to fulfill the alignment requirements: we allocate enough to be sure that
    // there will be an address in the block that will satisfy the alignment.
    const uptr NeededSize =
        Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
        ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);

    // Takes care of extravagantly large sizes as well as integer overflows.
    if (UNLIKELY(Size >= MaxAllowedMallocSize ||
                 NeededSize >= MaxAllowedMallocSize)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }

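    // Blocks that fit in the Primary are served from a per-thread cache by
    // size class; anything larger goes to the map-based Secondary.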
    void *Block;
    uptr ClassId;
    uptr BlockEnd = 0;
    if (PrimaryT::canAllocate(NeededSize)) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Block = TSD->Cache.allocate(ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      ClassId = 0;
      Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
    }

    if (UNLIKELY(!Block)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportOutOfMemory(NeededSize);
    }

    // We only need to zero the contents for Primary backed allocations.
    if ((ZeroContents || Options.ZeroContents) && ClassId)
      memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));

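    // Build the chunk header. If the user pointer coming out of the block is
    // not sufficiently aligned, shift it forward and record the offset (in
    // MinAlignment multiples) so the block start can be recovered on free.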
    Chunk::UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
    // The following condition isn't necessarily "UNLIKELY".
    if (!isAligned(UserPtr, Alignment)) {
      const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
      const uptr Offset = AlignedUserPtr - UserPtr;
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
      DCHECK_GT(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      UserPtr = AlignedUserPtr;
    }
    Header.State = Chunk::State::Allocated;
    Header.Origin = Origin & Chunk::OriginMask;
    if (ClassId) {
      Header.ClassId = ClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
    } else {
      Header.SizeOrUnusedBytes =
          (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Cookie, Ptr, &Header);

    if (&__scudo_allocate_hook)
      __scudo_allocate_hook(Ptr, Size);

    return Ptr;
  }

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

    if (&__scudo_deallocate_hook)
      __scudo_deallocate_hook(Ptr);

    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
    if (Options.DeallocTypeMismatch) {
      if (Header.Origin != Origin) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
                     Origin != Chunk::Origin::Malloc))
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.Origin, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.DeleteSizeMismatch) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader OldHeader;
    Chunk::loadHeader(Cookie, OldPtr, &OldHeader);

    if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.DeallocTypeMismatch) {
      if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  OldHeader.Origin, Chunk::Origin::Malloc);
    }

    const uptr OldSize = getSize(OldPtr, &OldHeader);
    // If the new size is identical to the old one, or lower but within an
    // acceptable range, we just keep the old chunk, and update its header.
    if (NewSize == OldSize)
      return OldPtr;
    if (NewSize < OldSize) {
      const uptr Delta = OldSize - NewSize;
      if (Delta < (SizeClassMap::MaxSize / 2)) {
        Chunk::UnpackedHeader NewHeader = OldHeader;
        NewHeader.SizeOrUnusedBytes =
            (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
                               : NewHeader.SizeOrUnusedBytes + Delta) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
        return OldPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (NewPtr) {
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): while this locks the Primary & Secondary, it still allows
  //                pointers to be fetched from the TSD. We ultimately want to
  //                lock the registry as well. For now, it's good enough.
  void disable() {
    initThreadMaybe();
    Primary.disable();
    Secondary.disable();
  }

  void enable() {
    initThreadMaybe();
    Secondary.enable();
    Primary.enable();
  }

  void printStats() {
    disable();
    Primary.printStats();
    Secondary.printStats();
    Quarantine.printStats();
    enable();
  }

  void releaseToOS() { Primary.releaseToOS(); }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    const uptr From = Base;
    const uptr To = Base + Size;
    auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
      if (Block < From || Block > To)
        return;
      uptr ChunkSize;
      const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
      if (ChunkBase != InvalidChunk)
        Callback(ChunkBase, ChunkSize, Arg);
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
  }

  bool canReturnNull() {
    initThreadMaybe();
    return Options.MayReturnNull;
  }

  // TODO(kostyak): implement this as a "backend" to mallopt.
  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

private:
  typedef MapAllocator SecondaryT;
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

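  // Alignment and size limits enforced by this front-end.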
  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);

  // Constants used by the chunk iteration mechanism.
  static const u32 BlockMarker = 0x44554353U;
  static const uptr InvalidChunk = ~static_cast<uptr>(0);

  GlobalStats Stats;
  TSDRegistryT TSDRegistry;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;

  u32 Cookie;

  struct {
    u8 MayReturnNull : 1;       // may_return_null
    u8 ZeroContents : 1;        // zero_contents
    u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
    u8 DeleteSizeMismatch : 1;  // delete_size_mismatch
    u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
  } Options;

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes =
        MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

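  // Recover the beginning of the backing block from a chunk pointer: back up
  // over the header and over the alignment offset recorded at allocation time.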
  static INLINE void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                    Chunk::getHeaderSize() -
                                    (Header->Offset << MinAlignmentLog));
  }

  // Return the size of a chunk as requested during its allocation.
  INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (Header->ClassId)
      return SizeOrUnusedBytes;
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }

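  // Lazily initialize the allocator and the calling thread's TSD; with
  // MinimalInit, thread local data is left uninitialized (see deallocate).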
  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
                                   uptr Size) {
    Chunk::UnpackedHeader NewHeader = *Header;
    // If the quarantine is disabled, or if the actual size of the chunk is 0
    // or larger than the maximum allowed, return the chunk directly to the
    // backend.
    const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
                                  (Size > Options.QuarantineMaxChunkSize);
    if (BypassQuarantine) {
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
      void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
      const uptr ClassId = NewHeader.ClassId;
      if (ClassId) {
        bool UnlockRequired;
        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
        TSD->Cache.deallocate(ClassId, BlockBegin);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        Secondary.deallocate(BlockBegin);
      }
    } else {
      NewHeader.State = Chunk::State::Quarantined;
      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Quarantine.put(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // This only cares about valid busy chunks. This might change in the future.
  uptr getChunkFromBlock(uptr Block, uptr *Size) {
    u32 Offset = 0;
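    // If the block carries the BlockMarker, the chunk was offset for alignment
    // and the second u32 holds the offset of the chunk within the block.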
    if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<u32 *>(Block)[1];
    const uptr P = Block + Offset + Chunk::getHeaderSize();
    const void *Ptr = reinterpret_cast<const void *>(P);
    Chunk::UnpackedHeader Header;
    if (!Chunk::isValid(Cookie, Ptr, &Header) ||
        Header.State != Chunk::State::Allocated)
      return InvalidChunk;
    if (Size)
      *Size = getSize(Ptr, &Header);
    return P;
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_