Lines Matching +full:can +full:- +full:secondary
1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
25 #include "secondary.h"
130 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - in deallocate()
158 if (getFlags()->may_return_null) in init()
160 if (getFlags()->zero_contents) in init()
162 else if (getFlags()->pattern_fill_contents) in init()
164 if (getFlags()->dealloc_type_mismatch) in init()
166 if (getFlags()->delete_size_mismatch) in init()
173 static_cast<u32>(getFlags()->quarantine_max_chunk_size); in init()
179 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms; in init()
181 Secondary.init(&Stats, ReleaseToOsIntervalMs); in init()
183 static_cast<uptr>(getFlags()->quarantine_size_kb << 10), in init()
184 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10)); in init()
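
The flag reads above (may_return_null, zero_contents, pattern_fill_contents, dealloc_type_mismatch, delete_size_mismatch, quarantine_max_chunk_size, release_to_os_interval_ms and the two quarantine sizes) come from Scudo's runtime flag parser. A minimal sketch of overriding them, assuming the standalone build honors the weak __scudo_default_options() hook (it also parses the SCUDO_OPTIONS environment variable); the values here are illustrative, not defaults:

extern "C" const char *__scudo_default_options() {
  // Same flag names as read by init() above; values are examples only.
  return "dealloc_type_mismatch=true:delete_size_mismatch=true:"
         "quarantine_size_kb=256:thread_local_quarantine_size_kb=64:"
         "quarantine_max_chunk_size=2048:release_to_os_interval_ms=1000";
}
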
190 RB->Depot->enable(); in enableRingBuffer()
198 RB->Depot->disable(); in disableRingBuffer()
201 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
206 Opt.Enabled = getFlags()->GWP_ASAN_Enabled; in initGwpAsan()
208 getFlags()->GWP_ASAN_MaxSimultaneousAllocations; in initGwpAsan()
209 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate; in initGwpAsan()
210 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers; in initGwpAsan()
211 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable; in initGwpAsan()
212 // Embedded GWP-ASan is locked through the Scudo atfork handler (via in initGwpAsan()
213 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork in initGwpAsan()
227 GuardedAlloc.getAllocatorState()->maximumAllocationSize(); in initGwpAsan()
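
The GWP_ASAN_* flags read in initGwpAsan() go through the same flag mechanism. A sketch in the same style as the previous one (the string could simply be appended to it), assuming GWP-ASan support was compiled in; flag names are taken from the lines above, values are illustrative:

extern "C" const char *__scudo_default_options() {
  // Enable sampled GWP-ASan protection: roughly one in SampleRate allocations
  // is placed in a guarded slot.
  return "GWP_ASAN_Enabled=true:GWP_ASAN_SampleRate=5000:"
         "GWP_ASAN_MaxSimultaneousAllocations=32:"
         "GWP_ASAN_InstallSignalHandlers=true:GWP_ASAN_Recoverable=true";
}
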
251 Secondary.unmapTestOnly(); in unmapTestOnly()
253 if (getFlags()->GWP_ASAN_InstallSignalHandlers) in unmapTestOnly()
262 // The Cache must be provided zero-initialized.
263 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); } in initCache()
266 // - draining the local quarantine cache to the global quarantine;
267 // - releasing the cached pointers back to the Primary;
268 // - unlinking the local stats from the global ones (destroying the cache does
271 TSD->assertLocked(/*BypassCheck=*/true); in commitBack()
272 Quarantine.drain(&TSD->getQuarantineCache(), in commitBack()
273 QuarantineCallback(*this, TSD->getCache())); in commitBack()
274 TSD->getCache().destroy(&Stats); in commitBack()
278 TSD->assertLocked(/*BypassCheck=*/true); in drainCache()
279 Quarantine.drainAndRecycle(&TSD->getQuarantineCache(), in drainCache()
280 QuarantineCallback(*this, TSD->getCache())); in drainCache()
281 TSD->getCache().drain(); in drainCache()
291 // Secondary, or pointer allocated while memory tagging is unsupported or in getHeaderTaggedPointer()
315 return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size); in collectStackTrace()
330 // can use to determine which tag mask to use. in computeOddEvenMaskForPointerMaybe()
375 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
390 Block = TSD->getCache().allocate(ClassId);
393 // Secondary.
396 Block = TSD->getCache().allocate(++ClassId);
402 Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
416 ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
466     // With the exception of memalign'd chunks, which can still be freed.
524 // Pointer has to be allocated with a malloc-type function. Some
544 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
551 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
561 OldSize - NewSize);
567 : BlockEnd -
587 // allow for potential further in-place realloc. The gains of such a trick
597 // TODO(kostyak): disable() is currently best-effort. There are some small
609 Secondary.disable(); in disable()
616 Secondary.enable(); in enable()
629 // function. This can be called with a null buffer or zero size for buffer
638 Buffer[Size - 1] = '\0'; in getStats()
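
The comment above describes the buffer-based getStats() variant; the usual pattern with such an interface is to query the required size first. A hedged sketch, assuming the function returns the number of bytes needed (as the later `return Str->length()` hit suggests) and that `Allocator` is an instance of this combined allocator:

#include <vector>
template <class AllocatorT>
std::vector<char> collectStats(AllocatorT &Allocator) {
  // First call sizes the buffer (a null buffer / zero size is allowed per the
  // comment above); the second call fills it. +1 leaves room for the
  // terminating '\0' written by getStats().
  const auto Needed = Allocator.getStats(nullptr, 0);
  std::vector<char> Buffer(Needed + 1);
  Allocator.getStats(Buffer.data(), Buffer.size());
  return Buffer;
}
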
652 // Secondary allocator dumps the fragmentation data in getStats(). in printFragmentationInfo()
661 Secondary.releaseToOS(); in releaseToOS()
666 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
684 // A chunk header can either have a zero tag (tagged primary) or the in iterateOverChunks()
685 // header tag (secondary, or untagged primary). We don't know which so in iterateOverChunks()
706 Secondary.iterateOverBlocks(Lambda); in iterateOverChunks()
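
The fragment above states the usage contract for chunk iteration: the allocator must be disabled around it, or a deadlock can ensue (this is the discipline Android's malloc_disable()/malloc_iterate()/malloc_enable() sequence provides). A minimal sketch of that discipline; the callback signature is assumed from the usual iterate interface and the helper name is illustrative:

template <class AllocatorT>
void dumpLiveChunks(AllocatorT &Allocator, uintptr_t Base, size_t Size) {
  // Lock out all other heap activity first, as required by the comment above.
  Allocator.disable();
  Allocator.iterateOverChunks(
      Base, Size,
      [](uintptr_t ChunkBase, size_t ChunkSize, void *Arg) {
        // Record (ChunkBase, ChunkSize); allocating from this callback while
        // the allocator is disabled would self-deadlock.
        (void)ChunkBase; (void)ChunkSize; (void)Arg;
      },
      /*Arg=*/nullptr);
  Allocator.enable();
}
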
720 // Enabling odd/even tags involves a tradeoff between use-after-free in setOption()
724 // use-after-free is less likely to be detected because the tag space for in setOption()
733 // We leave it to the various sub-components to decide whether or not they in setOption()
734 // want to handle the option, but we do not want to short-circuit in setOption()
737 const bool SecondaryResult = Secondary.setOption(O, Value); in setOption()
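
The tradeoff described above is exposed through setOption(). A hedged sketch, assuming the Option::MemtagTuning value and the M_MEMTAG_TUNING_* constants from the Scudo interface header; the lines shown here only establish that unhandled options are forwarded to the Primary and Secondary:

template <class AllocatorT>
void tuneForOverflowDetection(AllocatorT &Allocator) {
  // Odd/even tags on adjacent chunks catch linear overflows deterministically,
  // at the cost of halving the tag space left for use-after-free detection.
  Allocator.setOption(scudo::Option::MemtagTuning,
                      M_MEMTAG_TUNING_BUFFER_OVERFLOW);
}
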
804 // may end up calling the allocator (via pthread_atfork, via the post-init in disableMemoryTagging()
809 Secondary.disableMemoryTagging(); in disableMemoryTagging()
816 if (getFlags()->allocation_ring_buffer_size <= 0) { in setTrackAllocationStacks()
844 return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr; in getStackDepotAddress()
850 return RB ? RB->StackDepotSize : 0; in getStackDepotSize()
869 return RB && RB->RingBufferElements in getRingBufferSize()
870 ? ringBufferSizeInBytes(RB->RingBufferElements) in getRingBufferSize()
879 if (!Depot->find(Hash, &RingPos, &Size)) in collectTraceMaybe()
882 Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I)); in collectTraceMaybe()
902 // check for corrupted StackDepot. First we need to check whether we can in getErrorInfo()
907 if (!Depot->isValid(DepotSize)) in getErrorInfo()
921 // for secondary allocations we can find either UAF or OOB. in getErrorInfo()
951 // These are indexes into an "array" of 32-bit values that store information
954 // negative indexes may be used. The smallest index that may be used is -2,
958 static const sptr MemTagAllocationTraceIndex = -2;
959 static const sptr MemTagAllocationTidIndex = -1;
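
The two constants above give the layout described in the comment: allocation metadata lives in 32-bit slots immediately before the user pointer, addressed with negative indexes. A minimal illustrative sketch of that addressing (not the allocator's code; it elides the memory-tagging conditions under which the slots are actually written, cf. the Ptr32[MemTagAllocationTraceIndex] store further down):

#include <cstdint>
// Read the stack-trace hash and thread id stored just before the user pointer.
inline void readAllocationMetadata(void *UserPtr, uint32_t &Trace,
                                   uint32_t &Tid) {
  auto *Ptr32 = reinterpret_cast<uint32_t *>(UserPtr);
  Trace = Ptr32[-2]; // MemTagAllocationTraceIndex
  Tid = Ptr32[-1];   // MemTagAllocationTidIndex
}
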
966 SecondaryT Secondary; variable
1013 // Verify that the header offset field can hold the maximum offset. In the in performSanityChecks()
1014 // case of the Secondary allocator, it takes care of alignment and the in performSanityChecks()
1023 SizeClassMap::MaxSize - MinAlignment); in performSanityChecks()
1025 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog; in performSanityChecks()
1030 // Verify that we can fit the maximum size or amount of unused bytes in the in performSanityChecks()
1031 // header. Given that the Secondary fits the allocation to a page, the worst in performSanityChecks()
1034 // The following is an over-approximation that works for our needs. in performSanityChecks()
1035 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1; in performSanityChecks()
1049 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() - in getBlockBegin()
1050 (static_cast<uptr>(Header->Offset) << MinAlignmentLog)); in getBlockBegin()
1055 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; in getSize()
1056 if (LIKELY(Header->ClassId)) in getSize()
1060 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) - in getSize()
1061 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes; in getSize()
1073 // Only do content fill when it's from primary allocator because secondary in initChunk()
1085 const uptr Offset = UserPtr - DefaultAlignedPtr; in initChunk()
1088 // the chunk iteration function that can be used in debugging situations. in initChunk()
1126 // faulting potentially large numbers of pages for large secondary in initChunkWithMemoryTagging()
1131 // mapping, we should be able to tag secondary allocations as well. in initChunkWithMemoryTagging()
1160 // We can detect case (2) by loading the tag from the start in initChunkWithMemoryTagging()
1166 // We can detect case (3) by moving to the next page (if covered by the in initChunkWithMemoryTagging()
1169 // hand, if it is nonzero, we can assume that all following pages are in initChunkWithMemoryTagging()
1185 // If an allocation needs to be zeroed (i.e. calloc) we can normally in initChunkWithMemoryTagging()
1186 // avoid zeroing the memory now since we can rely on memory having in initChunkWithMemoryTagging()
1188 // UAF tag. But if tagging was disabled per-thread when the memory in initChunkWithMemoryTagging()
1192 Min(Size, roundUp(PrevEnd - TaggedUserPtr, in initChunkWithMemoryTagging()
1206 // Init the secondary chunk. in initChunkWithMemoryTagging()
1217 const uptr Offset = UserPtr - DefaultAlignedPtr; in initChunkWithMemoryTagging()
1220 // the chunk iteration function that can be used in debugging situations. in initChunkWithMemoryTagging()
1245 ((Size - 1) >= QuarantineMaxChunkSize) || in quarantineOrDeallocateChunk()
1246 !Header->ClassId; in quarantineOrDeallocateChunk()
1248 Header->State = Chunk::State::Available; in quarantineOrDeallocateChunk()
1250 Header->State = Chunk::State::Quarantined; in quarantineOrDeallocateChunk()
1254 Header->OriginOrWasZeroed = 0U; in quarantineOrDeallocateChunk()
1259 Header->OriginOrWasZeroed = in quarantineOrDeallocateChunk()
1260 Header->ClassId && !TSDRegistry.getDisableMemInit(); in quarantineOrDeallocateChunk()
1268 const uptr ClassId = Header->ClassId; in quarantineOrDeallocateChunk()
1273 CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin); in quarantineOrDeallocateChunk()
1282 Secondary.deallocate(Options, BlockBegin); in quarantineOrDeallocateChunk()
1286 Quarantine.put(&TSD->getQuarantineCache(), in quarantineOrDeallocateChunk()
1287 QuarantineCallback(*this, TSD->getCache()), Ptr, Size); in quarantineOrDeallocateChunk()
1298 if (Header->ClassId && !TSDRegistry.getDisableMemInit()) { in retagBlock()
1302 Header->ClassId); in retagBlock()
1311 if (BypassQuarantine && !Header->ClassId) { in retagBlock()
1360 // allocation, the chunk may already have a non-zero tag from the previous in prepareTaggedChunk()
1362 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize()); in prepareTaggedChunk()
1395 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot); in storePrimaryAllocationStackMaybe()
1403 uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed); in storeRingBufferEntry()
1405 getRingBufferEntry(RB, Pos % RB->RingBufferElements); in storeRingBufferEntry()
1411 atomic_store_relaxed(&Entry->Ptr, 0); in storeRingBufferEntry()
1414 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace); in storeRingBufferEntry()
1415 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid); in storeRingBufferEntry()
1416 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize); in storeRingBufferEntry()
1417 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace); in storeRingBufferEntry()
1418 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid); in storeRingBufferEntry()
1421 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr)); in storeRingBufferEntry()
1431 u32 Trace = collectStackTrace(RB->Depot); in storeSecondaryAllocationStackMaybe()
1452 u32 DeallocationTrace = collectStackTrace(RB->Depot); in storeDeallocationStackMaybe()
1461 sizeof(((scudo_error_info *)nullptr)->reports) /
1462 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1476 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool { in getInlineErrorInfo()
1480 *Data = &Memory[Addr - MemoryAddr]; in getInlineErrorInfo()
1482 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]); in getInlineErrorInfo()
1500 ChunkBegin - Chunk::getHeaderSize()); in getInlineErrorInfo()
1505 if (Header->SizeOrUnusedBytes == 0) in getInlineErrorInfo()
1526 auto *R = &ErrorInfo->reports[NextErrorReport++]; in getInlineErrorInfo()
1527 R->error_type = in getInlineErrorInfo()
1529 R->allocation_address = ChunkAddr; in getInlineErrorInfo()
1530 R->allocation_size = Header.SizeOrUnusedBytes; in getInlineErrorInfo()
1532 collectTraceMaybe(Depot, R->allocation_trace, in getInlineErrorInfo()
1535 R->allocation_tid = Data[MemTagAllocationTidIndex]; in getInlineErrorInfo()
1544 CheckOOB(Info.BlockBegin - I * Info.BlockSize)) in getInlineErrorInfo()
1559 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos); in getRingBufferErrorInfo()
1561 for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements && in getRingBufferErrorInfo()
1563 --I) { in getRingBufferErrorInfo()
1565 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr); in getRingBufferErrorInfo()
1570 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize); in getRingBufferErrorInfo()
1571 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace); in getRingBufferErrorInfo()
1572 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid); in getRingBufferErrorInfo()
1573 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace); in getRingBufferErrorInfo()
1574 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid); in getRingBufferErrorInfo()
1577 // For UAF we only consider in-bounds fault addresses because in getRingBufferErrorInfo()
1578 // out-of-bounds UAF is rare and attempting to detect it is very likely in getRingBufferErrorInfo()
1583 // Ring buffer OOB is only possible with secondary allocations. In this in getRingBufferErrorInfo()
1587 if (FaultAddr < EntryPtr - getPageSizeCached() || in getRingBufferErrorInfo()
1597 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) { in getRingBufferErrorInfo()
1606 auto *R = &ErrorInfo->reports[NextErrorReport++]; in getRingBufferErrorInfo()
1608 R->error_type = USE_AFTER_FREE; in getRingBufferErrorInfo()
1610 R->error_type = BUFFER_UNDERFLOW; in getRingBufferErrorInfo()
1612 R->error_type = BUFFER_OVERFLOW; in getRingBufferErrorInfo()
1614 R->allocation_address = UntaggedEntryPtr; in getRingBufferErrorInfo()
1615 R->allocation_size = EntrySize; in getRingBufferErrorInfo()
1616 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace); in getRingBufferErrorInfo()
1617 R->allocation_tid = AllocationTid; in getRingBufferErrorInfo()
1618 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace); in getRingBufferErrorInfo()
1619 R->deallocation_tid = DeallocationTid; in getRingBufferErrorInfo()
1625 Secondary.getStats(Str); in getStats()
1628 return Str->length(); in getStats()
1651 int ring_buffer_size = getFlags()->allocation_ring_buffer_size; in initRingBufferMaybe()
1684 Depot->init(RingSize, TabSize); in initRingBufferMaybe()
1693 RB->RawRingBufferMap = MemMap; in initRingBufferMaybe()
1694 RB->RingBufferElements = AllocationRingBufferSize; in initRingBufferMaybe()
1695 RB->Depot = Depot; in initRingBufferMaybe()
1696 RB->StackDepotSize = StackDepotSize; in initRingBufferMaybe()
1697 RB->RawStackDepotMap = DepotMap; in initRingBufferMaybe()
1709 RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(), in unmapRingBuffer()
1710 RB->RawStackDepotMap.getCapacity()); in unmapRingBuffer()
1711 // Note that the `RB->RawRingBufferMap` is stored on the pages managed by in unmapRingBuffer()
1714 MemMapT RawRingBufferMap = RB->RawRingBufferMap; in unmapRingBuffer()
1729 return (Bytes - sizeof(AllocationRingBuffer)) / in ringBufferElementsFromBytes()