Lines Matching +full:can +full:- +full:primary
1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
108 Allocator.Primary.Options.load()))) in allocate()
130 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - in deallocate()
158 if (getFlags()->may_return_null) in init()
159 Primary.Options.set(OptionBit::MayReturnNull); in init()
160 if (getFlags()->zero_contents) in init()
161 Primary.Options.setFillContentsMode(ZeroFill); in init()
162 else if (getFlags()->pattern_fill_contents) in init()
163 Primary.Options.setFillContentsMode(PatternOrZeroFill); in init()
164 if (getFlags()->dealloc_type_mismatch) in init()
165 Primary.Options.set(OptionBit::DeallocTypeMismatch); in init()
166 if (getFlags()->delete_size_mismatch) in init()
167 Primary.Options.set(OptionBit::DeleteSizeMismatch); in init()
170 Primary.Options.set(OptionBit::UseMemoryTagging); in init()
173 static_cast<u32>(getFlags()->quarantine_max_chunk_size); in init()
179 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms; in init()
180 Primary.init(ReleaseToOsIntervalMs); in init()
183 static_cast<uptr>(getFlags()->quarantine_size_kb << 10), in init()
184 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10)); in init()
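The init() lines above wire standalone Scudo flags onto Primary.Options bits and the quarantine sizes. As a hedged usage sketch (assuming the standalone flag parser, which typically reads the SCUDO_OPTIONS environment variable and a weak __scudo_default_options() hook; that mechanism is not part of this excerpt), the same flags could be supplied at process start:

extern "C" const char *__scudo_default_options() {
  // Flag names match the getFlags() accessors shown above; the string format
  // and this hook itself are assumptions, not shown in the matched lines.
  return "zero_contents=true:dealloc_type_mismatch=true:"
         "delete_size_mismatch=true:release_to_os_interval_ms=1000";
}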
190 RB->Depot->enable(); in enableRingBuffer()
198 RB->Depot->disable(); in disableRingBuffer()
201 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
206 Opt.Enabled = getFlags()->GWP_ASAN_Enabled; in initGwpAsan()
208 getFlags()->GWP_ASAN_MaxSimultaneousAllocations; in initGwpAsan()
209 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate; in initGwpAsan()
210 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers; in initGwpAsan()
211 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable; in initGwpAsan()
212 // Embedded GWP-ASan is locked through the Scudo atfork handler (via in initGwpAsan()
213 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork in initGwpAsan()
227 GuardedAlloc.getAllocatorState()->maximumAllocationSize(); in initGwpAsan()
250 Primary.unmapTestOnly(); in unmapTestOnly()
253 if (getFlags()->GWP_ASAN_InstallSignalHandlers) in unmapTestOnly()
262 // The Cache must be provided zero-initialized.
263 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); } in initCache()
266 // - draining the local quarantine cache to the global quarantine;
267 // - releasing the cached pointers back to the Primary;
268 // - unlinking the local stats from the global ones (destroying the cache does
271 TSD->assertLocked(/*BypassCheck=*/true); in commitBack()
272 Quarantine.drain(&TSD->getQuarantineCache(), in commitBack()
273 QuarantineCallback(*this, TSD->getCache())); in commitBack()
274 TSD->getCache().destroy(&Stats); in commitBack()
278 TSD->assertLocked(/*BypassCheck=*/true); in drainCache()
279 Quarantine.drainAndRecycle(&TSD->getQuarantineCache(), in drainCache()
280 QuarantineCallback(*this, TSD->getCache())); in drainCache()
281 TSD->getCache().drain(); in drainCache()
315 return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size); in collectStackTrace()
330 // can use to determine which tag mask to use. in computeOddEvenMaskForPointerMaybe()
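The truncated comment above refers to odd/even tag masks: neighbouring blocks of the same size class alternate tag parity, so a linear overflow into the adjacent block changes parity and trips the MTE tag check. A minimal illustrative sketch, not the exact Scudo computation (the 16-bit exclude-mask convention is an assumption):

#include <cstdint>

// Returns a mask of tags to exclude when picking a random tag for the block
// at Ptr: blocks at even positions within the region exclude one parity of
// tags, blocks at odd positions exclude the other, so adjacent blocks end up
// with tags of different parity.
inline uint64_t oddEvenExcludeMask(uint64_t Ptr, unsigned BlockSizeLog) {
  return 0x5555ull << ((Ptr >> BlockSizeLog) & 1);
}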
339 const Options Options = Primary.Options.load();
375 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
390 Block = TSD->getCache().allocate(ClassId);
396 Block = TSD->getCache().allocate(++ClassId);
416 ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
463 const Options Options = Primary.Options.load();
466 // With the exception of memalign'd chunks, that can still be free'd.
486 const Options Options = Primary.Options.load();
524 // Pointer has to be allocated with a malloc-type function. Some
544 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
551 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
561 OldSize - NewSize);
567 : BlockEnd -
587 // allow for potential further in-place realloc. The gains of such a trick
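A hedged reading of the reallocate() fragments above (the enclosing fits-in-block check is assumed; variable names follow the excerpt): the old block is kept when the request grows within it or shrinks by less than a page, while a shrink of a full page or more falls through to a fresh allocation so memory can actually be returned.

#include <cstddef>
#include <cstdint>

// Illustrative decision only, not the Scudo API.
inline bool reuseOldBlockInPlace(uintptr_t TaggedUserPtr, size_t OldSize,
                                 size_t NewSize, uintptr_t BlockEnd,
                                 size_t PageSize) {
  if (TaggedUserPtr + NewSize > BlockEnd)
    return false; // the new size does not fit in the existing block
  return NewSize > OldSize || (OldSize - NewSize) < PageSize;
}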
597 // TODO(kostyak): disable() is currently best-effort. There are some small
608 Primary.disable(); in disable()
617 Primary.enable(); in enable()
629 // function. This can be called with a null buffer or zero size for buffer
638 Buffer[Size - 1] = '\0'; in getStats()
651 Primary.getFragmentationInfo(&Str); in printFragmentationInfo()
660 Primary.releaseToOS(ReleaseType); in releaseToOS()
666 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
684 // A chunk header can either have a zero tag (tagged primary) or the in iterateOverChunks()
685 // header tag (secondary, or untagged primary). We don't know which so in iterateOverChunks()
699 if (useMemoryTagging<AllocatorConfig>(Primary.Options.load())) in iterateOverChunks()
705 Primary.iterateOverBlocks(Lambda); in iterateOverChunks()
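As the comments above indicate, iterateOverChunks() expects the caller to have disabled the allocator and backs interfaces such as Android's malloc_iterate(). A hedged usage sketch (the callback signature is assumed from the public Scudo interface and is not part of the matched lines):

#include <cstddef>
#include <cstdint>

// AllocatorT stands in for the combined Scudo allocator type.
template <class AllocatorT>
void dumpLiveChunks(AllocatorT &Allocator, uintptr_t Base, size_t Size) {
  Allocator.disable(); // required: iteration must not race with allocation
  Allocator.iterateOverChunks(
      Base, Size,
      [](uintptr_t ChunkBase, size_t ChunkSize, void *Arg) {
        // Record [ChunkBase, ChunkBase + ChunkSize) as a live allocation.
        (void)ChunkBase;
        (void)ChunkSize;
        (void)Arg;
      },
      /*Arg=*/nullptr);
  Allocator.enable();
}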
714 return Primary.Options.load().get(OptionBit::MayReturnNull); in canReturnNull()
720 // Enabling odd/even tags involves a tradeoff between use-after-free in setOption()
724 // use-after-free is less likely to be detected because the tag space for in setOption()
728 Primary.Options.set(OptionBit::UseOddEvenTags); in setOption()
730 Primary.Options.clear(OptionBit::UseOddEvenTags); in setOption()
733 // We leave it to the various sub-components to decide whether or not they in setOption()
734 // want to handle the option, but we do not want to short-circuit in setOption()
736 const bool PrimaryResult = Primary.setOption(O, Value); in setOption()
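The odd/even trade-off described above is selected through setOption(). A hedged usage sketch (Option::MemtagTuning and the M_MEMTAG_TUNING_* constants are assumed from the public Scudo/Android memtag-tuning interface; they are not visible in this excerpt):

// AllocatorT stands in for the combined allocator; the option enum and the
// tuning constants are assumptions, declared in the public Scudo headers.
template <class AllocatorT>
void tuneMemoryTagging(AllocatorT &Allocator, bool PreferOverflowDetection) {
  Allocator.setOption(scudo::Option::MemtagTuning,
                      PreferOverflowDetection
                          ? M_MEMTAG_TUNING_BUFFER_OVERFLOW // enable odd/even tags
                          : M_MEMTAG_TUNING_UAF);           // keep full tag space
}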
798 return useMemoryTagging<AllocatorConfig>(Primary.Options.load()); in useMemoryTaggingTestOnly()
804 // may end up calling the allocator (via pthread_atfork, via the post-init in disableMemoryTagging()
810 Primary.Options.clear(OptionBit::UseMemoryTagging); in disableMemoryTagging()
816 if (getFlags()->allocation_ring_buffer_size <= 0) { in setTrackAllocationStacks()
817 DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks)); in setTrackAllocationStacks()
823 Primary.Options.set(OptionBit::TrackAllocationStacks); in setTrackAllocationStacks()
825 Primary.Options.clear(OptionBit::TrackAllocationStacks); in setTrackAllocationStacks()
830 Primary.Options.setFillContentsMode(FillContents); in setFillContents()
836 Primary.Options.set(OptionBit::AddLargeAllocationSlack); in setAddLargeAllocationSlack()
838 Primary.Options.clear(OptionBit::AddLargeAllocationSlack); in setAddLargeAllocationSlack()
844 return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr; in getStackDepotAddress()
850 return RB ? RB->StackDepotSize : 0; in getStackDepotSize()
854 return Primary.getRegionInfoArrayAddress(); in getRegionInfoArrayAddress()
869 return RB && RB->RingBufferElements in getRingBufferSize()
870 ? ringBufferSizeInBytes(RB->RingBufferElements) in getRingBufferSize()
879 if (!Depot->find(Hash, &RingPos, &Size)) in collectTraceMaybe()
882 Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I)); in collectTraceMaybe()
902 // check for corrupted StackDepot. First we need to check whether we can in getErrorInfo()
907 if (!Depot->isValid(DepotSize)) in getErrorInfo()
920 // Check the ring buffer. For primary allocations this will only find UAF; in getErrorInfo()
921 // for secondary allocations we can find either UAF or OOB. in getErrorInfo()
951 // These are indexes into an "array" of 32-bit values that store information
954 // negative indexes may be used. The smallest index that may be used is -2,
958 static const sptr MemTagAllocationTraceIndex = -2;
959 static const sptr MemTagAllocationTidIndex = -1;
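Concretely, these indexes address the two 32-bit slots immediately before the user pointer, as the storePrimaryAllocationStackMaybe() fragment further down shows. A minimal sketch of that layout (the helper name is illustrative):

#include <cstdint>

// Stores the allocation stack hash and allocating thread id in the two
// 32-bit words directly preceding the user allocation (indexes -2 and -1).
inline void storeTraceAndTid(void *UserPtr, uint32_t TraceHash, uint32_t Tid) {
  auto *Ptr32 = reinterpret_cast<uint32_t *>(UserPtr);
  Ptr32[-2] = TraceHash; // MemTagAllocationTraceIndex
  Ptr32[-1] = Tid;       // MemTagAllocationTidIndex
}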
965 PrimaryT Primary; variable
1013 // Verify that the header offset field can hold the maximum offset. In the in performSanityChecks()
1015 // offset will always be small. In the case of the Primary, the worst case in performSanityChecks()
1023 SizeClassMap::MaxSize - MinAlignment); in performSanityChecks()
1025 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog; in performSanityChecks()
1030 // Verify that we can fit the maximum size or amount of unused bytes in the in performSanityChecks()
1032 // case scenario happens in the Primary. It will depend on the second to in performSanityChecks()
1033 // last and last class sizes, as well as the dynamic base for the Primary. in performSanityChecks()
1034 // The following is an over-approximation that works for our needs. in performSanityChecks()
1035 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1; in performSanityChecks()
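A hedged sketch of the kind of check described above (field widths and names are illustrative): the header's Offset field, expressed in MinAlignment units, must be able to encode the worst-case alignment padding of a Primary allocation.

#include <cstdint>

// Returns true if an Offset field of OffsetFieldBits bits can hold the
// largest offset a maximally aligned Primary allocation could need.
constexpr bool offsetFieldFits(uint64_t MaxPrimaryAlignment, uint64_t HeaderSize,
                               uint64_t MinAlignmentLog, uint64_t OffsetFieldBits) {
  const uint64_t MaxOffset = (MaxPrimaryAlignment - HeaderSize) >> MinAlignmentLog;
  return MaxOffset <= ((uint64_t{1} << OffsetFieldBits) - 1);
}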
1049 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() - in getBlockBegin()
1050 (static_cast<uptr>(Header->Offset) << MinAlignmentLog)); in getBlockBegin()
1055 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; in getSize()
1056 if (LIKELY(Header->ClassId)) in getSize()
1060 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) - in getSize()
1061 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes; in getSize()
1073 // Only do content fill when it's from primary allocator because secondary in initChunk()
1085 const uptr Offset = UserPtr - DefaultAlignedPtr; in initChunk()
1088 // the chunk iteration function that can be used in debugging situations. in initChunk()
1111 const Options Options = Primary.Options.load(); in initChunkWithMemoryTagging()
1122 // Init the primary chunk. in initChunkWithMemoryTagging()
1124 // We only need to zero or tag the contents for Primary backed in initChunkWithMemoryTagging()
1125 // allocations. We only set tags for primary allocations in order to avoid in initChunkWithMemoryTagging()
1160 // We can detect case (2) by loading the tag from the start in initChunkWithMemoryTagging()
1166 // We can detect case (3) by moving to the next page (if covered by the in initChunkWithMemoryTagging()
1169 // hand, if it is nonzero, we can assume that all following pages are in initChunkWithMemoryTagging()
1185 // If an allocation needs to be zeroed (i.e. calloc) we can normally in initChunkWithMemoryTagging()
1186 // avoid zeroing the memory now since we can rely on memory having in initChunkWithMemoryTagging()
1188 // UAF tag. But if tagging was disabled per-thread when the memory in initChunkWithMemoryTagging()
1192 Min(Size, roundUp(PrevEnd - TaggedUserPtr, in initChunkWithMemoryTagging()
1217 const uptr Offset = UserPtr - DefaultAlignedPtr; in initChunkWithMemoryTagging()
1220 // the chunk iteration function that can be used in debugging situations. in initChunkWithMemoryTagging()
1245 ((Size - 1) >= QuarantineMaxChunkSize) || in quarantineOrDeallocateChunk()
1246 !Header->ClassId; in quarantineOrDeallocateChunk()
1248 Header->State = Chunk::State::Available; in quarantineOrDeallocateChunk()
1250 Header->State = Chunk::State::Quarantined; in quarantineOrDeallocateChunk()
1254 Header->OriginOrWasZeroed = 0U; in quarantineOrDeallocateChunk()
1259 Header->OriginOrWasZeroed = in quarantineOrDeallocateChunk()
1260 Header->ClassId && !TSDRegistry.getDisableMemInit(); in quarantineOrDeallocateChunk()
1268 const uptr ClassId = Header->ClassId; in quarantineOrDeallocateChunk()
1273 CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin); in quarantineOrDeallocateChunk()
1275 // When we have drained some blocks back to the Primary from TSD, that in quarantineOrDeallocateChunk()
1280 Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal); in quarantineOrDeallocateChunk()
1286 Quarantine.put(&TSD->getQuarantineCache(), in quarantineOrDeallocateChunk()
1287 QuarantineCallback(*this, TSD->getCache()), Ptr, Size); in quarantineOrDeallocateChunk()
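The first quarantineOrDeallocateChunk() fragments above show the bypass condition. A hedged restatement (the zero-capacity check is assumed from the surrounding source):

#include <cstddef>
#include <cstdint>

// Quarantining is skipped when the quarantine has no capacity, the chunk is
// larger than quarantine_max_chunk_size, or it came from the Secondary
// (ClassId == 0); such chunks go straight back to their backing allocator.
inline bool bypassQuarantine(size_t QuarantineCacheSize, size_t Size,
                             size_t QuarantineMaxChunkSize, uint32_t ClassId) {
  return QuarantineCacheSize == 0 || (Size - 1) >= QuarantineMaxChunkSize ||
         ClassId == 0;
}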
1298 if (Header->ClassId && !TSDRegistry.getDisableMemInit()) { in retagBlock()
1302 Header->ClassId); in retagBlock()
1311 if (BypassQuarantine && !Header->ClassId) { in retagBlock()
1360 // allocation, the chunk may already have a non-zero tag from the previous in prepareTaggedChunk()
1362 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize()); in prepareTaggedChunk()
1395 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot); in storePrimaryAllocationStackMaybe()
1403 uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed); in storeRingBufferEntry()
1405 getRingBufferEntry(RB, Pos % RB->RingBufferElements); in storeRingBufferEntry()
1411 atomic_store_relaxed(&Entry->Ptr, 0); in storeRingBufferEntry()
1414 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace); in storeRingBufferEntry()
1415 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid); in storeRingBufferEntry()
1416 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize); in storeRingBufferEntry()
1417 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace); in storeRingBufferEntry()
1418 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid); in storeRingBufferEntry()
1421 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr)); in storeRingBufferEntry()
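The stores above follow a publish protocol: the entry is first invalidated by writing Ptr = 0, the payload fields are filled in, and the real pointer is stored last, so a reader that observes a non-zero Ptr also observes a complete entry. The compiler fences between the groups are assumed; they are not part of the matched lines. A simplified sketch using standard atomics rather than Scudo's helpers:

#include <atomic>
#include <cstdint>

struct Entry {
  std::atomic<uintptr_t> Ptr;
  std::atomic<uint32_t> AllocationTrace, AllocationTid;
  std::atomic<uintptr_t> AllocationSize;
};

// Illustrative publication order only.
inline void publishEntry(Entry &E, uintptr_t Ptr, uint32_t Trace, uint32_t Tid,
                         uintptr_t Size) {
  E.Ptr.store(0, std::memory_order_relaxed);   // invalidate for readers
  std::atomic_signal_fence(std::memory_order_seq_cst);
  E.AllocationTrace.store(Trace, std::memory_order_relaxed);
  E.AllocationTid.store(Tid, std::memory_order_relaxed);
  E.AllocationSize.store(Size, std::memory_order_relaxed);
  std::atomic_signal_fence(std::memory_order_seq_cst);
  E.Ptr.store(Ptr, std::memory_order_relaxed); // publish the entry
}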
1431 u32 Trace = collectStackTrace(RB->Depot); in storeSecondaryAllocationStackMaybe()
1452 u32 DeallocationTrace = collectStackTrace(RB->Depot); in storeDeallocationStackMaybe()
1461 sizeof(((scudo_error_info *)nullptr)->reports) /
1462 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1476 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool { in getInlineErrorInfo()
1480 *Data = &Memory[Addr - MemoryAddr]; in getInlineErrorInfo()
1482 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]); in getInlineErrorInfo()
1500 ChunkBegin - Chunk::getHeaderSize()); in getInlineErrorInfo()
1505 if (Header->SizeOrUnusedBytes == 0) in getInlineErrorInfo()
1526 auto *R = &ErrorInfo->reports[NextErrorReport++]; in getInlineErrorInfo()
1527 R->error_type = in getInlineErrorInfo()
1529 R->allocation_address = ChunkAddr; in getInlineErrorInfo()
1530 R->allocation_size = Header.SizeOrUnusedBytes; in getInlineErrorInfo()
1532 collectTraceMaybe(Depot, R->allocation_trace, in getInlineErrorInfo()
1535 R->allocation_tid = Data[MemTagAllocationTidIndex]; in getInlineErrorInfo()
1544 CheckOOB(Info.BlockBegin - I * Info.BlockSize)) in getInlineErrorInfo()
1559 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos); in getRingBufferErrorInfo()
1561 for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements && in getRingBufferErrorInfo()
1563 --I) { in getRingBufferErrorInfo()
1565 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr); in getRingBufferErrorInfo()
1570 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize); in getRingBufferErrorInfo()
1571 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace); in getRingBufferErrorInfo()
1572 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid); in getRingBufferErrorInfo()
1573 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace); in getRingBufferErrorInfo()
1574 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid); in getRingBufferErrorInfo()
1577 // For UAF we only consider in-bounds fault addresses because in getRingBufferErrorInfo()
1578 // out-of-bounds UAF is rare and attempting to detect it is very likely in getRingBufferErrorInfo()
1587 if (FaultAddr < EntryPtr - getPageSizeCached() || in getRingBufferErrorInfo()
1597 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) { in getRingBufferErrorInfo()
1606 auto *R = &ErrorInfo->reports[NextErrorReport++]; in getRingBufferErrorInfo()
1608 R->error_type = USE_AFTER_FREE; in getRingBufferErrorInfo()
1610 R->error_type = BUFFER_UNDERFLOW; in getRingBufferErrorInfo()
1612 R->error_type = BUFFER_OVERFLOW; in getRingBufferErrorInfo()
1614 R->allocation_address = UntaggedEntryPtr; in getRingBufferErrorInfo()
1615 R->allocation_size = EntrySize; in getRingBufferErrorInfo()
1616 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace); in getRingBufferErrorInfo()
1617 R->allocation_tid = AllocationTid; in getRingBufferErrorInfo()
1618 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace); in getRingBufferErrorInfo()
1619 R->deallocation_tid = DeallocationTid; in getRingBufferErrorInfo()
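A hedged reconstruction of the report classification visible above (the liveness test for a ring-buffer entry is not part of the matched lines and is assumed): a fault on an already-deallocated block is a use-after-free, otherwise the fault address relative to the allocation decides between underflow and overflow, with the overflow branch covering addresses at or past the end of the block.

#include <cstdint>

enum FaultKind { UseAfterFree, BufferUnderflow, BufferOverflow };

inline FaultKind classifyFault(uintptr_t FaultAddr, uintptr_t EntryPtr,
                               bool EntryDeallocated) {
  if (EntryDeallocated)
    return UseAfterFree;    // fault on a block the ring buffer saw freed
  if (FaultAddr < EntryPtr)
    return BufferUnderflow; // fault below the allocation start
  return BufferOverflow;    // fault at or past the end of the allocation
}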
1624 Primary.getStats(Str); in getStats()
1628 return Str->length(); in getStats()
1651 int ring_buffer_size = getFlags()->allocation_ring_buffer_size; in initRingBufferMaybe()
1684 Depot->init(RingSize, TabSize); in initRingBufferMaybe()
1693 RB->RawRingBufferMap = MemMap; in initRingBufferMaybe()
1694 RB->RingBufferElements = AllocationRingBufferSize; in initRingBufferMaybe()
1695 RB->Depot = Depot; in initRingBufferMaybe()
1696 RB->StackDepotSize = StackDepotSize; in initRingBufferMaybe()
1697 RB->RawStackDepotMap = DepotMap; in initRingBufferMaybe()
1709 RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(), in unmapRingBuffer()
1710 RB->RawStackDepotMap.getCapacity()); in unmapRingBuffer()
1711 // Note that the `RB->RawRingBufferMap` is stored on the pages managed by in unmapRingBuffer()
1714 MemMapT RawRingBufferMap = RB->RawRingBufferMap; in unmapRingBuffer()
1729 return (Bytes - sizeof(AllocationRingBuffer)) / in ringBufferElementsFromBytes()
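For reference, the conversion above implies that the mapping holds one AllocationRingBuffer header followed by a flat array of fixed-size entries. A hedged sketch of the pair of inverse conversions (the header and entry sizes are placeholders for the real sizeof values):

#include <cstddef>

// Illustrative only: HeaderSize stands for sizeof(AllocationRingBuffer) and
// EntrySize for the per-entry size in the real code.
constexpr size_t elementsFromBytes(size_t Bytes, size_t HeaderSize,
                                   size_t EntrySize) {
  return (Bytes - HeaderSize) / EntrySize;
}
constexpr size_t bytesFromElements(size_t Elements, size_t HeaderSize,
                                   size_t EntrySize) {
  return HeaderSize + Elements * EntrySize;
}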