Lines Matching +full:trim +full:- +full:config

1 //===-- secondary.h ---------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
51 template <typename Config> static uptr addHeaderTag(uptr Ptr) { in addHeaderTag()
52 if (allocatorSupportsMemoryTagging<Config>()) in addHeaderTag()
57 template <typename Config> static Header *getHeader(uptr Ptr) { in getHeader()
58 return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1; in getHeader()
61 template <typename Config> static Header *getHeader(const void *Ptr) { in getHeader()
62 return getHeader<Config>(reinterpret_cast<uptr>(Ptr)); in getHeader()
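Taken together, the getHeader() overloads above recover the block header from a user pointer: the Header lives immediately before the address handed out, and addHeaderTag() only matters when the config supports memory tagging. A minimal sketch of that layout, with a simplified Header and tagging left out (names and sizes here are illustrative, not scudo's):

#include <cstdint>
#include <cstdlib>

using uptr = uintptr_t;

// Simplified stand-in for LargeBlock::Header; the real header also stores the
// MemMap descriptor. Tagging (addHeaderTag) is omitted from this sketch.
struct Header {
  uptr CommitBase;
  uptr CommitSize;
};

// The header sits immediately before the pointer handed back to the caller,
// so stepping back one Header recovers it.
static Header *getHeader(void *Ptr) {
  return reinterpret_cast<Header *>(Ptr) - 1;
}

int main() {
  // Carve a header plus a small "user" region out of one allocation.
  void *Raw = malloc(sizeof(Header) + 64);
  if (!Raw)
    return 1;
  auto *H = static_cast<Header *>(Raw);
  H->CommitBase = reinterpret_cast<uptr>(Raw);
  H->CommitSize = sizeof(Header) + 64;
  void *UserPtr = H + 1;
  const bool Ok = getHeader(UserPtr) == H; // round trip recovers the header
  free(Raw);
  return Ok ? 0 : 1;
}

The round trip only works because allocate() (further down in the matches) always places the header at AllocPos - getHeadersSize(), directly below the returned block.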
68 // Note that the `H->MemMap` is stored on the pages managed by itself. Take in unmap()
71 MemMapT MemMap = H->MemMap; in unmap()
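The comment above unmap() is the key detail: the header, including its MemMap descriptor, lives on the very pages that descriptor maps, so the function must copy it to a local before the unmap makes those pages inaccessible. A generic, hedged illustration of the same pattern with raw mmap/munmap (POSIX only, not scudo's MemMapT API):

#include <sys/mman.h>
#include <cstddef>

// Descriptor stored at the start of the mapping it describes, loosely
// mirroring how the Header/MemMap live on the pages they manage.
struct Region {
  void *Base;
  size_t Size;
};

static void unmapRegion(Region *R) {
  // Copy the descriptor out first: once munmap() runs, *R is gone and any
  // further access to it would fault.
  Region Local = *R;
  munmap(Local.Base, Local.Size);
}

int main() {
  const size_t Size = 1 << 16;
  void *Base = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (Base == MAP_FAILED)
    return 1;
  auto *R = static_cast<Region *>(Base); // descriptor lives inside the mapping
  R->Base = Base;
  R->Size = Size;
  unmapRegion(R);
  return 0;
}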
89 template <typename Config> class MapAllocatorNoCache {
113 Str->append("Secondary Cache Disabled\n"); in getStats()
119 template <typename Config>
130 * and not over it. We can only do this if the address is page-aligned. in mapSecondary()
132 const uptr TaggedSize = AllocPos - CommitBase; in mapSecondary()
133 if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) { in mapSecondary()
137 MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary", in mapSecondary()
141 (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags; in mapSecondary()
148 if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) { in mapSecondary()
150 return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary", in mapSecondary()
152 MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos, in mapSecondary()
156 (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags; in mapSecondary()
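In the memory-tagging path, mapSecondary() splits the committed range at UntaggedPos and issues two adjacent remap calls with different flags, so only part of the range pays for MAP_MEMTAG. The matched lines do not show how UntaggedPos is chosen; the sketch below assumes the split point is max(AllocPos, CommitBase + MaxUnusedCacheBytes) and simply checks that the two sub-ranges tile the committed range exactly:

#include <algorithm>
#include <cstdint>

using uptr = uintptr_t;

int main() {
  const uptr PageSize = 4096;
  // Assumed limit on how much leading unused space gets the tagging flag.
  const uptr MaxUnusedCacheBytes = 4 * PageSize;

  // A 1 MiB committed range whose allocation sits 64 KiB from the end.
  const uptr CommitBase = 0x10000000;
  const uptr CommitSize = 1 << 20;
  const uptr AllocPos = CommitBase + CommitSize - (64 << 10);

  // Assumed split point: at or beyond both the allocation start and
  // CommitBase + MaxUnusedCacheBytes.
  const uptr UntaggedPos =
      std::max(AllocPos, CommitBase + MaxUnusedCacheBytes);

  // The two remap calls cover [CommitBase, UntaggedPos) and
  // [UntaggedPos, CommitBase + CommitSize); they must tile the committed
  // range exactly once.
  const uptr FirstSize = UntaggedPos - CommitBase;
  const uptr SecondSize = CommitBase + CommitSize - UntaggedPos;
  return FirstSize + SecondSize == CommitSize ? 0 : 1;
}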
161 // Template specialization to avoid producing zero-length array
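NonZeroLengthArray exists because the quarantine size is a config constant that may legitimately be zero, and a zero-length C array is ill-formed C++. A self-contained sketch of the idiom (simplified; the upstream version traps with its own UNREACHABLE-style macro on any access to the zero-sized specialization):

#include <cstddef>
#include <cstdlib>

// Primary template: a plain fixed-size array wrapper.
template <typename T, size_t N> struct NonZeroLengthArray {
  T &operator[](size_t I) { return Data[I]; }
  T Data[N];
};

// Specialization for N == 0: no array member at all, so the type stays valid
// C++ even when a config sets the count to zero. Indexing it is a bug, hence
// the hard stop.
template <typename T> struct NonZeroLengthArray<T, 0> {
  T &operator[](size_t) { std::abort(); }
};

int main() {
  NonZeroLengthArray<int, 4> A;
  A[0] = 42;
  NonZeroLengthArray<int, 0> Empty; // legal to instantiate, never indexed
  (void)Empty;
  return A[0] == 42 ? 0 : 1;
}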
174 template <typename Config> class MapAllocatorCache {
183 Str->append( in getStats()
187 atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1); in getStats()
188 Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u " in getStats()
194 Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, " in getStats()
202 static_assert(Config::getDefaultMaxEntriesCount() <=
203 Config::getEntriesArraySize(),
209 static_cast<sptr>(Config::getDefaultMaxEntriesCount())); in init()
211 static_cast<sptr>(Config::getDefaultMaxEntrySize())); in init()
212 // A default set in the cache config takes precedence over the passed-in interval. in init()
213 if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN) in init()
214 ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs(); in init()
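These init() lines show the cache seeding its runtime limits from the config's static getters, with getDefaultReleaseToOsIntervalMs() only overriding the caller-supplied interval when the config actually defines one (INT32_MIN acts as "unset"). A hedged sketch of what such a cache config can look like; the getter names follow the matched lines above, while the values and return types are made up for illustration:

#include <cstddef>
#include <cstdint>

// Illustrative cache config in the style of the getters referenced above;
// every value here is made up for the example.
struct ExampleCacheConfig {
  static constexpr uint32_t getEntriesArraySize() { return 32U; }
  static constexpr uint32_t getDefaultMaxEntriesCount() { return 32U; }
  static constexpr size_t getDefaultMaxEntrySize() { return 1UL << 19; }
  static constexpr uint32_t getQuarantineSize() { return 0U; }
  // INT32_MIN means "no default": init() then keeps the interval passed in
  // by the caller instead of overriding it.
  static constexpr int32_t getDefaultReleaseToOsIntervalMs() {
    return INT32_MIN;
  }
};

// Mirrors the invariant asserted above: the default entry count must fit in
// the statically sized entries array.
static_assert(ExampleCacheConfig::getDefaultMaxEntriesCount() <=
                  ExampleCacheConfig::getEntriesArraySize(),
              "default entries count must fit in the entries array");

int main() { return 0; }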
219 if (!canCache(H->CommitSize)) in store()
228 Entry.CommitBase = H->CommitBase; in store()
229 Entry.CommitSize = H->CommitSize; in store()
231 Entry.MemMap = H->MemMap; in store()
233 if (useMemoryTagging<Config>(Options)) { in store()
240 mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize, in store()
252 if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) { in store()
259 if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) { in store()
261 (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u); in store()
293 releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000); in store()
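With memory tagging on and a non-zero quarantine size, store() advances QuarantinePos as a ring index, so a freshly freed block displaces the oldest quarantined one instead of being reused immediately. A generic sketch of that ring-quarantine behaviour (std::optional stands in for the validity check; this is not scudo's CachedBlock type):

#include <array>
#include <cstddef>
#include <optional>
#include <utility>

// Fixed-size quarantine used as a ring: each insert displaces the oldest
// entry, which is the one now safe to hand onward.
template <typename T, size_t N> class RingQuarantine {
public:
  std::optional<T> push(T Incoming) {
    Pos = (Pos + 1) % N;
    if (!Slots[Pos].has_value()) { // still filling up, nothing displaced yet
      Slots[Pos] = std::move(Incoming);
      return std::nullopt;
    }
    std::optional<T> Oldest = std::move(Slots[Pos]);
    Slots[Pos] = std::move(Incoming);
    return Oldest;
  }

private:
  std::array<std::optional<T>, N> Slots{};
  size_t Pos = N - 1; // so the first push lands in slot 0
};

int main() {
  RingQuarantine<int, 2> Q;
  // The first two pushes only fill the ring; the third displaces the oldest
  // element (1), which is what the caller would then reuse or release.
  const bool Ok = !Q.push(1) && !Q.push(2) && Q.push(3).value_or(-1) == 1;
  return Ok ? 0 : 1;
}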
321 roundDown(CommitBase + CommitSize - Size, Alignment); in retrieve()
322 const uptr HeaderPos = AllocPos - HeadersSize; in retrieve()
330 const uptr Diff = HeaderPos - CommitBase; in retrieve()
334 (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor; in retrieve()
351 EntriesCount--; in retrieve()
359 LargeBlock::addHeaderTag<Config>(EntryHeaderPos)); in retrieve()
361 if (useMemoryTagging<Config>(Options)) in retrieve()
364 if (useMemoryTagging<Config>(Options)) { in retrieve()
366 storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase), in retrieve()
374 (*H)->CommitBase = Entry.CommitBase; in retrieve()
375 (*H)->CommitSize = Entry.CommitSize; in retrieve()
376 (*H)->MemMap = Entry.MemMap; in retrieve()
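retrieve() decides whether a cached block can serve a new request with plain address arithmetic: place the allocation as far toward the end of the cached committed range as the alignment allows, put the headers just below it, and treat whatever is left at the front as fragmentation. A standalone walkthrough of that arithmetic with a local roundDown() and made-up sizes (HeadersSize and the concrete values are assumptions):

#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Local helper mirroring scudo's roundDown (power-of-two alignment assumed).
static uptr roundDown(uptr X, uptr Boundary) { return X & ~(Boundary - 1); }

int main() {
  // A cached block and an incoming request; every value is illustrative.
  const uptr CommitBase = 0x20000000;
  const uptr CommitSize = 256 << 10;
  const uptr HeadersSize = 64; // combined header size, assumed
  const uptr Size = 100 << 10;
  const uptr Alignment = 4096;

  // Place the allocation as close to the end of the cached block as the
  // alignment allows; the headers go immediately below it.
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeadersSize;
  if (HeaderPos < CommitBase) {
    puts("cached block cannot serve this request");
    return 1;
  }
  // Whatever is left in front of the headers is fragmentation; retrieve()
  // uses a measure like this to decide whether the fit is acceptable.
  const uptr FragmentedBytes = HeaderPos - CommitBase;
  printf("AllocPos=%#zx HeaderPos=%#zx fragmented=%zu bytes\n",
         (size_t)AllocPos, (size_t)HeaderPos, (size_t)FragmentedBytes);
  return 0;
}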
388 Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()), in setOption()
389 Config::getMinReleaseToOsIntervalMs()); in setOption()
398 Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize())); in setOption()
413 for (u32 I = 0; I != Config::getQuarantineSize(); ++I) { in disableMemoryTagging()
427 QuarantinePos = -1U; in disableMemoryTagging()
438 MemMapT MapInfo[Config::getEntriesArraySize()]; in empty()
442 for (uptr I = 0; I < Config::getEntriesArraySize(); I++) { in empty()
475 for (uptr I = 0; I < Config::getQuarantineSize(); I++) in releaseOlderThan()
477 for (uptr I = 0; I < Config::getEntriesArraySize(); I++) in releaseOlderThan()
492 CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
493 NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
497 template <typename Config> class MapAllocator {
500 s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
506 S->link(&Stats);
516 auto *B = LargeBlock::getHeader<Config>(Ptr); in getBlockEnd()
517 return B->CommitBase + B->CommitSize; in getBlockEnd()
521 return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr); in getBlockSize()
543 if (allocatorSupportsMemoryTagging<Config>()) in iterateOverBlocks()
562 typename Config::template CacheT<typename Config::CacheConfig> Cache;
578 // (greater than a page) alignments on 32-bit platforms.
582 // to trim off some of the reserved space.
584 // the committed memory will amount to something close to Size - AlignmentHint
586 template <typename Config>
587 void *MapAllocator<Config>::allocate(const Options &Options, uptr Size, in allocate()
604 const uptr BlockEnd = H->CommitBase + H->CommitSize; in allocate()
608 if (allocatorSupportsMemoryTagging<Config>()) in allocate()
614 BlockEnd - PtrInt); in allocate()
618 AllocatedBytes += H->CommitSize; in allocate()
619 FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize; in allocate()
621 Stats.add(StatAllocated, H->CommitSize); in allocate()
622 Stats.add(StatMapped, H->MemMap.getCapacity()); in allocate()
631 RoundedSize += Alignment - PageSize; in allocate()
648 // of memory we want to commit, and trim the extra memory. in allocate()
653 CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize; in allocate()
654 const uptr NewMapBase = CommitBase - PageSize; in allocate()
656 // We only trim the extra memory on 32-bit platforms: 64-bit platforms in allocate()
659 MemMap.unmap(MapBase, NewMapBase - MapBase); in allocate()
666 MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd); in allocate()
671 const uptr CommitSize = MapEnd - PageSize - CommitBase; in allocate()
672 const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment); in allocate()
673 if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, in allocate()
678 const uptr HeaderPos = AllocPos - getHeadersSize(); in allocate()
680 LargeBlock::addHeaderTag<Config>(HeaderPos)); in allocate()
681 if (useMemoryTagging<Config>(Options)) in allocate()
682 storeTags(LargeBlock::addHeaderTag<Config>(CommitBase), in allocate()
684 H->CommitBase = CommitBase; in allocate()
685 H->CommitSize = CommitSize; in allocate()
686 H->MemMap = MemMap; in allocate()
693 FragmentedBytes += H->MemMap.getCapacity() - CommitSize; in allocate()
698 Stats.add(StatMapped, H->MemMap.getCapacity()); in allocate()
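For alignments larger than a page, allocate() over-reserves by Alignment - PageSize, then picks CommitBase so that the headers occupy the page just below an Alignment boundary and the user data starts aligned; 32-bit builds additionally unmap the excess at both ends. A pure-arithmetic sketch of that layout with simplified rounding, local roundUp()/roundDown() helpers, and illustrative sizes (no actual mapping is made):

#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Local helpers (power-of-two boundaries assumed), mirroring scudo's.
static uptr roundUp(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }
static uptr roundDown(uptr X, uptr B) { return X & ~(B - 1); }

int main() {
  const uptr PageSize = 4096;
  const uptr HeadersSize = 48;     // assumed combined header size
  const uptr Size = 300 << 10;     // illustrative request size
  const uptr Alignment = 64 << 10; // larger than a page

  // Over-reserve by Alignment - PageSize so an aligned spot is guaranteed to
  // exist inside the mapping, plus one guard page at each end (simplified
  // rounding compared to the real allocate()).
  uptr RoundedSize = roundUp(Size + HeadersSize, PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;
  const uptr MapBase = 0x70001000; // illustrative reservation base
  const uptr MapEnd = MapBase + RoundedSize + 2 * PageSize;

  // Pick CommitBase so that CommitBase + PageSize lands on an Alignment
  // boundary: the first committed page holds the headers and the user data
  // starts aligned. On 32-bit platforms the front of the reservation is then
  // trimmed down to a single guard page below CommitBase.
  const uptr CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
  const uptr NewMapBase = CommitBase - PageSize; // what a 32-bit build keeps

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeadersSize;

  printf("CommitBase=%#zx, front trim=%zu bytes\n", (size_t)CommitBase,
         (size_t)(NewMapBase - MapBase));
  printf("AllocPos=%#zx (aligned=%d) HeaderPos=%#zx\n", (size_t)AllocPos,
         (int)(AllocPos % Alignment == 0), (size_t)HeaderPos);
  return 0;
}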
703 template <typename Config>
704 void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr) in deallocate()
706 LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr); in deallocate()
707 const uptr CommitSize = H->CommitSize; in deallocate()
712 FragmentedBytes -= H->MemMap.getCapacity() - CommitSize; in deallocate()
715 Stats.sub(StatMapped, H->MemMap.getCapacity()); in deallocate()
720 template <typename Config>
721 void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) { in getStats()
723 Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times " in getStats()
726 FreedBytes >> 10, NumberOfAllocs - NumberOfFrees, in getStats()
727 (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20, in getStats()