//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages it manages. Take over the
  // ownership before unmap() so that nothing done alongside unmap() touches
  // the now-inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
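// A cached secondary block. `CommitBase == 0` marks a slot as unused; `Time`
// is the time of caching, and becomes 0 once the pages have been released to
// the OS or made inaccessible.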
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

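// No-op cache used when the secondary cache is disabled in the configuration:
// retrieve() never finds an entry and store() unmaps the block immediately.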
template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

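// Commits [CommitBase, CommitBase + CommitSize) within `MemMap`. With memory
// tagging enabled and a commit region larger than MaxUnusedCachePages pages,
// only the leading pages (up to the larger of AllocPos and
// CommitBase + MaxUnusedCacheBytes) are mapped MAP_MEMTAG; the rest is mapped
// untagged.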
template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

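// Cache of recently freed secondary blocks. Freed blocks are kept around so
// that a subsequent allocation of a similar size can reuse the mapping instead
// of going back to the OS; with memory tagging enabled, freed blocks first
// rotate through a small quarantine. Idle entries are released to the OS after
// ReleaseToOsIntervalMs.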
template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

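  // Caches a freed block, or unmaps it if it cannot be cached. Depending on
  // the release interval and on whether memory tagging is in use, the pages
  // are released to the OS or remapped MAP_NOACCESS before being stored.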
  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
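    // Insert the entry under the lock. New entries go into slot 0 (most
    // recently freed first) and the previous front entry moves into the free
    // slot; if the cache keeps being found full, it gets emptied.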
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
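      // With memory tagging, rotate the block through the quarantine first:
      // it takes the place of the oldest quarantined entry, which in turn
      // becomes the candidate for the cache proper.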
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

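  // Looks for a cached block that can serve an allocation of `Size` bytes at
  // `Alignment`. On success, *H points at the block's (future) header within
  // the cached mapping, *Zeroed tells whether the pages are already zero, and
  // the entry is removed from the cache.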
  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
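        // Entries that are too small make the subtractions above wrap around,
        // leaving HeaderPos past the end of the commit range; skip them.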
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize)
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

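  // Called when memory tagging gets disabled globally: the quarantine is
  // dropped (its blocks are unmapped) and cached entries get their protection
  // reset so they can be reused untagged. QuarantinePos == -1U afterwards
  // signals store() that tagging has been turned off.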
  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
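  // Unmaps every cached entry. The MemMaps are copied out under the lock and
  // unmapped after it is released, keeping the critical section short.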
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

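  // Releases the entry's pages to the OS if it has been idle since before
  // `Time`; otherwise keeps it for a later pass by updating OldestTime.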
  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};

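// The secondary (large block) allocator. Allocations are served by mapping
// memory directly from the OS, optionally going through the cache above, and
// every live block is kept in InUseBlocks for iteration and statistics.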
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may have aligned address already. Thus we simply
  // pass the required size (`Size` + `getHeadersSize()`) to do cache look up.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
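  // Reserve enough for the block plus a leading and a trailing guard page.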
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer returned by the C or C++ allocation APIs) ends up on a page
    // boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

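// Removes the block from the in-use list and updates the statistics, then
// hands it to the cache, which either keeps the mapping for reuse or unmaps
// it.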
template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_