//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger allocations.
// Allocated blocks are preceded and followed by a guard page, and hold their
// own header that is not checksummed: the guard pages and the Combined header
// should be enough for our purpose.

namespace LargeBlock {

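// Header prepended to each secondary block. The alignas() expression keeps
// both the header and the block that follows it aligned to the minimum Scudo
// alignment and, where supported, to the memory tag granule; the
// static_asserts below verify that sizeof(Header) preserves that alignment.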
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the very pages it manages. Take over
  // ownership before unmap() so that nothing done alongside unmap() touches
  // the now-inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

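// Bounds (in pages) the unused space that a cached block may leave ahead of
// an allocation when it is retrieved from the cache; also used by
// mapSecondary() to size the MAP_MEMTAG prefix of a tagged mapping.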
static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

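// Cache of recently unmapped secondary blocks: store() keeps the backing
// mapping (released to the OS and/or marked inaccessible depending on the
// release interval and memory tagging), and retrieve() hands it back to a
// later allocation of a compatible size instead of creating a new mapping.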
template <typename Config> class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %d, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config takes precedence.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
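      // With memory tagging enabled, new entries first rotate through a small
      // quarantine ring; the entry evicted from the quarantine slot, if any,
      // is what actually gets inserted into the cache below.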
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
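        // Store the new entry at index 0 (most recently used); the entry that
        // was at index 0 moves into the first free slot found.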
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
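        // If the entry is too small for the requested size, HeaderPos wraps
        // around to a huge value; the check below catches that and skips it.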
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Otherwise, keep track of the smallest cached block that is larger
        // than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      if (Value < 0)
        return false;
      atomic_store_relaxed(
          &MaxEntriesCount,
          Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

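  // Release the backing pages of every cached or quarantined entry whose
  // last-use timestamp is at or before `Time`, updating the OldestTime
  // bookkeeping used to short-circuit the scan.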
  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
};

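// The Secondary allocator proper: each block is a dedicated memory mapping
// surrounded by guard pages, optionally recycled through the cache above.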
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) for the cache
  // lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
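  // Reserve enough for the block plus a leading and a trailing guard page.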
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take full ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer returned by the C or C++ allocation APIs) ends up on a page
    // boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and skipping the trim saves us two
    // syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_