//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger allocations.
// Allocated blocks are preceded and followed by a guard page, and hold their
// own header that is not checksummed: the guard pages and the Combined header
// should be enough for our purpose.
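//
// A minimal usage sketch (assuming a Config type that provides the
// SecondaryCache typedef and its compile-time cache parameters, e.g. one of
// the configurations in allocator_config.h):
//
//   scudo::MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr, /*ReleaseToOsInterval=*/-1);
//   void *P = Secondary.allocate(scudo::Options{}, Size);
//   ...
//   Secondary.deallocate(scudo::Options{}, P);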

namespace LargeBlock {

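// The block header is placed right before the user-visible block. Its
// alignment (and, via the static_asserts below, its size) must be a multiple
// of both the minimum Scudo alignment and, when supported, the memory tag
// granule, so that the memory following it remains suitably aligned.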
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static void unmap(LargeBlock::Header *H) {
  MapPlatformData Data = H->Data;
  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}

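// No-op cache used when the configuration disables the Secondary cache:
// store() unmaps the block immediately and retrieve() never returns a hit.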
class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

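// Bounds how many unused leading pages a block retrieved from the cache may
// have: retrieve() rejects a cached entry if the new allocation would start
// more than MaxUnusedCachePages pages past its commit base. mapSecondary()
// uses the same bound to restrict MAP_MEMTAG to the leading portion of the
// commit range, which is the only part that is ever retagged.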
static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
    map(reinterpret_cast<void *>(UntaggedPos),
        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
        MAP_RESIZABLE | Flags, Data);
  } else {
    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
            Flags,
        Data);
  }
}

template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(Options Options, LargeBlock::Header *H) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.MapBase = H->MapBase;
    Entry.MapSize = H->MapSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.Data = H->Data;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
      } else {
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
                            &Entry.Data);
      }
    } else if (Interval == 0) {
      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong,
        // so just unmap it.
        break;
      }
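      // With memory tagging, blocks first rotate through a fixed-size
      // quarantine; the entry evicted from the quarantine slot (if any) is
      // the one considered for the cache proper below.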
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
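        // Cache the entry in slot 0 so that retrieve() scans the most
        // recently stored block first; the previous front entry moves into
        // the free slot that was found.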
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
            &Entry.Data);
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
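        // Compute where the block would be laid out in this cached mapping,
        // and skip the entry if the headers would not fit in the committed
        // range or if reuse would leave more than MaxUnusedCachePages unused
        // pages at the front.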
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDownTo(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        break;
      }
    }
    if (Found) {
      *H = reinterpret_cast<LargeBlock::Header *>(
          LargeBlock::addHeaderTag<Config>(HeaderPos));
      *Zeroed = Entry.Time == 0;
      if (useMemoryTagging<Config>(Options))
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
      uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
      if (useMemoryTagging<Config>(Options)) {
        if (*Zeroed)
          storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                    NewBlockBegin);
        else if (Entry.BlockBegin < NewBlockBegin)
          storeTags(Entry.BlockBegin, NewBlockBegin);
        else
          storeTags(untagPointer(NewBlockBegin),
                    untagPointer(Entry.BlockBegin));
      }
      (*H)->CommitBase = Entry.CommitBase;
      (*H)->CommitSize = Entry.CommitSize;
      (*H)->MapBase = Entry.MapBase;
      (*H)->MapSize = Entry.MapSize;
      (*H)->Data = Entry.Data;
      EntriesCount--;
    }
    return Found;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

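  // Flushes the quarantine (whose blocks were left inaccessible) by unmapping
  // its entries, and resets cached entries to plain untagged read-write
  // mappings. Setting QuarantinePos to -1U tells store() to unmap, rather
  // than cache, a tagged block freed concurrently with the mode change.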
  void disableMemoryTagging() {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++)
      if (Entries[I].CommitBase)
        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
                            &Entries[I].Data);
    QuarantinePos = -1U;
  }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
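  // Unmaps every cached entry. The map information is copied out under the
  // lock and the actual unmap() calls are issued after releasing it, keeping
  // the critical section short.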
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  struct CachedBlock {
    uptr CommitBase;
    uptr CommitSize;
    uptr MapBase;
    uptr MapSize;
    uptr BlockBegin;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
    Entry.Time = 0;
  }

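  // Releases to the OS every cached or quarantined entry whose timestamp is
  // older than Time. OldestTime tracks the timestamp of the oldest entry not
  // yet released, which lets the scan be skipped when nothing can qualify.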
  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount = 0;
  u32 QuarantinePos = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime = 0;
  u32 IsFullEvents = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  uptr canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes = 0;
  uptr FreedBytes = 0;
  uptr LargestSize = 0;
  u32 NumberOfAllocs = 0;
  u32 NumberOfFrees = 0;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
                    Chunk::getHeaderSize(),
                PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      const uptr BlockSize = BlockEnd - HInt;
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
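  // Reserve the rounded-up size plus two extra pages for the front and back
  // guard pages. The whole region starts out MAP_NOACCESS; only the commit
  // range is made accessible by mapSecondary() below.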
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase = reinterpret_cast<uptr>(
      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->Data = Data;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_