//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "report.h"
#include "stats.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;

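  // A TransferBatch is a fixed-size array of pointers to chunks of a single
  // size class. It is the unit of transfer between this thread-local cache
  // and the backing SizeClassAllocator, so that chunks can be moved in bulk
  // rather than one at a time.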
  struct TransferBatch {
    static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
    void setFromArray(void **Array, u32 N) {
      DCHECK_LE(N, MaxNumCached);
      Count = N;
      memcpy(Batch, Array, sizeof(void *) * Count);
    }
    void clear() { Count = 0; }
    void add(void *P) {
      DCHECK_LT(Count, MaxNumCached);
      Batch[Count++] = P;
    }
    void copyToArray(void **Array) const {
      memcpy(Array, Batch, sizeof(void *) * Count);
    }
    u32 getCount() const { return Count; }
    void *get(u32 I) const {
      // Valid indices are 0 .. Count - 1, so the bound check must be strict.
      DCHECK_LT(I, Count);
      return Batch[I];
    }
    static u32 getMaxCached(uptr Size) {
      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
    }
    TransferBatch *Next;

  private:
    u32 Count;
    void *Batch[MaxNumCached];
  };

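  // Initializes a cache located in zero-initialized (eg: BSS) memory, and
  // links its local statistics to the global ones when provided.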
  void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
  }

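  // Initializes a cache whose backing memory can hold garbage: zero it out
  // first, then proceed as for the linker-initialized case.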
  void init(GlobalStats *S, SizeClassAllocator *A) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, A);
  }

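  // Returns all the cached chunks to the allocator and unlinks the local
  // statistics from the global ones.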
  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

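  // Returns a chunk of the given class id, refilling the per-class array
  // from the allocator if it is empty. Returns nullptr if the refill fails.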
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      if (UNLIKELY(!refill(C, ClassId)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    void *P = C->Chunks[--C->Count];
    // The jury is still out as to whether any kind of PREFETCH here increases
    // performance. It definitely decreases performance on Android though.
    // if (!SCUDO_ANDROID) PREFETCH(P);
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return P;
  }

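  // Caches the chunk being deallocated, draining half of the per-class array
  // back to the allocator beforehand if it is full.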
  void deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    // We still have to initialize the cache in the event that the first heap
    // operation in a thread is a deallocation.
    initCacheMaybe(C);
    if (C->Count == C->MaxCount)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] = P;
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);
  }

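  // Returns every cached chunk of every class to the allocator.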
  void drain() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *C = &PerClassArray[I];
      while (C->Count > 0)
        drain(C, I);
    }
  }

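  // TransferBatch headers are allocated from their own dedicated size class
  // (BatchClassId). For that class, the batch is laid out within the chunk
  // being transferred (B) itself, which sidesteps a chicken-and-egg
  // situation where allocating a header would require another header.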
  TransferBatch *createBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      B = allocate(SizeClassMap::BatchClassId);
    return reinterpret_cast<TransferBatch *>(B);
  }

  LocalStats &getStats() { return Stats; }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
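  // Per-class state: Count chunks (out of a capacity of MaxCount) are
  // currently cached; ClassSize is kept at hand for the statistics updates.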
  struct PerClass {
    u32 Count;
    u32 MaxCount;
    uptr ClassSize;
    void *Chunks[2 * TransferBatch::MaxNumCached];
  };
  PerClass PerClassArray[NumClasses];
  LocalStats Stats;
  SizeClassAllocator *Allocator;

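  // A MaxCount of 0 indicates that the per-class arrays have not been
  // initialized yet, allowing for a cheap lazy-initialization check.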
  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
    if (LIKELY(C->MaxCount))
      return;
    initCache();
    DCHECK_NE(C->MaxCount, 0U);
  }

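  // Computes, for each class, the size of its chunks and the maximum number
  // of them the cache may hold (twice the per-class transfer batch size).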
  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
      P->ClassSize = Size;
    }
  }

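  // Counterpart to createBatch(): releases a batch header, except for the
  // batch class whose headers live within the transferred chunks themselves.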
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      deallocate(SizeClassMap::BatchClassId, B);
  }

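  // Replenishes the per-class array by popping a TransferBatch from the
  // allocator, copying its contents over, then releasing the emptied batch.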
  NOINLINE bool refill(PerClass *C, uptr ClassId) {
    initCacheMaybe(C);
    TransferBatch *B = Allocator->popBatch(this, ClassId);
    if (UNLIKELY(!B))
      return false;
    DCHECK_GT(B->getCount(), 0);
    C->Count = B->getCount();
    B->copyToArray(C->Chunks);
    B->clear();
    destroyBatch(ClassId, B);
    return true;
  }

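  // Returns (at most) half of the cached chunks of a class to the allocator
  // as a single TransferBatch built from the oldest entries, then shifts the
  // remaining entries to the front of the array.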
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u32 Count = Min(C->MaxCount / 2, C->Count);
    TransferBatch *B = createBatch(ClassId, C->Chunks[0]);
    if (UNLIKELY(!B))
      reportOutOfMemory(
          SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
    B->setFromArray(&C->Chunks[0], Count);
    C->Count -= Count;
    for (uptr I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
    Allocator->pushBatch(ClassId, B);
  }
};
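
// A typical lifetime, as a sketch only: `Primary` stands in for a concrete
// SizeClassAllocator instantiation, and `GStats`/`Instance` for objects owned
// by the combined allocator; none of these names are defined in this file.
//
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &Instance);     // Once per thread.
//   void *P = Cache.allocate(ClassId);  // Fast path: served from the cache.
//   Cache.deallocate(ClassId, P);       // Fast path: stored in the cache.
//   Cache.destroy(&GStats);             // On thread teardown: drains back.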

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_