//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index to the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"

namespace __tsan {

class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;             // number of cached indices (stack top)
  IndexT cache[kSize];  // stack of free object indices
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};

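// Minimal usage sketch (illustrative only; the element type, the table sizes
// and the allocator name below are made-up examples, not values used by TSan
// itself):
//
//   struct Node { u64 a, b; };  // sizeof(Node) > sizeof(IndexT) is required
//   typedef DenseSlabAlloc<Node, /*kL1Size=*/1 << 10, /*kL2Size=*/1 << 10>
//       NodeAlloc;
//   static NodeAlloc node_alloc(LINKER_INITIALIZED, "node allocator");
//
//   // Each thread keeps its own cache:
//   DenseSlabAllocCache cache;
//   node_alloc.InitCache(&cache);
//   NodeAlloc::IndexT idx = node_alloc.Alloc(&cache);  // never returns 0
//   Node *n = node_alloc.Map(idx);                      // index -> pointer
//   n->a = 1;
//   node_alloc.Free(&cache, idx);
//   node_alloc.FlushCache(&cache);  // return cached indices on thread exit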
template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // map_ can be very large.
    // Don't page it in for linker-initialized objects.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

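  // Allocate one object and return its index. Refills the per-thread cache
  // from the global freelist when the cache is empty.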
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

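  // Translate an object index into a pointer: the high bits select the L1
  // slab, the low bits select the slot within that slab.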
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

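  // Return all cached indices to the global freelist. Each free object stores
  // the index of the next free object in its first sizeof(IndexT) bytes.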
  void FlushCache(Cache *c) {
    if (!c->pos)
      return;
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

  uptr AllocatedMemory() const {
    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
  }

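  // Invoke func on every slot of every mapped slab (both live objects and
  // slots currently on the freelist), holding the allocator lock. Slot 0 is
  // skipped because index 0 is reserved as the invalid index.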
  template <typename Func>
  void ForEach(Func func) {
    SpinMutexLock lock(&mtx_);
    uptr fillpos = atomic_load_relaxed(&fillpos_);
    for (uptr l1 = 0; l1 < fillpos; l1++) {
      for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
    }
  }

 private:
  T *map_[kL1Size];                 // per-slab pointers, mmap'ed on demand
  SpinMutex mtx_;                   // protects freelist_ and slab growth
  IndexT freelist_ = {0};           // head of the global freelist (0 = empty)
  atomic_uintptr_t fillpos_ = {0};  // number of L1 entries already mapped
  const char *const name_;

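  // Slow path of Alloc: grab the global freelist (mapping a new slab of
  // kL2Size objects and threading it onto the freelist if it is empty) and
  // move up to kSize/2 indices into the per-thread cache.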
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      uptr fillpos = atomic_load_relaxed(&fillpos_);
      if (fillpos == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
              fillpos, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        // Thread the freelist through the first bytes of each new object
        // (sizeof(T) > sizeof(IndexT) is enforced by a static_assert above).
        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos * kL2Size + start;
      map_[fillpos] = batch;
      atomic_store_relaxed(&fillpos_, fillpos + 1);
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

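  // Slow path of Free: move half of the per-thread cache back to the global
  // freelist.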
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H