//===-- xray_allocator.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the allocator interface for an arena allocator, used primarily for
// the profiling runtime.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_ALLOCATOR_H
#define XRAY_ALLOCATOR_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#else
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_defs.h"
#include "xray_utils.h"
#include <cstddef>
#include <cstdint>
#include <new>     // placement new, used by initArray().
#include <utility> // std::forward, used by initArray().
#include <sys/mman.h>

namespace __xray {

// We implement our own memory allocation routine which will bypass the
// internal allocator. This allows us to manage the memory directly, using
// mmap'ed memory to back the allocators.
template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
  uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  zx_handle_t Vmo;
  zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to create VMO of size %zu: %s\n",
             sizeof(T), _zx_status_get_string(Status));
    return nullptr;
  }
  uintptr_t B;
  Status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   Vmo, 0, sizeof(T), &B);
  _zx_handle_close(Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", sizeof(T),
             _zx_status_get_string(Status));
    return nullptr;
  }
  return reinterpret_cast<T *>(B);
#else
  uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int ErrNo = 0;
  if (UNLIKELY(internal_iserror(B, &ErrNo))) {
    if (Verbosity())
      Report("XRay Profiling: Failed to allocate memory of size %zu; "
             "errno = %d\n",
             RoundedSize, ErrNo);
    return nullptr;
  }
#endif
  return reinterpret_cast<T *>(B);
}
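
// A minimal usage sketch of allocate() and its counterpart deallocate()
// (defined just below). Note that no constructor runs: the caller gets raw,
// zero-filled pages. Illustrative only; `ProfileState` is a hypothetical
// trivially-constructible type.
//
//   auto *State = allocate<ProfileState>();
//   if (State != nullptr) {
//     // ... use *State ...
//     deallocate(State); // unmaps the whole rounded-up region
//   }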

template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT {
  if (B == nullptr)
    return;
  uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
                 RoundedSize);
#else
  internal_munmap(B, RoundedSize);
#endif
}

template <class T = unsigned char>
T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
  uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  zx_handle_t Vmo;
  zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to create VMO of size %zu: %s\n", S,
             _zx_status_get_string(Status));
    return nullptr;
  }
  uintptr_t B;
  Status = _zx_vmar_map(_zx_vmar_root_self(),
                        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, Vmo, 0, S, &B);
  _zx_handle_close(Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", S,
             _zx_status_get_string(Status));
    return nullptr;
  }
#else
  uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int ErrNo = 0;
  if (UNLIKELY(internal_iserror(B, &ErrNo))) {
    if (Verbosity())
      Report("XRay Profiling: Failed to allocate memory of size %zu; "
             "errno = %d\n",
             RoundedSize, ErrNo);
    return nullptr;
  }
#endif
  return reinterpret_cast<T *>(B);
}

template <class T> void deallocateBuffer(T *B, size_t S) XRAY_NEVER_INSTRUMENT {
  if (B == nullptr)
    return;
  uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
                 RoundedSize);
#else
  internal_munmap(B, RoundedSize);
#endif
}
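
// A usage sketch for the buffer variants. The caller must retain the element
// count and pass it back at deallocation time, since the mapping size is
// recomputed from it. Illustrative only; the 4096-element count is arbitrary.
//
//   auto *Buf = allocateBuffer<uint64_t>(4096);
//   if (Buf != nullptr) {
//     // ... fill Buf[0] .. Buf[4095] ...
//     deallocateBuffer(Buf, 4096); // must match the allocation count
//   }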

template <class T, class... U>
T *initArray(size_t N, U &&... Us) XRAY_NEVER_INSTRUMENT {
  auto A = allocateBuffer<T>(N);
  if (A != nullptr)
    while (N > 0)
      new (A + (--N)) T(std::forward<U>(Us)...);
  return A;
}
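
// A sketch of initArray(): the same constructor arguments are forwarded to
// each of the N elements. Illustrative only; `Node` is a hypothetical type
// with a `Node(int)` constructor. The storage comes from allocateBuffer, so
// it must be released with deallocateBuffer after destroying the elements.
//
//   auto *Nodes = initArray<Node>(8, 42); // 8 elements, each built as Node(42)
//   if (Nodes != nullptr) {
//     // ... use Nodes[0] .. Nodes[7], run destructors if non-trivial ...
//     deallocateBuffer(Nodes, 8);
//   }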

/// The Allocator type hands out fixed-sized chunks of memory that are
/// cache-line aligned and sized. This is useful for placement of
/// performance-sensitive data in memory that's frequently accessed. The
/// allocator also self-limits the peak memory usage to a dynamically defined
/// maximum.
///
/// N is the lower-bound size of the block of memory to return from the
/// allocation function. N is used to compute the size of a block, which is
/// a cache-line-size multiple worth of memory. We compute the size of a block
/// by determining the smallest number of cache lines required to subsume N.
///
/// The Allocator instance will manage its own memory acquired through mmap.
/// This constrains its use to platforms where mmap semantics are
/// well-defined, primarily POSIX systems.
///
/// FIXME: Isolate the lower-level memory management to a different abstraction
/// that can be platform-specific.
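///
/// An illustrative sketch of intended use (`Record` is a hypothetical
/// cache-line-sensitive type; the 2MB budget is arbitrary):
///
///   Allocator<sizeof(Record)> A(2 << 20);
///   auto B = A.Allocate(); // B.Data is nullptr once the budget is exhausted
///   if (B.Data != nullptr)
///     new (B.Data) Record();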
template <size_t N> struct Allocator {
  // The Allocator returns memory as Block instances.
  struct Block {
    /// Compute the minimum cache-line size multiple that is >= N.
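    /// For example, assuming the common 64-byte kCacheLineSize, N == 100
    /// yields Size == 128 (two cache lines).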
    static constexpr auto Size = nearest_boundary(N, kCacheLineSize);
    void *Data;
  };

private:
  size_t MaxMemory{0};
  unsigned char *BackingStore = nullptr;
  unsigned char *AlignedNextBlock = nullptr;
  size_t AllocatedBlocks = 0;
  bool Owned;
  SpinMutex Mutex{};

  void *Alloc() XRAY_NEVER_INSTRUMENT {
    SpinMutexLock Lock(&Mutex);
    if (UNLIKELY(BackingStore == nullptr)) {
      BackingStore = allocateBuffer(MaxMemory);
      if (BackingStore == nullptr) {
        if (Verbosity())
          Report("XRay Profiling: Failed to allocate memory for allocator\n");
        return nullptr;
      }

      AlignedNextBlock = BackingStore;

      // Ensure that NextBlock is aligned appropriately.
      auto BackingStoreNum = reinterpret_cast<uintptr_t>(BackingStore);
      auto AlignedNextBlockNum = nearest_boundary(
          reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize);
      if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) {
        deallocateBuffer(BackingStore, MaxMemory);
        AlignedNextBlock = BackingStore = nullptr;
        if (Verbosity())
          Report("XRay Profiling: Cannot obtain enough memory from "
                 "preallocated region\n");
        return nullptr;
      }

      AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum);

      // Assert that AlignedNextBlock is cache-line aligned.
      DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
                0);
    }

    if (((AllocatedBlocks + 1) * Block::Size) > MaxMemory)
      return nullptr;

    // Align the pointer we'd like to return to an appropriate alignment, then
    // advance the pointer from where to start allocations.
    void *Result = AlignedNextBlock;
    AlignedNextBlock =
        reinterpret_cast<unsigned char *>(AlignedNextBlock) + Block::Size;
    ++AllocatedBlocks;
    return Result;
  }

public:
  explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT
      : MaxMemory(RoundUpTo(M, kCacheLineSize)),
        BackingStore(nullptr),
        AlignedNextBlock(nullptr),
        AllocatedBlocks(0),
        Owned(true),
        Mutex() {}

  explicit Allocator(void *P, size_t M) XRAY_NEVER_INSTRUMENT
      : MaxMemory(M),
        BackingStore(reinterpret_cast<unsigned char *>(P)),
        AlignedNextBlock(reinterpret_cast<unsigned char *>(P)),
        AllocatedBlocks(0),
        Owned(false),
        Mutex() {}

  Allocator(const Allocator &) = delete;
  Allocator &operator=(const Allocator &) = delete;

  Allocator(Allocator &&O) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock L0(&Mutex);
    SpinMutexLock L1(&O.Mutex);
    MaxMemory = O.MaxMemory;
    O.MaxMemory = 0;
    BackingStore = O.BackingStore;
    O.BackingStore = nullptr;
    AlignedNextBlock = O.AlignedNextBlock;
    O.AlignedNextBlock = nullptr;
    AllocatedBlocks = O.AllocatedBlocks;
    O.AllocatedBlocks = 0;
    Owned = O.Owned;
    O.Owned = false;
  }

  Allocator &operator=(Allocator &&O) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock L0(&Mutex);
    SpinMutexLock L1(&O.Mutex);
    // Release our current backing store (sized by our current MaxMemory)
    // before adopting O's state; otherwise we would unmap the old region
    // with the wrong size, or free memory we do not own.
    if (Owned && BackingStore != nullptr)
      deallocateBuffer(BackingStore, MaxMemory);
    MaxMemory = O.MaxMemory;
    O.MaxMemory = 0;
    BackingStore = O.BackingStore;
    O.BackingStore = nullptr;
    AlignedNextBlock = O.AlignedNextBlock;
    O.AlignedNextBlock = nullptr;
    AllocatedBlocks = O.AllocatedBlocks;
    O.AllocatedBlocks = 0;
    Owned = O.Owned;
    O.Owned = false;
    return *this;
  }

  Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; }

  ~Allocator() NOEXCEPT XRAY_NEVER_INSTRUMENT {
    if (Owned && BackingStore != nullptr) {
      deallocateBuffer(BackingStore, MaxMemory);
    }
  }
};

} // namespace __xray

#endif // XRAY_ALLOCATOR_H