xref: /freebsd/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp (revision 924226fba12cc9a228c73b956e1b7fa24c60b055)
//===-- xray_buffer_queue.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implements the buffer queue used to hand out, track, and recycle the
// in-memory buffers backing the XRay runtime.
//
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_allocator.h"
#include "xray_defs.h"
#include <memory>
#include <sys/mman.h>

using namespace __xray;

namespace {

BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
  auto B =
      allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
  return B == nullptr ? nullptr
                      : reinterpret_cast<BufferQueue::ControlBlock *>(B);
}
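
// A note on the size computation above (and in deallocControlBlock below):
// the ControlBlock declared in xray_buffer_queue.h is a reference count
// followed by a `Data` member that marks the first byte of buffer storage
// (hence the `- 1`), so a block carrying Count buffers of Size bytes each
// needs (sizeof(ControlBlock) - 1) + (Size * Count) bytes in total. Roughly,
// as an illustration:
//
//   [ RefCount | Data[0] ... Data[(Size * Count) - 1] ]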

void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
                         size_t Count) {
  deallocateBuffer(reinterpret_cast<unsigned char *>(C),
                   (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
}

void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
  if (C == nullptr)
    return;
  if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
    deallocControlBlock(C, Size, Count);
}

void incRefCount(BufferQueue::ControlBlock *C) {
  if (C == nullptr)
    return;
  atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
}
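
// Reference-counting convention used below: init() stores an initial count of
// 1 for the queue's own reference, getBuffer() adds one reference per Buffer
// handed out, and releaseBuffer()/cleanupBuffers() drop references again.
// Whoever performs the final decrement (seeing the count go from 1 to 0)
// deallocates the control block, so a generation's backing store stays alive
// until the queue and every outstanding Buffer are done with it.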

// We use a struct to ensure that we allocate one atomic_uint64_t per cache
// line. This avoids false sharing among atomic objects that are updated
// (constantly) by different threads.
struct ExtentsPadded {
  union {
    atomic_uint64_t Extents;
    unsigned char Storage[kCacheLineSize];
  };
};

constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
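
// Since Storage spans a full cache line, kExtentsSize works out to
// kCacheLineSize (assuming, as on the supported targets, that the alignment
// of atomic_uint64_t divides kCacheLineSize), so consecutive ExtentsPadded
// entries in the extents backing store never share a cache line.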

} // namespace

BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
  SpinMutexLock Guard(&Mutex);

  if (!finalizing())
    return BufferQueue::ErrorCode::AlreadyInitialized;

  cleanupBuffers();

  bool Success = false;
  BufferSize = BS;
  BufferCount = BC;

  BackingStore = allocControlBlock(BufferSize, BufferCount);
  if (BackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(BackingStore, BufferSize, BufferCount);
    BackingStore = nullptr;
  });

  // Initialize enough atomic_uint64_t instances, each padded out to a cache
  // line, to hold the extents of every buffer in this generation.
  ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
  if (ExtentsBackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
    ExtentsBackingStore = nullptr;
  });

  Buffers = initArray<BufferRep>(BufferCount);
  if (Buffers == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // At this point we increment the generation number to associate the buffers
  // with the new generation; buffers handed out before this re-initialization
  // carry the old generation and are recognized (and dropped) in
  // releaseBuffer().
  atomic_fetch_add(&Generation, 1, memory_order_acq_rel);

  // First, we initialize the refcount in the ControlBlock, which we treat as
  // living at the start of each backing store allocation.
  atomic_store(&BackingStore->RefCount, 1, memory_order_release);
  atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);

  // Then we initialize the individual buffers that subdivide the whole backing
  // store. Each buffer starts at an offset from the `Data` member of its
  // ControlBlock.
  for (size_t i = 0; i < BufferCount; ++i) {
    auto &T = Buffers[i];
    auto &Buf = T.Buff;
    auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
                                                (kExtentsSize * i));
    Buf.Extents = &E->Extents;
    atomic_store(Buf.Extents, 0, memory_order_release);
    Buf.Generation = generation();
    Buf.Data = &BackingStore->Data + (BufferSize * i);
    Buf.Size = BufferSize;
    Buf.BackingStore = BackingStore;
    Buf.ExtentsBackingStore = ExtentsBackingStore;
    Buf.Count = BufferCount;
    T.Used = false;
  }
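
  // After the loop above, buffer i covers bytes [i * BufferSize,
  // (i + 1) * BufferSize) of the data backing store and owns the i-th padded
  // extents counter. Each buffer also remembers the control blocks and the
  // buffer count, so a stale Buffer can still drop its references after a
  // re-initialization.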

  Next = Buffers;
  First = Buffers;
  LiveBuffers = 0;
  atomic_store(&Finalizing, 0, memory_order_release);
  Success = true;
  return BufferQueue::ErrorCode::Ok;
}

BufferQueue::BufferQueue(size_t B, size_t N,
                         bool &Success) XRAY_NEVER_INSTRUMENT
    : BufferSize(B),
      BufferCount(N),
      Mutex(),
      Finalizing{1},
      BackingStore(nullptr),
      ExtentsBackingStore(nullptr),
      Buffers(nullptr),
      Next(Buffers),
      First(Buffers),
      LiveBuffers(0),
      Generation{0} {
  Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}
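
// A rough sketch of how a client drives the queue (illustrative only: the
// sizes and the loop body are made up, and error handling is elided):
//
//   bool Success = false;
//   BufferQueue Q(/*BufferSize=*/4096, /*BufferCount=*/16, Success);
//   BufferQueue::Buffer B;
//   if (Success && Q.getBuffer(B) == BufferQueue::ErrorCode::Ok) {
//     // ... write records into B.Data, updating the atomic counter at
//     // *B.Extents with how much was written ...
//     Q.releaseBuffer(B);
//   }
//   Q.finalize();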
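// Hands out the next available buffer in round-robin order. Fails with
// QueueFinalizing once finalize() has been called, and with NotEnoughMemory
// when every buffer is already live. The returned Buffer holds a reference on
// both backing stores until it is given back via releaseBuffer().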
BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
  if (atomic_load(&Finalizing, memory_order_acquire))
    return ErrorCode::QueueFinalizing;

  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (LiveBuffers == BufferCount)
      return ErrorCode::NotEnoughMemory;
    B = Next++;
    if (Next == (Buffers + BufferCount))
      Next = Buffers;
    ++LiveBuffers;
  }

  incRefCount(BackingStore);
  incRefCount(ExtentsBackingStore);
  Buf = B->Buff;
  Buf.Generation = generation();
  B->Used = true;
  return ErrorCode::Ok;
}
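// Returns a buffer to the queue. Buffers from an older generation (i.e.
// handed out before a re-initialization) are not recycled; they only drop the
// references they hold. Buffers that do not point into the current backing
// store are rejected as UnrecognizedBuffer.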
BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
  // Check whether the buffer being referred to is within the bounds of the
  // backing store's range.
  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (Buf.Generation != generation() || LiveBuffers == 0) {
      // Stale buffer: drop the references it holds before clearing it, so the
      // old generation's backing stores can eventually be deallocated.
      decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
      decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
      Buf = {};
      return BufferQueue::ErrorCode::Ok;
    }

    if (Buf.Data < &BackingStore->Data ||
        Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
      return BufferQueue::ErrorCode::UnrecognizedBuffer;

    --LiveBuffers;
    B = First++;
    if (First == (Buffers + BufferCount))
      First = Buffers;
  }

  // Now that the buffer has been released, copy its state back into its rep
  // and mark the rep as "used".
  B->Buff = Buf;
  B->Used = true;
  decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
  decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
  atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
               memory_order_release);
  Buf = {};
  return ErrorCode::Ok;
}
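// Flips the queue into the finalizing state so that getBuffer() stops handing
// out buffers. Only the first caller observes Ok; subsequent calls (and calls
// on an already-finalizing queue) get QueueFinalizing.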
BufferQueue::ErrorCode BufferQueue::finalize() {
  if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
    return ErrorCode::QueueFinalizing;
  return ErrorCode::Ok;
}
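// Destroys the buffer reps and drops the queue's own references on both
// backing stores; the stores themselves are deallocated once any outstanding
// Buffers have released theirs as well. Called from init() (before building a
// new generation) and from the destructor.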
void BufferQueue::cleanupBuffers() {
  for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
    B->~BufferRep();
  deallocateBuffer(Buffers, BufferCount);
  decRefCount(BackingStore, BufferSize, BufferCount);
  decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
  BackingStore = nullptr;
  ExtentsBackingStore = nullptr;
  Buffers = nullptr;
  BufferCount = 0;
  BufferSize = 0;
}

BufferQueue::~BufferQueue() { cleanupBuffers(); }
238