//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
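
// With these defaults: kTracePartSize = 1 << 13 = 8192 events per part,
// kTraceParts = 2M / 8192 = 256 parts, and kTraceSize = 8192 * 256 = 2M
// trace events in total.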

// Must fit into 3 bits.
enum EventType {
  EventTypeMop,
  EventTypeFuncEnter,
  EventTypeFuncExit,
  EventTypeLock,
  EventTypeUnlock,
  EventTypeRLock,
  EventTypeRUnlock
};

// Represents a thread event (from most significant bit):
// u64 typ  : 3;   // EventType.
// u64 addr : 61;  // Associated pc.
typedef u64 Event;

const uptr kEventPCBits = 61;
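
// Illustrative sketch, not part of the original header: how an event is
// presumably packed given the layout comment above (type in the top 3 bits,
// pc in the low 61). The real writer lives in the runtime; the helper name
// here is hypothetical.
inline Event MakeEvent(EventType typ, uptr pc) {
  return ((u64)typ << kEventPCBits) | ((u64)pc & ((1ull << kEventPCBits) - 1));
}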

struct TraceHeader {
#if !SANITIZER_GO
  BufferedStackTrace stack0;  // Start stack for the trace.
#else
  VarSizeStackTrace stack0;
#endif
  u64        epoch0;  // Start epoch for the trace.
  MutexSet   mset0;

  TraceHeader() : stack0(), epoch0() {}
};

struct Trace {
  Mutex mtx;
#if !SANITIZER_GO
  // Must be last to catch overflow as a paging fault.
  // The Go shadow stack is dynamically allocated.
  uptr shadow_stack[kShadowStackSize];
#endif
  // Must be the last field, because we unmap the unused part in
  // CreateThreadContext.
  TraceHeader headers[kTraceParts];

  Trace() : mtx(MutexTypeTrace) {}
};
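
// Illustrative sketch, not part of the original header: the trace is split
// into kTraceParts parts, so the header describing the part that a given
// epoch falls into can presumably be found as below (the helper name is
// hypothetical; the real lookup lives in the runtime).
inline TraceHeader* TracePartHeaderFor(Trace* trace, u64 epoch) {
  return &trace->headers[(epoch / kTracePartSize) % kTraceParts];
}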

namespace v3 {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to some event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise type denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");

// No-op event used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// Compressed memory access event. It can represent only accesses with a PC
// close enough to the previous access's PC; otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
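
// Illustrative sketch, not part of the original header: pc_delta holds a
// signed delta in an unsigned bitfield, so the writer is expected to bias
// the delta by half the range and fall back to EventAccessExt when it does
// not fit into kPCBits. The helper name and exact biasing are assumptions.
inline bool TryEncodePCDelta(uptr pc, uptr prev_pc, EventAccess* ev) {
  u64 delta = (u64)(pc - prev_pc) + (1ull << (EventAccess::kPCBits - 1));
  if (delta >= (1ull << EventAccess::kPCBits))
    return false;  // PCs too far apart; the caller should emit EventAccessExt
  ev->pc_delta = delta;
  return true;
}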

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
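
// Illustrative sketch, not part of the original header: the intended order
// of type dispatch on a raw Event (see the encoding comment on Event above).
// The helper name is hypothetical; the real decoder lives in the runtime.
inline bool EventIsMemoryAccess(const Event* ev) {
  if (ev->is_access)
    return true;  // compressed EventAccess
  if (ev->is_func)
    return false;  // EventFunc (function entry/exit)
  // Everything else is distinguished by the 3-bit type field.
  return ev->type == EventType::kAccessExt ||
         ev->type == EventType::kAccessRange;
}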

// Extended memory access with full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler will generate
  // code that loads the old value and shuffles it to extract the unused bits
  // and apply them to the new value. If we specify the unused part and store
  // 0 in there, all that unnecessary code goes away (the store of the 0
  // constant is combined with the other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
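
// Illustrative sketch, not part of the original header: per the note above,
// a writer should assign every field, including the unused `_` bits, so the
// compiler can emit plain constant stores. The helper name and parameters
// are hypothetical.
inline void FillAccessExt(EventAccessExt* ev, bool is_read, bool is_atomic,
                          u64 size_log, u64 compressed_addr, u64 pc) {
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessExt;
  ev->is_read = is_read;
  ev->is_atomic = is_atomic;
  ev->size_log = size_log;
  ev->_ = 0;  // explicitly zero the unused bits (see the note above)
  ev->addr = compressed_addr;
  ev->pc = pc;
}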

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
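
// Illustrative sketch, not part of the original header: the range size is
// split into size_lo/size_hi around the pc and addr fields, so the full
// value is presumably reassembled as below (the helper name is hypothetical).
inline u64 EventRangeSize(const EventAccessRange& ev) {
  return ev.size_lo | ((u64)ev.size_hi << EventAccessRange::kSizeLoBits);
}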

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
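
// Illustrative sketch, not part of the original header: the stack ID is
// split into stack_lo/stack_hi around the pc field, so the full StackID is
// presumably reassembled as below (the helper name is hypothetical).
inline StackID EventLockStackID(const EventLock& ev) {
  return (StackID)(ev.stack_lo |
                   ((u64)ev.stack_hi << EventLock::kStackIDLoBits));
}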

// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing the
  // pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle that are filtered out
  // separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
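
// Illustrative sketch, not part of the original header: the fast check
// described above, assuming TraceParts are mapped page-aligned. An event
// position whose next slot lands within the first 16 bytes of a 4K page is
// either the true end of TracePart::events or an interior false positive
// that the slow path filters out. The exact check lives in TraceAcquire in
// the runtime; treat this as an assumption.
inline bool TracePosMayOverflow(Event* pos) {
  return ((uptr)(pos + 1) & TracePart::kAlignment) == 0;
}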

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  Event* final_pos =
      nullptr;  // final position in the last part for finished threads

  Trace() : mtx(MutexTypeTrace) {}
};

}  // namespace v3

}  // namespace __tsan

#endif  // TSAN_TRACE_H