//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"
#include "tsan_trace.h"

namespace __tsan {

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
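//
// For example, assuming the defaults kTidBits == 13 and kClkBits == 42 from
// tsan_defs.h (illustrative values, not normative):
//   bit 63      : ignore
//   bits 62..50 : tid
//   bits 49..45 : unused
//   bits 44..42 : history_size
//   bits 41..0  : epoch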
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x) : x_(x) {}

  u64 raw() const { return x_; }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

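  // The epoch occupies the low kClkBits bits of x_, so a plain increment
  // advances it in place; the DCHECK below relies on the epoch not
  // overflowing into the history_size field.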
  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
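  // The ignore bit is the top (sign) bit of x_, so a signed comparison
  // against zero tests it.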
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() { SetHistorySize(0); }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // The trace holds 2^(hs + 1) parts of 2^kTracePartSizeBits events each;
    // when hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
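
// A minimal FastState usage sketch (tid/epoch values are illustrative):
//   FastState s(/*tid=*/1, /*epoch=*/1);
//   s.IncrementEpoch();  // epoch() == 2
//   s.SetIgnoreBit();    // GetIgnoreBit() == true; tid() is still 1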

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
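//
// Shadow keeps the tid and epoch fields of FastState and packs the access
// description (addr0, size_log, is_read, is_atomic) into the bits that
// FastState uses for history_size and leaves unused.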
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) {}

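  // A thread's FastState reuses the addr0 bits for history_size, so clear
  // them when converting.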
  explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }

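  // Packs the offset within the 8-byte shadow cell (addr0, 3 bits) and the
  // log2 of the access size (2 bits) just above the epoch field. The first
  // DCHECK verifies that all 5 of those bits are still clear.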
  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const { return x_ & kAtomicBit; }

  bool IsZero() const { return x_ == 0; }

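  // Compares the tid fields together with the top (ignore/freed) bit in a
  // single shift and xor.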
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
                                                 const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

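  // Branch-light intersection check. diff is computed in unsigned
  // arithmetic, so when s1.addr0() < s2.addr0() the negation -diff wraps
  // around to the positive distance s2.addr0() - s1.addr0().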
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible), we write shadow
  // values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing, the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if the free were a write from a thread we have never
  // synchronized with). This lets us detect accesses to freed memory without
  // additional overhead in memory access processing, while still being able
  // to restore the tid/epoch of the free.
  void MarkAsFreed() { x_ |= kFreedBit; }

  bool IsFreed() const { return x_ & kFreedBit; }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

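  // The two adjacent bits at kReadShift encode the access type as
  // (is_atomic << 1) | is_read, ordering accesses from strongest (plain
  // write == 0) to weakest (atomic read == 3). This lets the predicates
  // below compare the "strength" of two accesses with a single unsigned
  // comparison, as the DCHECKs verify.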
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
                   (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0())
      return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

const RawShadow kShadowRodata = (RawShadow)-1;  // .rodata shadow marker

}  // namespace __tsan

#endif