//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"

namespace __tsan {

// FastState holds the current thread's slot id (Sid), epoch and the
// ignore-accesses bit, packed so that it can be copied into a shadow cell
// as a single u32 (see the Shadow constructor below).
class FastState {
 public:
  FastState() { Reset(); }

  void Reset() {
    part_.unused0_ = 0;
    part_.sid_ = static_cast<u8>(kFreeSid);
    part_.epoch_ = static_cast<u16>(kEpochLast);
    part_.unused1_ = 0;
    part_.ignore_accesses_ = false;
  }

  void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }

  Sid sid() const { return static_cast<Sid>(part_.sid_); }

  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }

  void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }

  void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
  void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
  bool GetIgnoreBit() const { return part_.ignore_accesses_; }

 private:
  friend class Shadow;
  struct Parts {
    u32 unused0_ : 8;
    u32 sid_ : 8;
    u32 epoch_ : kEpochBits;
    u32 unused1_ : 1;
    u32 ignore_accesses_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };
};

static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");

// Shadow describes a single memory access to an 8-byte application cell:
// which bytes of the cell were touched (access_, one bit per byte),
// by which slot and at which epoch (sid_, epoch_), and whether the access
// was a read and/or atomic (is_read_, is_atomic_).
class Shadow {
 public:
  static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);

  Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
    raw_ = state.raw_;
    DCHECK_GT(size, 0);
    DCHECK_LE(size, 8);
    UNUSED Sid sid0 = part_.sid_;
    UNUSED u16 epoch0 = part_.epoch_;
    raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
            (!!(typ & kAccessRead) << kIsReadShift) |
            (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
    // Note: we don't check kAccessAtomic because it overlaps with
    // FastState::ignore_accesses_ and it may be set spuriously.
    DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
    DCHECK_EQ(sid(), sid0);
    DCHECK_EQ(epoch(), epoch0);
  }

  explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }

  RawShadow raw() const { return static_cast<RawShadow>(raw_); }
  Sid sid() const { return part_.sid_; }
  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
  u8 access() const { return part_.access_; }

  void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
    DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
    if (addr)
      *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
    if (size)
      *size = part_.access_ == kFreeAccess ? kShadowCell
                                           : __builtin_popcount(part_.access_);
    if (typ)
      *typ = (part_.is_read_ ? kAccessRead : kAccessWrite) |
             (part_.is_atomic_ ? kAccessAtomic : 0) |
             (part_.access_ == kFreeAccess ? kAccessFree : 0);
  }
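
  // For example, a 4-byte access at offset 2 of its 8-byte cell is encoded
  // by the constructor above as the contiguous byte mask
  //   ((1u << 4) - 1) << 2 == 0b00111100 == 0x3c,
  // and GetAccess() inverts it: __builtin_ffs(0x3c) - 1 == 2 recovers the
  // offset within the cell, __builtin_popcount(0x3c) == 4 the size.
  static_assert(((((1u << 4) - 1) << 2) & 0xff) == 0x3c,
                "example access mask for a 4-byte access at offset 2");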

  ALWAYS_INLINE
  bool IsBothReadsOrAtomic(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    bool res =
        raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
    DCHECK_EQ(res,
              (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
    return res;
  }

  ALWAYS_INLINE
  bool IsRWWeakerOrEqual(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    UNUSED u32 res0 =
        (part_.is_atomic_ > is_atomic) ||
        (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
    bool res = (raw_ & kAtomicReadMask) >=
               ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));

    DCHECK_EQ(res, res0);
    return res;
#else
    return res0;
#endif
  }

  // The FreedMarker must not pass "the same access" check so that we don't
  // return from the race detection algorithm early.
  static RawShadow FreedMarker() {
    FastState fs;
    fs.SetSid(kFreeSid);
    fs.SetEpoch(kEpochLast);
    Shadow s(fs, 0, 8, kAccessWrite);
    return s.raw();
  }

  static RawShadow FreedInfo(Sid sid, Epoch epoch) {
    Shadow s;
    s.part_.sid_ = sid;
    s.part_.epoch_ = static_cast<u16>(epoch);
    s.part_.access_ = kFreeAccess;
    return s.raw();
  }

 private:
  struct Parts {
    u8 access_;
    Sid sid_;
    u16 epoch_ : kEpochBits;
    u16 is_read_ : 1;
    u16 is_atomic_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };

  // Not a valid access mask: masks of real accesses are contiguous bit runs,
  // and 0x81 is not, so freed memory cannot be confused with a real access.
  static constexpr u8 kFreeAccess = 0x81;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  static constexpr uptr kAccessShift = 0;
  static constexpr uptr kIsReadShift = 30;
  static constexpr uptr kIsAtomicShift = 31;
#else
  static constexpr uptr kAccessShift = 24;
  static constexpr uptr kIsReadShift = 1;
  static constexpr uptr kIsAtomicShift = 0;
#endif

 public:
  // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
  static constexpr RawShadow kRodata =
      static_cast<RawShadow>(1 << kIsReadShift);
};

static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");

}  // namespace __tsan

#endif