#include "hwasan_thread.h"

#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __hwasan {

static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
      seed = static_cast<u32>(
          (NanoTime() >> 12) ^
          (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
    }
  } while (!seed);
  return seed;
}

void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}
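// Why pushing zeros above randomizes the first stack tag base: instrumented
// prologues derive a frame's base tag from the current ring buffer write
// position, which advances by one slot per push, so advancing it by a random
// count gives each thread a different starting point in the tag sequence.
// A minimal sketch of that derivation (illustrative only; the names below are
// hypothetical, not the runtime's):
//
//   // The write cursor moves sizeof(uptr) bytes per record, so its low bits
//   // change with every push; shifting and masking them yields the base tag.
//   uptr cursor = ring_buffer_start + num_records * sizeof(uptr);
//   tag_t base_tag = (cursor >> 3) & ((1 << kTagBits) - 1);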
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
  dtls_ = DTLS_Get();
}

void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisabler needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }

  if (flags()->verbose_threads) {
    if (IsMainThread()) {
      Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
             sizeof(Thread), heap_allocations_->SizeInBytes(),
             stack_allocations_->size() * sizeof(uptr));
    }
    Print("Creating  : ");
  }
}

void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
}

void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorSwallowThreadLocalCache(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code cannot run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after
  // all TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}

void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
         (void *)this, stack_bottom(), stack_top(),
         stack_top() - stack_bottom(), tls_begin(), tls_end());
}

static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);
  return tag;
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}

void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }

void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }

void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}

}  // namespace __lsan
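// A standalone model of the tag draw in GenerateRandomTag() above
// (illustrative sketch, not part of the runtime; assumes 8 tag bits and uses
// plain stdint types):
//
//   #include <cstdint>
//
//   uint32_t xorshift32(uint32_t s) {
//     s ^= s << 13;
//     s ^= s >> 17;
//     s ^= s << 5;
//     return s;  // full period over the 2^32 - 1 non-zero states
//   }
//
//   uint8_t next_tag(uint32_t &state, uint32_t &buffer) {
//     uint8_t tag;
//     do {
//       if (!buffer)
//         buffer = state = xorshift32(state);  // refill 32 bits at a time
//       tag = buffer & 0xff;                   // consume 8 bits per draw
//       buffer >>= 8;
//     } while (!tag);  // zero means "untagged", so it is never handed out
//     return tag;
//   }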