//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within a (2**kShadowBaseAlignment)-
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 7), and is
//   aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
//   is the address of the next element of that ring buffer (with wrap-around).
//   Since each buffer is aligned to twice its size, bit (N + 12) of any
//   in-buffer address is zero; the increment carries into that bit exactly
//   when the pointer runs off the end, and clearing it wraps back to the
//   start of the buffer.
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
//   (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//   (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored in
// said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//   (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
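//
// Worked example (illustrative only; the concrete addresses are made up):
// take N = 1, so the ring buffer is 2*4096 = 0x2000 bytes, aligned to 0x4000,
// and suppose it starts at 0x10000000. For an element in the middle of the
// buffer:
//     A      = 0x10000ff8
//     A_next = (0x10000ff8 + 8) & ~(1 << 13) = 0x10001000  (no wrap)
// For the last element:
//     A      = 0x10001ff8
//     A_next = (0x10001ff8 + 8) & ~(1 << 13) = 0x10000000  (wraps to start)
// The packed thread-local word for A = 0x10000ff8 is
//     (1 << (1 + 56)) | 0x10000ff8 = 0x0200000010000ff8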

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_thread_arg_retval.h"

namespace __hwasan {

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  // Note that we *cannot* do that on Android, as the runtime will indefinitely
  // have to support code that is compiled with ashr, which only works with
  // shifts up to 6.
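  // For example (an illustrative sketch, assuming the default
  // stack_history_size of 1024 and a 64-bit uptr): desired_bytes =
  // 1024 * 8 = 8192, so the loop below returns 8192 (shift == 1).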
  for (int shift = 0; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
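
  // A sketch of one element's layout (illustrative, assuming
  // ring_buffer_size_ == 0x2000 and sizeof(Thread) <= 0x2000, which gives
  // thread_alloc_size_ == 0x4000):
  //
  //   storage + k * 0x4000:
  //     [ ring buffer: 0x2000 bytes | Thread | padding up to 0x4000 ]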

  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
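      // Wipe the recycled slot: DontNeedThread() only returned these pages to
      // the OS, so the old ring buffer and Thread contents may be stale.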
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }

  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t)
      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list into t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    RemoveThreadFromLiveList(t);
    t->Destroy();
    DontNeedThread(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }

  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }
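
  // E.g. (illustrative, with ring_buffer_size_ == 0x2000): every p inside the
  // ring buffer spanning 0x10000000..0x10001fff rounds down to the element
  // base 0x10000000, and the Thread object lives at 0x10000000 + 0x2000.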

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  template <class CB>
  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    CheckLocked();
    for (Thread *t : live_list_)
      if (cb(t))
        return t;
    return nullptr;
  }

  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

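  // Lock()/Unlock() expose live_list_mutex_ so a caller can walk the live
  // list (e.g. via FindThreadLocked) without it changing underneath them;
  // CheckLocked() supports debug assertions and thread-safety analysis.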
  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    live_list_mutex_.CheckLocked();
  }
  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
    live_list_mutex_.Unlock();
  }

 private:
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK_LE(free_space_, free_space_end_);
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_
      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_
      SANITIZER_GUARDED_BY(live_list_mutex_);

  SpinMutex stats_mutex_;
  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
ThreadArgRetval &hwasanThreadArgRetval();
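
// Minimal usage sketch (illustrative; the real call sites live elsewhere in
// the hwasan runtime, e.g. its thread creation and teardown paths):
//   InitThreadList(storage, size);  // once, at startup
//   Thread *t = hwasanThreadList().CreateCurrentThread();
//   ...
//   hwasanThreadList().ReleaseThread(t);  // at thread exit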

} // namespace __hwasan