//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};
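
// Usage sketch (illustrative, not part of this header): StaticSpinMutex has
// no constructor, so it can live in zero-initialized static storage and be
// used before static constructors run; the all-zero state means "unlocked".
// Call Init() only when the zero state cannot be assumed.
//
//   static StaticSpinMutex init_mu;  // hypothetical static mutex
//
//   void HypotheticalInitOnce() {
//     SpinMutexLock l(&init_mu);  // RAII typedef defined at the end of file
//     // ... critical section ...
//   }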

class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
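
// Park/unpark sketch (illustrative): a thread blocks in Wait() until another
// thread grants it a permit via Post(); Post(n) releases up to n waiters.
//
//   Semaphore sem;
//   // waiter thread:
//   sem.Wait();   // blocks until a permit is available
//   // waker thread:
//   sem.Post(2);  // grants two permits, waking up to two waiters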

typedef int MutexType;

enum {
  // Used as a sentinel and to catch unassigned types
  // (should not be used as a real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutex types must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // A leaf mutex can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while a leaf mutex is held.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
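
// For example, a tool would define its own types starting at MutexLastCommon
// (the names below are hypothetical):
//
//   enum {
//     MutexTypeTrace = MutexLastCommon,  // first tool-specific type
//     MutexTypeReport,
//     MutexTypeCount,  // not a real type; entry count for table sizing
//   };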

// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table specifies which mutexes can be locked under which mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then the Bar mutex can be locked while the Foo mutex is held.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
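
// An illustrative (hypothetical) deadlock-checking table built from MutexMeta,
// using the example types sketched after the enum above:
//
//   static const MutexMeta kMutexMeta[] = {
//       {MutexThreadRegistry, "ThreadRegistry", {MutexLeaf}},
//       // Report can be locked while Trace is held, but not vice versa:
//       {MutexTypeTrace, "Trace", {MutexTypeReport}},
//       {MutexTypeReport, "Report", {}},
//   };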

class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
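
// Typical use of CheckNoLocks() (illustrative): a runtime entry point asserts
// that no internal mutexes leak out into user code.
//
//   void HypotheticalRuntimeEntry() {
//     // ... acquire and release internal mutexes ...
//     CheckedMutex::CheckNoLocks();  // no-op unless SANITIZER_CHECK_DEADLOCKS
//   }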

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO (empty base optimization):
// we could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment the waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented the waiting writers count, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still actively spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // the next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        // The mutex is not write-locked, acquire a reader reference.
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment the waiting readers count and block.
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented the waiting readers count, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still actively spinning.
      }
      // See the matching comment in Lock() regarding reset_mask.
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not check that the calling thread is the one that
  // owns the mutex. That stricter check, while more correct, causes problems
  // in cases like StopTheWorld, where a parent thread owns the mutex but a
  // child thread checks that it is locked. Rather than maintaining complex
  // state to work around such situations, we only verify that the mutex is
  // held by someone.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state contains 3 counters:
  //  - the number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - the number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - the number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // and 3 flags:
  //  - writer lock,
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting,
  //    used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting,
  //    used for the same purpose for readers
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wakeup both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
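
  // Resulting bit layout of state_ (least significant bit first):
  //   bits [0, 20)  - number of readers holding the lock (kReaderLockMask)
  //   bits [20, 40) - number of blocked waiting readers (kWaitingReaderMask)
  //   bits [40, 60) - number of blocked waiting writers (kWaitingWriterMask)
  //   bit  60       - kWriterLock
  //   bit  61       - kWriterSpinWait
  //   bit  62       - kReaderSpinWait
  // E.g. a mutex held by 2 readers with 1 blocked writer has
  // state_ == 2 * kReaderLockInc + kWaitingWriterInc.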

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};

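// Futex-like primitives used by the mutex implementations. The semantics
// follow the Linux futex(2) model: FutexWait blocks the calling thread only
// while *p still equals cmp (and may return spuriously), and FutexWake wakes
// up to count threads blocked on p.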
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
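
// RAII usage sketch (illustrative; mu and the functions are hypothetical):
//
//   Mutex mu;
//
//   void Writer() {
//     Lock l(&mu);  // exclusive; released at end of scope
//     // ... mutate shared state ...
//   }
//
//   void Reader() {
//     ReadLock l(&mu);  // shared; many readers may hold it concurrently
//     // ... read shared state ...
//   }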

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H