xref: /freebsd/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
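
// Usage sketch (illustrative only, not part of this header): StaticSpinMutex
// has no constructor, so objects with static storage duration are usable
// right away (their zero-initialized state means "unlocked"); call Init()
// for other placements. SpinMutex adds the constructor. SpinMutexLock, the
// RAII guard used in the sketch, is defined at the bottom of this file.
//
//   static StaticSpinMutex counter_mu;  // zero/linker-initialized
//   static u64 counter;
//
//   void IncrementCounter() {
//     SpinMutexLock l(&counter_mu);  // locks here, unlocks on scope exit
//     counter++;
//   }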

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
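
// Park/unpark sketch (illustrative only; the names below are invented for
// the example): a thread blocks in Wait() until another thread provides a
// permit via Post(); Post(n) provides n permits, releasing up to n waiters.
//
//   Semaphore done;
//
//   void Worker() {
//     // ... produce a result ...
//     done.Post();  // unpark one waiter
//   }
//
//   void WaitForWorker() {
//     done.Wait();  // parks until Worker() calls Post()
//   }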

typedef int MutexType;

enum {
  // Used as a sentinel and to catch unassigned types
  // (should not be used as a real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // A leaf mutex can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};

// The Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table defines which mutexes can be locked under which mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then the Bar mutex can be locked while under the Foo mutex.
  // It can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
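
// Hypothetical table entries (the real tables live in the tools' sources;
// MutexTypeFoo/MutexTypeBar are invented names). Per the comments above, a
// special mark applies to the mutex whose entry contains it:
//
//   {MutexTypeFoo, "Foo", {MutexMulti, MutexTypeBar}},
//       // several Foo mutexes may be held at once; Bar may be locked
//       // while Foo is held
//   {MutexTypeBar, "Bar", {MutexLeaf}},
//       // Bar is a leaf: nothing else may be locked while it is held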

class CheckedMutex {
 public:
  constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
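
// Note: deadlock checking is compiled in only for debug, non-Go builds (see
// SANITIZER_CHECK_DEADLOCKS above); otherwise Lock(), Unlock() and
// CheckNoLocks() are empty inline functions. Illustrative call site (the
// function name is invented):
//
//   void SomeRuntimeEntryPoint() {
//     // ... take and release internal mutexes ...
//     CheckedMutex::CheckNoLocks();  // assert nothing is still held before
//                                    // returning to user code
//   }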

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO (empty base optimization).
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
 public:
  constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}

  void Lock() ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    const uptr kMaxSpinIters = 1500;
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment the waiting-writer count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
        state = atomic_load(&state_, memory_order_relaxed);
        DCHECK_NE(state & kWriterSpinWait, 0);
      } else {
        // We've set kWriterSpinWait, but we are still actively spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
    }
  }

  void Unlock() RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer =
          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) +
                    (wake_readers << kReaderLockShift);
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    bool locked;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      locked =
          (state & kReaderLockMask) == 0 &&
          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
      if (LIKELY(!locked))
        new_state = state + kReaderLockInc;
      else
        new_state = state + kWaitingReaderInc;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_acquire)));
    if (UNLIKELY(locked))
      readers_.Wait();
    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
  }

  void ReadUnlock() RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
      new_state = state - kReaderLockInc;
      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. Such a check, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // verifies that the mutex is held by some thread.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 2 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent a thundering herd problem
  //    (new writers are not woken if this flag is set)
  //
  // Writers support active spinning, readers do not.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // Writers hand off the mutex to readers: after wake-up, readers
  // already assume ownership of the mutex (they don't need to do any
  // state updates). But the mutex is not handed off to writers;
  // after wake-up, writers compete to lock the mutex again.
  // This is needed to allow repeated write locks even in the presence
  // of other blocked writers.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
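
  // Resulting layout of the 64-bit state_ (derived from the constants above):
  //   bits  0..19 - number of readers holding the lock (kReaderLockMask)
  //   bits 20..39 - number of waiting readers (kWaitingReaderMask)
  //   bits 40..59 - number of waiting writers (kWaitingWriterMask)
  //   bit  60     - writer lock (kWriterLock)
  //   bit  61     - a writer is awake and spin-waiting (kWriterSpinWait)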

  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. Such a check, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // verifies that the mutex is held by some thread, and assumes callers to be
  // generally well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
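
// Usage sketch (illustrative only): the constexpr constructor lets a global
// BlockingMutex be initialized by the linker with no runtime constructor;
// LINKER_INITIALIZED is the LinkerInitialized enumerator from
// sanitizer_internal_defs.h. BlockingMutexLock, defined at the bottom of this
// file, is the matching RAII guard.
//
//   static BlockingMutex report_mu(LINKER_INITIALIZED);
//
//   void ReportSomething() {
//     BlockingMutexLock l(&report_mu);
//     // ... exclusive section that may block ...
//   }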

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};
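
// Usage sketch (illustrative only): writers use Lock()/Unlock(), readers use
// ReadLock()/ReadUnlock(); the RWMutexLock/RWMutexReadLock guards defined
// below wrap these. Unlike Mutex above, contended paths here only spin and
// yield, they never block on a semaphore.
//
//   static RWMutex map_mu;
//
//   void ReadEntry() {
//     RWMutexReadLock l(&map_mu);  // shared (read-only) access
//     // ...
//   }
//
//   void WriteEntry() {
//     RWMutexLock l(&map_mu);  // exclusive access
//     // ...
//   }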

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
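
// RAII sketch (illustrative only): these typedefs are the usual way the
// mutexes above are taken; the guard locks in its constructor and unlocks in
// its destructor, so every return path releases the mutex.
//
//   static Mutex registry_mu;  // type defaults to MutexUnchecked
//
//   void WithExclusiveAccess() {
//     Lock l(&registry_mu);  // Mutex::Lock() now, Mutex::Unlock() at scope end
//     // ... exclusive section ...
//   }
//
//   void WithSharedAccess() {
//     ReadLock l(&registry_mu);  // Mutex::ReadLock()/ReadUnlock()
//     // ... shared section ...
//   }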

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H