//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>

// With some build setups, this might still not be defined.
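// The fallback of 4 matches _POSIX_THREAD_DESTRUCTOR_ITERATIONS, the minimum
// number of destructor passes POSIX requires an implementation to support.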
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif

namespace scudo {

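// A TSD (thread-specific data structure) holds the per-thread pieces of
// allocator state: a local Cache of blocks and a QuarantineCache, both guarded
// by a HybridMutex. The struct is cache-line aligned to avoid false sharing
// between TSDs. Rough usage sketch from a registry's point of view (the real
// callers live in tsd_exclusive.h / tsd_shared.h; the CacheT call below is
// illustrative only):
//
//   TSD<Allocator> *T = ...; // picked by the TSD registry
//   T->lock();               // or tryLock() when probing for an idle TSD
//   void *P = T->getCache().allocate(ClassId);
//   T->unlock();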
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
  using ThisT = TSD<Allocator>;
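  // Remaining passes of the pthread key destructor before this TSD is torn
  // down for good; init() sets it to PTHREAD_DESTRUCTOR_ITERATIONS and the
  // TSD registry's thread-teardown callback counts it down.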
  u8 DestructorIterations = 0;

  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(DestructorIterations, 0U);
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    Instance->initCache(&Cache);
    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
  }

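  // Tries to acquire the Mutex without blocking. On the first failed attempt
  // of a contended episode, stamps Precedence with a scaled monotonic
  // timestamp; a shared TSD registry compares these to steer threads toward
  // the TSD that has been contended the longest (0 means the TSD was just
  // locked or is uncontended).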
  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence,
                           static_cast<uptr>(getMonotonicTimeFast() >>
                                             FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

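  // Hands the contents of Cache/QuarantineCache back to the Allocator,
  // typically at thread teardown; the Mutex must be held.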
  void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
    Instance->commitBack(this);
  }

  // Ideally, we would assert that every operation on Cache/QuarantineCache is
  // performed with the `Mutex` acquired. However, the way TSDs are currently
  // accessed involves pointer aliasing that the thread-safety analysis cannot
  // follow, so for now we only assert on the getters of Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but
  // acquiring a TSD doesn't always require holding the lock. Add this
  // assertion once the lock is always acquired.
  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
    return Cache;
  }
  typename Allocator::QuarantineCacheT &getQuarantineCache()
      ASSERT_CAPABILITY(Mutex) {
    return QuarantineCache;
  }

private:
  HybridMutex Mutex;
  atomic_uptr Precedence = {};

  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};

} // namespace scudo

#endif // SCUDO_TSD_H_