xref: /freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h (revision f976241773df2260e6170317080761d1c5814fe5)
1 //===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_ATOMIC_H_
10 #define SCUDO_ATOMIC_H_
11 
12 #include "internal_defs.h"
13 
14 namespace scudo {
15 
// Memory ordering constraints for the atomic helpers below. The enumerator
// values deliberately mirror the compiler's __ATOMIC_* builtin constants
// one-to-one, so a memory_order can be passed straight to the __atomic_*
// builtins; the COMPILER_CHECKs enforce that mapping at compile time.
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
30 
// Atomic wrapper around an unsigned 8-bit value. The member must only be
// accessed through the atomic_* helpers below, hence its name.
struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};
35 
// Atomic wrapper around an unsigned 16-bit value; access only through the
// atomic_* helpers.
struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};
40 
// Atomic wrapper around a signed 32-bit value; access only through the
// atomic_* helpers.
struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};
45 
// Atomic wrapper around an unsigned 32-bit value; access only through the
// atomic_* helpers.
struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};
50 
// Atomic wrapper around an unsigned 64-bit value; access only through the
// atomic_* helpers.
struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes, but the
  // __atomic builtins require natural alignment for lock-free operation, so
  // force it here.
  ALIGNED(8) volatile Type ValDoNotUse;
};
56 
// Atomic wrapper around a pointer-sized unsigned value; access only through
// the atomic_* helpers.
struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};
61 
62 template <typename T>
63 INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
64   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
65   typename T::Type V;
66   __atomic_load(&A->ValDoNotUse, &V, MO);
67   return V;
68 }
69 
70 template <typename T>
71 INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
72   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
73   __atomic_store(&A->ValDoNotUse, &V, MO);
74 }
75 
76 INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
77 
78 template <typename T>
79 INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
80                                          memory_order MO) {
81   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
82   return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
83 }
84 
85 template <typename T>
86 INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
87                                          memory_order MO) {
88   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
89   return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
90 }
91 
92 template <typename T>
93 INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
94                                         memory_order MO) {
95   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
96   typename T::Type R;
97   __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
98   return R;
99 }
100 
101 template <typename T>
102 INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
103                                            typename T::Type Xchg,
104                                            memory_order MO) {
105   return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
106                                    __ATOMIC_RELAXED);
107 }
108 
109 template <typename T>
110 INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
111                                          typename T::Type Xchg,
112                                          memory_order MO) {
113   return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
114                                    __ATOMIC_RELAXED);
115 }
116 
117 // Clutter-reducing helpers.
118 
119 template <typename T>
120 INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
121   return atomic_load(A, memory_order_relaxed);
122 }
123 
124 template <typename T>
125 INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
126   atomic_store(A, V, memory_order_relaxed);
127 }
128 
129 template <typename T>
130 INLINE typename T::Type atomic_compare_exchange(volatile T *A,
131                                                 typename T::Type Cmp,
132                                                 typename T::Type Xchg) {
133   atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
134   return Cmp;
135 }
136 
137 } // namespace scudo
138 
139 #endif // SCUDO_ATOMIC_H_
140