xref: /freebsd/contrib/jemalloc/include/jemalloc/internal/atomic.h (revision c43cad87172039ccf38172129c79755ea79e6102)
1 #ifndef JEMALLOC_INTERNAL_ATOMIC_H
2 #define JEMALLOC_INTERNAL_ATOMIC_H
3 
4 #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
5 
/*
 * Select the atomics backend.  JEMALLOC_U8_ATOMICS is defined optimistically
 * up front and revoked below when the chosen GCC-style backend does not
 * provide 8-bit atomics; the MSVC and C11 backends leave it defined.
 */
6 #define JEMALLOC_U8_ATOMICS
7 #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
8 #  include "jemalloc/internal/atomic_gcc_atomic.h"
9 #  if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
/* __atomic backend without 8-bit support: revoke the optimistic define. */
10 #    undef JEMALLOC_U8_ATOMICS
11 #  endif
12 #elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
13 #  include "jemalloc/internal/atomic_gcc_sync.h"
14 #  if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
/* __sync backend without 8-bit support: revoke the optimistic define. */
15 #    undef JEMALLOC_U8_ATOMICS
16 #  endif
17 #elif defined(_MSC_VER)
18 #  include "jemalloc/internal/atomic_msvc.h"
19 #elif defined(JEMALLOC_C11_ATOMICS)
20 #  include "jemalloc/internal/atomic_c11.h"
21 #else
22 #  error "Don't have atomics implemented on this platform."
23 #endif
24 
25 /*
26  * This header gives more or less a backport of C11 atomics. The user can write
27  * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
28  * counterparts of the C11 atomic functions for type, as so:
29  *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
30  * and then write things like:
31  *   int *some_ptr;
32  *   atomic_pi_t atomic_ptr_to_int;
33  *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
34  *   int *prev_value = atomic_exchange_pi(&atomic_ptr_to_int, NULL, ATOMIC_ACQ_REL);
35  *   assert(some_ptr == prev_value);
36  * and expect things to work in the obvious way.
37  *
38  * Also included (with naming differences to avoid conflicts with the standard
39  * library):
40  *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
41  *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
42  */
43 
44 /*
45  * Pure convenience, so that we don't have to type "atomic_memory_order_"
46  * quite so often.
47  */
48 #define ATOMIC_RELAXED atomic_memory_order_relaxed
49 #define ATOMIC_ACQUIRE atomic_memory_order_acquire
50 #define ATOMIC_RELEASE atomic_memory_order_release
51 #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
52 #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
53 
54 /*
55  * Another convenience -- simple atomic helper functions.
56  */
/*
 * Like JEMALLOC_GENERATE_INT_ATOMICS, but additionally emits two convenience
 * helpers, atomic_load_add_store_<short_type>() and
 * atomic_load_sub_store_<short_type>().  Note that these helpers are not
 * atomic read-modify-write operations: each performs a relaxed load, a plain
 * add/sub, and a relaxed store, so a concurrent update may be lost.
 * Presumably intended for counters where such races are tolerable, or for
 * state guarded by an external lock -- verify at call sites.
 */
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type,	\
    lg_size)								\
    JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size)		\
    ATOMIC_INLINE void							\
    atomic_load_add_store_##short_type(atomic_##short_type##_t *a,	\
	type inc) {							\
	    type cur = atomic_load_##short_type(a, ATOMIC_RELAXED);	\
	    atomic_store_##short_type(a, cur + inc, ATOMIC_RELAXED);	\
	}								\
    ATOMIC_INLINE void							\
    atomic_load_sub_store_##short_type(atomic_##short_type##_t *a,	\
	type inc) {							\
	    type cur = atomic_load_##short_type(a, ATOMIC_RELAXED);	\
	    atomic_store_##short_type(a, cur - inc, ATOMIC_RELAXED);	\
	}
74 
75 /*
76  * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
77  * fact.
78  */
79 #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
80 #  define JEMALLOC_ATOMIC_U64
81 #endif
82 
/* Pointer atomics: atomic_p_t, lg_size == LG_SIZEOF_PTR. */
83 JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
84 
85 /*
86  * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
87  * platform that actually needs to know the size, MSVC.
88  */
89 JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
90 
/*
 * Integer atomics plus the load-add-store/load-sub-store helpers.  The third
 * argument is lg2 of the type's size in bytes (0 => 1 byte, 2 => 4 bytes,
 * 3 => 8 bytes).
 */
91 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
92 
93 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
94 
95 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
96 
97 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
98 
99 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
100 
/* 64-bit atomics only exist on platforms where the gate above detected them. */
101 #ifdef JEMALLOC_ATOMIC_U64
102 JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
103 #endif
104 
105 #undef ATOMIC_INLINE
106 
107 #endif /* JEMALLOC_INTERNAL_ATOMIC_H */
108