1 /*
2 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9 #ifndef OSSL_INTERNAL_REFCOUNT_H
10 # define OSSL_INTERNAL_REFCOUNT_H
11 # pragma once
12
13 # include <openssl/e_os2.h>
14 # include <openssl/trace.h>
15 # include <openssl/err.h>
16
17 # if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
18 # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
19 && !defined(__STDC_NO_ATOMICS__)
20 # include <stdatomic.h>
21 # define HAVE_C11_ATOMICS
22 # endif
23
24 # if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
25 && ATOMIC_INT_LOCK_FREE > 0
26
27 # define HAVE_ATOMICS 1
28
29 # if defined(__has_feature)
30 # if __has_feature(thread_sanitizer)
31 # define OSSL_TSAN_BUILD
32 # endif
33 # endif
34
typedef struct {
    _Atomic int val;
} CRYPTO_REF_COUNT;

/*
 * Take a reference.  The new count is stored in *ret; always returns 1.
 * Acquiring a reference imposes no ordering requirement on other memory
 * operations, so a relaxed RMW is sufficient.
 */
static inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    int prev = atomic_fetch_add_explicit(&refcnt->val, 1, memory_order_relaxed);

    *ret = prev + 1;
    return 1;
}

/*
 * Drop a reference.  The new count is stored in *ret; always returns 1.
 *
 * Changes to the shared structure other than the reference counter must be
 * serialized by the caller, and any such serialization implies a release
 * fence, so by the time the counter is decremented all other changes are
 * visible on all processors.  The decrement itself can therefore be
 * relaxed — except when it hits zero: the object is then about to be
 * destroyed, and the destructor may access mutable members without any
 * further serialization (i.e. without the acquire fence such serialization
 * would imply).  Hence the conditional acquire fence below.
 */
static inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
# ifdef OSSL_TSAN_BUILD
    /*
     * TSAN cannot see through the conditional fence and reports a false
     * positive when the object containing the refcount is freed, so use
     * acq_rel on the RMW itself under TSAN.
     */
    int prev = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_acq_rel);

    *ret = prev - 1;
# else
    int prev = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_release);

    *ret = prev - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
# endif
    return 1;
}

/*
 * Read the current count into *ret; always returns 1.
 * NOTE(review): this load is acquire-ordered while the __atomic
 * implementation below uses a relaxed load — confirm which is intended.
 */
static inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_load_explicit(&refcnt->val, memory_order_acquire);
    return 1;
}
76
77 # elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0
78
79 # define HAVE_ATOMICS 1
80
typedef struct {
    int val;
} CRYPTO_REF_COUNT;

/*
 * Take a reference with a relaxed RMW (no ordering needed to acquire a
 * reference).  New count in *ret; always returns 1.
 */
static __inline__ int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    int prev = __atomic_fetch_add(&refcnt->val, 1, __ATOMIC_RELAXED);

    *ret = prev + 1;
    return 1;
}

/*
 * Drop a reference.  The decrement is a release operation; when the count
 * reaches zero the object is about to be destroyed, so an acquire fence is
 * issued to make all prior writes visible to the destructor.  New count in
 * *ret; always returns 1.
 */
static __inline__ int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    int prev = __atomic_fetch_sub(&refcnt->val, 1, __ATOMIC_RELEASE);

    *ret = prev - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

/* Read the current count (relaxed) into *ret; always returns 1. */
static __inline__ int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_load_n(&refcnt->val, __ATOMIC_RELAXED);
    return 1;
}
104
105 # elif defined(__ICL) && defined(_WIN32)
106 # define HAVE_ATOMICS 1
107
typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

/*
 * Intel compiler on Windows: fully-fenced Interlocked RMW operations.
 * _InterlockedExchangeAdd() returns the PREVIOUS value, hence the +/- 1
 * adjustments below.  New count in *ret; always returns 1.
 *
 * NOTE(review): the (void *) casts paper over the int* vs long* parameter
 * mismatch — presumably relying on sizeof(int) == sizeof(long) on Windows;
 * confirm this is intentional.
 */
static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    /* Adding 0 atomically reads the current value. */
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 0);
    return 1;
}
129
130 # elif defined(_MSC_VER) && _MSC_VER>=1200
131
132 # define HAVE_ATOMICS 1
133
/* MSVC: plain int updated exclusively via Interlocked* operations. */
typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;
137
138 # if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
139 # include <intrin.h>
140 # if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
141 # define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
142 # endif
143
/*
 * MSVC on ARM/ARM64: use the barrier-suffixed Interlocked variants.
 * _InterlockedExchangeAdd*() returns the PREVIOUS value, hence the
 * +/- 1 adjustments.  New count in *ret; always returns 1.
 */
static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    /* _nf ("no fence"): taking a reference needs no ordering. */
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    /* Plain (fully fenced) variant: ordering matters when the count may
     * drop to zero and the object is subsequently destroyed. */
    *ret = _InterlockedExchangeAdd(&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    /* _acq: acquire-ordered atomic read (add of 0).
     * NOTE(review): the (void *) cast is inconsistent with the two
     * functions above, which pass &refcnt->val directly — confirm. */
    *ret = _InterlockedExchangeAdd_acq((void *)&refcnt->val, 0);
    return 1;
}
161
162 # else
163 # if !defined(_WIN32_WCE)
164 # pragma intrinsic(_InterlockedExchangeAdd)
165 # else
166 # if _WIN32_WCE >= 0x600
167 extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
168 # else
169 /* under Windows CE we still have old-style Interlocked* functions */
170 extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
171 # define _InterlockedExchangeAdd InterlockedExchangeAdd
172 # endif
173 # endif
174
/*
 * Generic MSVC: fully-fenced _InterlockedExchangeAdd() for every
 * operation.  The intrinsic returns the PREVIOUS value, so the new
 * count is derived before being stored in *ret.  Always returns 1.
 */
static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    int prev = _InterlockedExchangeAdd(&refcnt->val, 1);

    *ret = prev + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    int prev = _InterlockedExchangeAdd(&refcnt->val, -1);

    *ret = prev - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    /* Adding 0 atomically reads the current value. */
    *ret = _InterlockedExchangeAdd(&refcnt->val, 0);
    return 1;
}
192
193 # endif
194
195 # endif
196 # endif /* !OPENSSL_DEV_NO_ATOMICS */
197
198 /*
199 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
200 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
201 * means we need to implement a fallback. This fallback uses locks.
202 */
203 # ifndef HAVE_ATOMICS
204
/* Lock-based fallback used when no atomics implementation is available. */
typedef struct {
    int val;                 /* current reference count */
# ifdef OPENSSL_THREADS
    CRYPTO_RWLOCK *lock;     /* serializes updates to val */
# endif
} CRYPTO_REF_COUNT;
211
212 # ifdef OPENSSL_THREADS
213
/*
 * Lock-based increment: delegates to CRYPTO_atomic_add() under
 * refcnt->lock.  New count in *ret; propagates the helper's return value.
 */
static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, 1, ret, refcnt->lock);
}

/* Lock-based decrement; new count in *ret, returns CRYPTO_atomic_add()'s result. */
static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, -1, ret, refcnt->lock);
}

/* Lock-based read; current count in *ret, returns CRYPTO_atomic_load_int()'s result. */
static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    return CRYPTO_atomic_load_int(&refcnt->val, ret, refcnt->lock);
}
231
232 # define CRYPTO_NEW_FREE_DEFINED 1
/*
 * Initialise a reference counter to n and allocate its backing lock.
 * Returns 1 on success; on lock-allocation failure raises
 * ERR_R_CRYPTO_LIB and returns 0.
 */
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    refcnt->lock = CRYPTO_THREAD_lock_new();
    if (refcnt->lock == NULL) {
        ERR_raise(ERR_LIB_CRYPTO, ERR_R_CRYPTO_LIB);
        return 0;
    }
    return 1;
}
243
CRYPTO_FREE_REF(CRYPTO_REF_COUNT * refcnt)244 static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt) \
245 {
246 if (refcnt != NULL)
247 CRYPTO_THREAD_lock_free(refcnt->lock);
248 }
249
250 # else /* OPENSSL_THREADS */
251
CRYPTO_UP_REF(CRYPTO_REF_COUNT * refcnt,int * ret)252 static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
253 int *ret)
254 {
255 refcnt->val++;
256 *ret = refcnt->val;
257 return 1;
258 }
259
CRYPTO_DOWN_REF(CRYPTO_REF_COUNT * refcnt,int * ret)260 static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
261 int *ret)
262 {
263 refcnt->val--;
264 *ret = refcnt->val;
265 return 1;
266 }
267
CRYPTO_GET_REF(CRYPTO_REF_COUNT * refcnt,int * ret)268 static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
269 int *ret)
270 {
271 *ret = refcnt->val;
272 return 1;
273 }
274
275 # endif /* OPENSSL_THREADS */
276 # endif
277
278 # ifndef CRYPTO_NEW_FREE_DEFINED
/*
 * Initialise a reference counter to n.  No lock is needed in this
 * configuration, so this cannot fail; always returns 1.
 */
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    return 1;
}
284
CRYPTO_FREE_REF(CRYPTO_REF_COUNT * refcnt)285 static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt) \
286 {
287 }
288 # endif /* CRYPTO_NEW_FREE_DEFINED */
289 #undef CRYPTO_NEW_FREE_DEFINED
290
291 # if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
292 # define REF_ASSERT_ISNT(test) \
293 (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
294 # else
295 # define REF_ASSERT_ISNT(i)
296 # endif
297
298 # define REF_PRINT_EX(text, count, object) \
299 OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
300 # define REF_PRINT_COUNT(text, val, object) \
301 REF_PRINT_EX(text, val, (void *)object)
302
303 #endif
304