/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# pragma once

# include <openssl/e_os2.h>
# include <openssl/trace.h>

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

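/*
 * The increment can be relaxed: a new reference can only be taken by a
 * thread that already holds one, so no ordering with respect to other
 * operations on the object is required.
 */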
static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret,
                                ossl_unused void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to the shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself can
 * be relaxed.  If it hits zero, the object will be destructed.  Since that
 * is the last use of the object, the programmer of the destructor might
 * reason that access to mutable members doesn't have to be serialized
 * anymore, which would otherwise imply an acquire fence.  Hence the
 * conditional acquire fence...
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

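/*
 * GCC/Clang __atomic builtins: the same scheme as the C11 path above, a
 * relaxed increment and a relaxed decrement followed by a conditional
 * acquire fence.
 */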
static __inline__ int CRYPTO_UP_REF(int *val, int *ret, ossl_unused void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret,
                                      ossl_unused void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}
#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
    return 1;
}
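/*
 * No explicit fence is needed on this path: _InterlockedExchangeAdd acts
 * as a full memory barrier on Windows, which subsumes the acquire
 * semantics needed when the count drops to zero.
 */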

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

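/*
 * The _nf ("no fence") intrinsics perform the atomic operation without a
 * memory barrier; the explicit __dmb(_ARM_BARRIER_ISH) below supplies the
 * acquire fence once the count reaches zero, mirroring the C11 path.
 */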
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd(val, -1) - 1;
    return 1;
}
#   endif

#  endif
# endif  /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef int CRYPTO_REF_COUNT;

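/*
 * CRYPTO_atomic_add() performs the addition under the supplied
 * CRYPTO_RWLOCK where real atomics are unavailable, so on this fallback
 * path the lock argument is actually used, unlike in the inline
 * implementations above.
 */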
#  define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
#  define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references, (void *)object)
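
/*
 * A sketch of the typical caller pattern, using a hypothetical type X with
 * `references` and `lock` members (names illustrative only):
 *
 *     int X_up_ref(X *x)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&x->references, &i, x->lock) <= 0)
 *             return 0;
 *         REF_PRINT_COUNT("X", x);
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1;
 *     }
 */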

#endif