/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# pragma once

# include <openssl/e_os2.h>
# include <openssl/trace.h>

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret,
                                ossl_unused void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to a shared structure other than the reference counter have to be
 * serialized, and any kind of serialization implies a release fence.  This
 * means that by the time the reference counter is decremented, all other
 * changes are visible on all processors, so the decrement itself can be
 * relaxed.  If it hits zero, the object will be destroyed.  Since that is
 * the last use of the object, the destructor's author might reason that
 * access to mutable members no longer has to be serialized, which would
 * otherwise imply an acquire fence.  Hence the conditional acquire fence...
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(int *val, int *ret, ossl_unused void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret,
                                      ossl_unused void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}
#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER >= 1200

#   define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM >= 7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}
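
/*
 * As in the C11 branch above, the decrement itself can stay relaxed:
 * _InterlockedExchangeAdd_nf is the "no fence" variant of the intrinsic,
 * and the DMB ISH barrier emitted by __dmb() on the zero path supplies
 * the acquire ordering that the destructor relies on.
 */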
static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((long volatile *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((long volatile *)val, -1) - 1;
    return 1;
}
#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef int CRYPTO_REF_COUNT;

#  define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
#  define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
          (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references, (void *)object)

#endif
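
/*-
 * Usage sketch (illustrative only; EXAMPLE_OBJ and its "references" and
 * "lock" members are hypothetical, not part of this header).  The lock
 * argument is ignored by the atomic implementations above, but must be a
 * valid CRYPTO_RWLOCK for the lock-based fallback:
 *
 *     typedef struct example_obj_st {
 *         CRYPTO_REF_COUNT references;
 *         CRYPTO_RWLOCK *lock;
 *     } EXAMPLE_OBJ;
 *
 *     static int EXAMPLE_OBJ_up_ref(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&obj->references, &i, obj->lock) <= 0)
 *             return 0;
 *         REF_PRINT_COUNT("EXAMPLE_OBJ", obj);
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1;
 *     }
 *
 *     static void EXAMPLE_OBJ_free(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         if (obj == NULL)
 *             return;
 *         CRYPTO_DOWN_REF(&obj->references, &i, obj->lock);
 *         REF_PRINT_COUNT("EXAMPLE_OBJ", obj);
 *         if (i > 0)
 *             return;
 *         REF_ASSERT_ISNT(i < 0);
 *         CRYPTO_THREAD_lock_free(obj->lock);
 *         OPENSSL_free(obj);
 *     }
 */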