//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <__config>
#ifdef _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS
#  define _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS
#endif

#include <__functional/hash.h>
#include <memory>
#include <typeinfo>

#if _LIBCPP_HAS_THREADS
#  include <mutex>
#  include <thread>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif

#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

bad_weak_ptr::~bad_weak_ptr() noexcept {}

const char* bad_weak_ptr::what() const noexcept { return "bad_weak_ptr"; }

__shared_count::~__shared_count() {}

__shared_weak_count::~__shared_weak_count() {}

#if defined(_LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS)
void __shared_count::__add_shared() noexcept { __libcpp_atomic_refcount_increment(__shared_owners_); }

bool __shared_count::__release_shared() noexcept {
  if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1) {
    __on_zero_shared();
    return true;
  }
  return false;
}

void __shared_weak_count::__add_shared() noexcept { __shared_count::__add_shared(); }

void __shared_weak_count::__add_weak() noexcept { __libcpp_atomic_refcount_increment(__shared_weak_owners_); }

void __shared_weak_count::__release_shared() noexcept {
  if (__shared_count::__release_shared())
    __release_weak();
}
#endif // _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS

void __shared_weak_count::__release_weak() noexcept {
  // NOTE: The acquire load here is an optimization of the very
  // common case where a shared pointer is being destructed while
  // having no other contended references.
  //
  // BENEFIT: We avoid expensive atomic stores like XADD and STREX
  // in a common case. Those instructions are slow and do nasty
  // things to caches.
  //
  // IS THIS SAFE? Yes. During weak destruction, if we see that we
  // are the last reference, we know that no-one else is accessing
  // us. If someone were accessing us, then they would be doing so
  // while the last shared / weak_ptr was being destructed, and
  // that's undefined anyway.
  //
  // If we see anything other than a 0, then we have possible
  // contention, and need to use an atomicrmw primitive.
  // The same arguments don't apply for increment, where it is legal
  // (though inadvisable) to share shared_ptr references between
  // threads, and have them all get copied at once. The argument
  // also doesn't apply for __release_shared, because an outstanding
  // weak_ptr::lock() could read / modify the shared count.
  if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0) {
    // no need to do this store, because we are about
    // to destroy everything.
    //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
    __on_zero_shared_weak();
  } else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
    __on_zero_shared_weak();
}

__shared_weak_count* __shared_weak_count::lock() noexcept {
  long object_owners = __libcpp_atomic_load(&__shared_owners_);
  while (object_owners != -1) {
    if (__libcpp_atomic_compare_exchange(&__shared_owners_, &object_owners, object_owners + 1))
      return this;
  }
  return nullptr;
}

const void* __shared_weak_count::__get_deleter(const type_info&) const noexcept { return nullptr; }

#if _LIBCPP_HAS_THREADS

static constexpr std::size_t __sp_mut_count = 32;
static constinit __libcpp_mutex_t mut_back[__sp_mut_count] = {
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER};

constexpr __sp_mut::__sp_mut(void* p) noexcept : __lx_(p) {}

void __sp_mut::lock() noexcept {
  auto m = static_cast<__libcpp_mutex_t*>(__lx_);
  __libcpp_mutex_lock(m);
}

void __sp_mut::unlock() noexcept { __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx_)); }

__sp_mut& __get_sp_mut(const void* p) {
  static constinit __sp_mut muts[__sp_mut_count] = {
      &mut_back[0],  &mut_back[1],  &mut_back[2],  &mut_back[3],  &mut_back[4],  &mut_back[5],  &mut_back[6],
      &mut_back[7],  &mut_back[8],  &mut_back[9],  &mut_back[10], &mut_back[11], &mut_back[12], &mut_back[13],
      &mut_back[14], &mut_back[15], &mut_back[16], &mut_back[17], &mut_back[18], &mut_back[19], &mut_back[20],
      &mut_back[21], &mut_back[22], &mut_back[23], &mut_back[24], &mut_back[25], &mut_back[26], &mut_back[27],
      &mut_back[28], &mut_back[29], &mut_back[30], &mut_back[31]};
  return muts[hash<const void*>()(p) & (__sp_mut_count - 1)];
}

#endif // _LIBCPP_HAS_THREADS

void* align(size_t alignment, size_t size, void*& ptr, size_t& space) {
  void* r = nullptr;
  if (size <= space) {
    char* p1 = static_cast<char*>(ptr);
    char* p2 = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 + (alignment - 1)) & -alignment);
    size_t d = static_cast<size_t>(p2 - p1);
    if (d <= space - size) {
      r   = p2;
      ptr = r;
      space -= d;
    }
  }
  return r;
}

_LIBCPP_END_NAMESPACE_STD
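
// Usage sketch (illustrative only; not part of the library build). The
// `align` definition above backs std::align: it bumps `ptr` forward to the
// next `alignment` boundary when `size` bytes still fit in `space`, shrinks
// `space` by the padding that was skipped (the `size` bytes themselves are
// not consumed), and returns the adjusted pointer, or nullptr on failure.
// `carve_aligned` below is a hypothetical caller, not a libc++ API:
//
//   #include <cstddef>
//   #include <memory>
//
//   void* carve_aligned(char (&buffer)[64]) {
//     void* p           = buffer;
//     std::size_t space = sizeof(buffer);
//     // Request 16 bytes aligned to a 16-byte boundary inside `buffer`.
//     if (void* aligned = std::align(16, 16, p, space)) {
//       // `p` now points at the aligned block; `space` has shrunk only by
//       // the alignment padding, since std::align does not consume `size`.
//       return aligned;
//     }
//     return nullptr; // buffer too small once alignment padding is applied
//   }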