/freebsd/contrib/llvm-project/libcxx/include/__atomic/
to_gcc_order.h
  28: (__order == memory_order_acquire  in __to_gcc_order()
  41: (__order == memory_order_acquire  in __to_gcc_failure_order()
memory_order.h
  45: inline constexpr auto memory_order_acquire = memory_order::acquire;  (variable)
  55: memory_order_acquire = __mo_acquire,  (enumerator)
check_memory_order.h
  19: …_LIBCPP_DIAGNOSE_WARNING(__m == memory_order_consume || __m == memory_order_acquire || __m == memo…
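Taken together, these three headers define the std::memory_order constants, translate them to the compiler's __ATOMIC_* builtin constants, and diagnose orders that are invalid for a given operation. Below is a minimal sketch of that translation, assuming a GCC/Clang-style compiler that predefines the __ATOMIC_* macros; the real __to_gcc_order() uses nested conditionals rather than a switch, and a compare-exchange failure order may not contain a release component, hence the demotion in the second function.

    #include <atomic>

    // Illustrative translation of std::memory_order to the GCC/Clang
    // builtin order constants, in the spirit of __to_gcc_order().
    constexpr int to_gcc_order(std::memory_order order) {
      switch (order) {
      case std::memory_order_relaxed: return __ATOMIC_RELAXED;
      case std::memory_order_consume: return __ATOMIC_CONSUME;
      case std::memory_order_acquire: return __ATOMIC_ACQUIRE;
      case std::memory_order_release: return __ATOMIC_RELEASE;
      case std::memory_order_acq_rel: return __ATOMIC_ACQ_REL;
      case std::memory_order_seq_cst: return __ATOMIC_SEQ_CST;
      }
      return __ATOMIC_SEQ_CST; // unreachable for valid orders
    }

    // Failure orders demote release components, as __to_gcc_failure_order()
    // does: release falls back to relaxed, acq_rel to acquire.
    constexpr int to_gcc_failure_order(std::memory_order order) {
      switch (order) {
      case std::memory_order_release: return __ATOMIC_RELAXED;
      case std::memory_order_acq_rel: return __ATOMIC_ACQUIRE;
      default:                        return to_gcc_order(order);
      }
    }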
/freebsd/contrib/llvm-project/compiler-rt/lib/xray/
xray_interface.cpp
  211: memory_order_acquire))  in patchFunction()
  266: memory_order_acquire))  in controlPatching()
  416: memory_order_acquire)) {  in __xray_set_handler()
  429: memory_order_acquire)) {  in __xray_set_customevent_handler()
  441: memory_order_acquire)) {  in __xray_set_typedevent_handler()
  492: memory_order_acquire))  in __xray_set_handler_arg1()
xray_init.cpp
  66: if (atomic_load(&XRayInitialized, memory_order_acquire))  in __xray_init()
  72: if (!atomic_load(&XRayFlagsInitialized, memory_order_acquire)) {  in __xray_init()
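Both __xray_init() hits are the reader half of a publish/subscribe handshake: a load-acquire on an initialization flag pairs with the store-release that set it, so a thread that observes the flag as true also observes every write made before publication. A minimal sketch of the pattern, with hypothetical names rather than XRay's:

    #include <atomic>

    std::atomic<bool> initialized{false}; // the published flag
    int config_value = 0;                 // plain data guarded by the flag

    void init_once() {
      config_value = 42;                                  // write the data...
      initialized.store(true, std::memory_order_release); // ...then publish
    }

    bool try_use() {
      if (!initialized.load(std::memory_order_acquire))
        return false;              // not initialized yet
      return config_value == 42;   // safe: acquire saw the release
    }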
xray_basic_logging.cpp
  94: if (atomic_load(&UseRealTSC, memory_order_acquire))  in getLog()
  106: Header.CycleFrequency = atomic_load(&CycleFrequency, memory_order_acquire);  in getLog()
  424: atomic_load(&TicksPerSec, memory_order_acquire) *  in basicLoggingInit()
  427: __xray_set_handler_arg1(atomic_load(&UseRealTSC, memory_order_acquire)  in basicLoggingInit()
  430: __xray_set_handler(atomic_load(&UseRealTSC, memory_order_acquire)  in basicLoggingInit()
xray_fdr_logging.cpp
  249: atomic_thread_fence(memory_order_acquire);  in fdrIterator()
  250: auto BufferSize = atomic_load(It->Extents, memory_order_acquire);  in fdrIterator()
  277: if (atomic_load(&LoggingStatus, memory_order_acquire) !=  in fdrLoggingFlush()
  363: auto BufferExtents = atomic_load(B.Extents, memory_order_acquire);  in fdrLoggingFlush()
  445: auto Status = atomic_load(&LoggingStatus, memory_order_acquire);  in setupTLD()
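The fdrIterator() hits at 249-250 pair a standalone acquire fence with an acquire load of the buffer extents. A fence can also serve as the acquire half on its own, upgrading an earlier relaxed load; a small sketch of that fence idiom, with a hypothetical counter standing in for It->Extents:

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> extents{0}; // stand-in for It->Extents

    std::size_t read_extents() {
      // Fence form of an acquire load: a relaxed load followed by an
      // acquire fence synchronizes with the release store whose value
      // the load observed.
      std::size_t n = extents.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);
      return n;
    }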
xray_buffer_queue.h
  233: return atomic_load(&Finalizing, memory_order_acquire);  in finalizing()
  237: return atomic_load(&Generation, memory_order_acquire);  in generation()
/freebsd/contrib/llvm-project/compiler-rt/lib/asan/
asan_allocator.cpp
  175: return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic  in Get()
  238: memory_order_acquire)) {  in Recycle()
  405: if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==  in RePoisonChunk()
  447: options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);  in GetOptions()
  448: options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);  in GetOptions()
  451: atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);  in GetOptions()
  466: u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));  in ComputeRZLog()
  467: u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));  in ComputeRZLog()
  519: if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)  in UpdateAllocationStack()
  670: memory_order_acquire)) {  in AtomicallySetQuarantineFlagIfAllocated()
  [all …]
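Most of these hits read allocator metadata (chunk_state, redzone bounds) with plain load-acquire; the hit at 670 sits inside AtomicallySetQuarantineFlagIfAllocated(), which claims a chunk for quarantine with a compare-exchange. A sketch of that claim under assumed state constants; the real function does more bookkeeping around the transition:

    #include <atomic>
    #include <cstdint>

    enum ChunkState : std::uint8_t {
      CHUNK_INVALID,
      CHUNK_ALLOCATED,
      CHUNK_QUARANTINE,
    };

    // Move a chunk from ALLOCATED to QUARANTINE exactly once; acquire on
    // success makes the chunk's metadata visible to the claiming thread.
    bool claim_for_quarantine(std::atomic<std::uint8_t> &state) {
      std::uint8_t expected = CHUNK_ALLOCATED;
      return state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed);
    }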
/freebsd/contrib/llvm-project/compiler-rt/lib/sanitizer_common/
sanitizer_tls_get_addr.h
  67: (DTLS::DTVBlock *)atomic_load(&dtls->dtv_block, memory_order_acquire);  in ForEachDVT()
  71: block = (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);  in ForEachDVT()
sanitizer_atomic.h
  27: memory_order_acquire = __ATOMIC_ACQUIRE,  (enumerator)
  34: memory_order_acquire = 1 << 2,
sanitizer_lfstack.h
  48: u64 cmp = atomic_load(&head_, memory_order_acquire);  in Pop()
  57: memory_order_acquire))  in Pop()
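Pop() is a Treiber-stack pop: load-acquire the head so the popped node's fields are visible, then compare-exchange it for its successor. The real head_ is a u64 packing a pointer with a generation count to defeat ABA; the pointer-only sketch below shows just the ordering:

    #include <atomic>

    struct Node { Node *next; int value; };

    std::atomic<Node *> head{nullptr};

    // Simplified Pop(). Without the generation counter this is exposed to
    // ABA; sanitizer_lfstack.h avoids that with its packed count.
    Node *pop() {
      Node *old = head.load(std::memory_order_acquire);
      while (old && !head.compare_exchange_weak(old, old->next,
                                                std::memory_order_acquire,
                                                std::memory_order_acquire))
        ; // on failure, old is reloaded and we retry
      return old;
    }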
sanitizer_libignore.h
  91: const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);  in IsIgnored()
  105: const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);  in IsPcInstrumented()
sanitizer_mutex.h
  36: return atomic_exchange(&state_, 1, memory_order_acquire) == 0;  in TryLock()
  190: memory_order_acquire)))  in Lock()
  218: &state_, &state, state | kWriterLock, memory_order_acquire))) {  in TryLock()
  274: memory_order_acquire)))  in ReadLock()
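The TryLock() hit at 36 is the simplest acquire idiom in this list: swap in the locked value, and owning the lock is the fact that the old value was zero. A self-contained sketch of that exchange-acquire paired with a store-release unlock:

    #include <atomic>

    class SpinMutex {
      std::atomic<int> state_{0}; // 0 = unlocked, 1 = locked
    public:
      bool try_lock() {
        // Acquire on success: everything the previous holder wrote before
        // unlocking is visible once we observe the old value 0.
        return state_.exchange(1, std::memory_order_acquire) == 0;
      }
      void unlock() {
        state_.store(0, std::memory_order_release); // pairs with the acquire
      }
    };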
sanitizer_addrhashmap.h
  121: uptr addr1 = atomic_load(&c->addr, memory_order_acquire);  in ForEach()
  128: (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {  in ForEach()
  131: uptr addr1 = atomic_load(&c->addr, memory_order_acquire);  in ForEach()
  226: uptr addr1 = atomic_load(&c->addr, memory_order_acquire);  in acquire()
sanitizer_tls_get_addr.cpp
  47: uptr v = atomic_load(cur, memory_order_acquire);  in DTLS_NextBlock()
  85: (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);  in DTLS_Destroy()
/freebsd/contrib/llvm-project/libcxx/include/__stop_token/
stop_state.h
  92: return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;  in __stop_requested()
  99: __state_t __curent_state = __state_.load(std::memory_order_acquire);  in __stop_possible_for_stop_token()
  180: __cb->__completed_.wait(false, std::memory_order_acquire);  in __remove_callback()
atomic_unique_lock.h
  46: __is_locked_ = __lock_impl(__give_up_locking, __set_locked_bit, std::memory_order_acquire);  in __atomic_unique_lock()
  74: // std::memory_order_acquire because we'd like to make sure that all the read operations after the lock can read the  in __lock()
  76: __lock_impl(__never_give_up_locking, __set_locked_bit, std::memory_order_acquire);  in __lock()
  125: __locked_ordering, // sucessful exchange order. Usually it should be std::memory_order_acquire.  in __lock_impl()
/freebsd/contrib/llvm-project/lldb/source/Utility/
Timer.cpp
  141: uint64_t nanos = i->m_nanos.load(std::memory_order_acquire);  in DumpCategoryTimes()
  143: uint64_t nanos_total = i->m_nanos_total.load(std::memory_order_acquire);  in DumpCategoryTimes()
  144: uint64_t count = i->m_count.load(std::memory_order_acquire);  in DumpCategoryTimes()
/freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/
atomic_helpers.h
  19: memory_order_acquire = 2,  (enumerator)
  26: static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
linux.cpp
  113: memory_order_acquire) == Unlocked;  in lockSlow()
  119: memory_order_acquire);  in lockSlow()
  123: V = atomic_exchange(&M, Sleeping, memory_order_acquire);
  127: V = atomic_exchange(&M, Sleeping, memory_order_acquire);  in unlock()
  140: CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);  in getMonotonicTime()
/freebsd/tools/test/stress2/misc/
vm_reserv_populate.sh
  99: memory_order_acquire = 1 << 2,
  116: | memory_order_acquire | memory_order_seq_cst));
/freebsd/contrib/llvm-project/llvm/include/llvm/Support/
ManagedStatic.h
  87: void *Tmp = Ptr.load(std::memory_order_acquire);
  97: void *Tmp = Ptr.load(std::memory_order_acquire);
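Both hits are the fast path of ManagedStatic's double-checked initialization: a load-acquire of the instance pointer, with a locked slow path that publishes via store-release. A sketch of the pattern with illustrative names; LLVM's slow path uses its own mutex machinery, not std::mutex, and the payload here is a placeholder:

    #include <atomic>
    #include <mutex>

    std::atomic<void *> ptr{nullptr};
    std::mutex init_mutex;

    void *get_instance() {
      void *tmp = ptr.load(std::memory_order_acquire); // fast path
      if (!tmp) {
        std::lock_guard<std::mutex> guard(init_mutex);
        tmp = ptr.load(std::memory_order_relaxed);     // recheck under lock
        if (!tmp) {
          tmp = new int(0);                            // placeholder payload
          ptr.store(tmp, std::memory_order_release);   // publish
        }
      }
      return tmp;
    }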
/freebsd/sys/dev/qcom_rnd/
qcom_rnd.c
  124: sc, memory_order_release, memory_order_acquire)) {  in qcom_rnd_attach()
  173: atomic_load_explicit(&g_qcom_rnd_softc, memory_order_acquire) == sc,  in qcom_rnd_detach()
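qcom_rnd_attach() claims a global softc slot with a C11 compare-exchange whose success order is release (publish the fully initialized device) and whose failure order is acquire (observe a competing attach). The same shape in C++, with hypothetical stand-in types:

    #include <atomic>

    struct softc { int unit; };

    std::atomic<softc *> g_softc{nullptr}; // stand-in for g_qcom_rnd_softc

    // Install sc only if the slot is empty: release on success publishes
    // the initialized softc, acquire on failure reads the winner's.
    bool claim_singleton(softc *sc) {
      softc *expected = nullptr;
      return g_softc.compare_exchange_strong(expected, sc,
                                             std::memory_order_release,
                                             std::memory_order_acquire);
    }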
/freebsd/contrib/llvm-project/libcxx/include/
latch
  98: auto __value = __a_.load(memory_order_acquire);
  103: …__a_, [this](ptrdiff_t& __value) -> bool { return try_wait_impl(__value); }, memory_order_acquire);
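The <latch> hits show wait(): a load-acquire of the counter, then blocking until it reaches zero (line 103 routes the loop through libc++'s atomic-wait helper). A rough C++20 sketch of the same loop, assuming only the standard std::atomic wait API:

    #include <atomic>
    #include <cstddef>

    // Spin/block until the latch counter reaches zero, as <latch>'s wait()
    // does; acquire pairs with the release decrement in count_down().
    void latch_wait(std::atomic<std::ptrdiff_t> &counter) {
      std::ptrdiff_t value = counter.load(std::memory_order_acquire);
      while (value != 0) {
        counter.wait(value, std::memory_order_acquire); // blocks while unchanged
        value = counter.load(std::memory_order_acquire);
      }
    }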