/freebsd/sys/sys/
  refcount.h
     69: return (atomic_load_int(count));    in refcount_load()
    103: old = atomic_load_int(count);    in refcount_acquire_checked()
    122: old = atomic_load_int(count);    in refcount_acquire_if_gt()
    181: old = atomic_load_int(count); \
  _blockcount.h
     47: return (_BLOCKCOUNT_COUNT(atomic_load_int(&count->__count)));    in blockcount_read()
  smr.h
     92: return (atomic_load_int(&s->s_wr.seq));    in smr_shared_current()
  atomic_common.h
    100: #define atomic_load_int(p) __atomic_load_generic(p, int, u_int, int)    (macro)
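
Taken together with the refcount.h hits above, the atomic_common.h definition shows the common shape of lock-free updates in the tree: read the current value with a plain (unordered) atomic_load_int(), then try to install the new value with a compare-and-swap, looping on failure. A minimal sketch of that load-then-fcmpset pattern, modeled on the refcount_acquire_if_gt() snippet above (the helper name and threshold argument are invented for illustration; this is not the refcount(9) implementation):

    #include <sys/types.h>
    #include <machine/atomic.h>     /* atomic_load_int(), atomic_fcmpset_int() */

    /*
     * Bump *count by one, but only while it is still greater than n.
     * Returns 1 on success, 0 if the count had already dropped to n or below.
     */
    static int
    acquire_if_gt(volatile u_int *count, u_int n)
    {
        u_int old;

        old = atomic_load_int(count);   /* unordered read of the current value */
        for (;;) {
            if (old <= n)
                return (0);             /* too low; nothing was modified */
            /*
             * atomic_fcmpset_int() stores old + 1 only if *count still
             * equals old; on failure it refreshes old from memory, so the
             * loop retries against the value some other CPU just wrote.
             */
            if (atomic_fcmpset_int(count, &old, old + 1))
                return (1);
        }
    }

The initial atomic_load_int() is an optimization: it seeds the loop without the bus traffic of a failed compare-and-swap.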

/freebsd/sys/dev/vmware/vmci/
  vmci_driver.c
     57: "event (type=%d).\n", atomic_load_int(&vm_context_id),    in vmci_util_cid_update()
    389: if (atomic_load_int(&vm_context_id) == VMCI_INVALID_ID) {    in vmci_get_context_id()
    399: return (atomic_load_int(&vm_context_id));    in vmci_get_context_id()

/freebsd/tools/test/stress2/misc/
  fork2.sh
    116: while ((atomic_load_int(&share[SYNC])) > MX)
    124: while (atomic_load_int(&share[SYNC]) <= MX)

/freebsd/sys/riscv/riscv/
  mp_machdep.c
    172: while (!atomic_load_int(&aps_ready))    in init_secondary()
    399: naps = atomic_load_int(&aps_started);    in cpu_init_fdt()
    407: while (atomic_load_int(&aps_started) < naps + 1)    in cpu_init_fdt()
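
The riscv mp_machdep.c hits (and the arm64 ones further down) are the usual secondary-CPU rendezvous: each AP increments a counter as it comes up, the boot CPU waits for that counter to advance, and the APs in turn spin until the boot CPU raises aps_ready. A simplified sketch of that handshake, reusing the variable names from the snippets but none of the real platform bring-up code:

    #include <sys/types.h>
    #include <machine/atomic.h>
    #include <machine/cpu.h>        /* cpu_spinwait() */

    static volatile u_int aps_ready;    /* set by the boot CPU when APs may proceed */
    static volatile u_int aps_started;  /* incremented by each AP as it checks in */

    /* Boot CPU: release one AP and wait until it has checked in. */
    static void
    wait_for_one_ap(void)
    {
        u_int naps;

        naps = atomic_load_int(&aps_started);
        /* ... platform-specific code that actually starts the AP ... */
        while (atomic_load_int(&aps_started) < naps + 1)
            cpu_spinwait();
    }

    /* Secondary CPU: announce ourselves, then wait for the go-ahead. */
    static void
    ap_wait_until_ready(void)
    {
        atomic_add_int(&aps_started, 1);
        while (!atomic_load_int(&aps_ready))
            cpu_spinwait();
        /* ... per-CPU initialization continues once aps_ready is set ... */
    }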

/freebsd/sys/netinet/
  tcp_hostcache.c
    355: if (atomic_load_int(&hc_entry->hc_expire) !=    in tcp_hc_lookup()
    472: if (atomic_load_int(&hc_entry->hc_expire) !=    in tcp_hc_update()
    492: atomic_load_int(&V_tcp_hostcache.cache_count) >=    in tcp_hc_update()
    644: len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *    in sysctl_tcp_hc_list()
    772: atomic_load_int(&hc_entry->hc_expire) <= 0) {    in tcp_hc_purge_internal()

/freebsd/lib/libc/amd64/string/
  amd64_archlevel.c
    204: islevel = atomic_load_int(&amd64_archlevel);    in archlevel()
    222: return (atomic_load_int(&amd64_archlevel));    in archlevel()
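
amd64_archlevel.c caches the detected architecture level in a global that is read with atomic_load_int() and only computed once. The general shape is a racy-but-idempotent lazy initialization: if the cached value is still the sentinel, compute it and publish it with a compare-and-swap; it does not matter which thread wins because every thread computes the same answer. A generic sketch of that pattern (the sentinel, names, and the probe itself are placeholders, not the libc code):

    #include <sys/types.h>
    #include <sys/cdefs.h>          /* __predict_false() */
    #include <machine/atomic.h>

    #define LEVEL_UNSET 0           /* sentinel: not probed yet */

    static volatile u_int cached_level = LEVEL_UNSET;

    static u_int
    probe_level(void)
    {
        /* Placeholder for the expensive, idempotent detection work. */
        return (1);
    }

    static u_int
    get_level(void)
    {
        u_int level;

        level = atomic_load_int(&cached_level);
        if (__predict_false(level == LEVEL_UNSET)) {
            level = probe_level();
            /*
             * Publish the result.  If another thread raced us here it
             * computed the same value, so losing the cmpset is harmless.
             */
            atomic_cmpset_int(&cached_level, LEVEL_UNSET, level);
        }
        return (level);
    }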

/freebsd/sys/vm/
  vm_object.c
     198: KASSERT(atomic_load_int(&object->shadow_count) == 0,    in vm_object_zdtor()
     200: object, atomic_load_int(&object->shadow_count)));    in vm_object_zdtor()
     578: atomic_load_int(&backing_object->shadow_count) == 1,    in vm_object_deallocate_anon()
     581: atomic_load_int(&backing_object->shadow_count)));    in vm_object_deallocate_anon()
     665: atomic_load_int(&object->shadow_count) == 0) {    in vm_object_deallocate()
    1845: object->ref_count > atomic_load_int(&object->shadow_count),    in vm_object_collapse()
    1847: object->ref_count, atomic_load_int(&object->shadow_count)));    in vm_object_collapse()
    1861: KASSERT(atomic_load_int(&backing_object->shadow_count)    in vm_object_collapse()
    1864: atomic_load_int(&backing_object->shadow_count)));    in vm_object_collapse()
    2413: return (obj->ref_count > atomic_load_int(&obj->shadow_count));    in vm_object_is_active()
    [all …]

/freebsd/sys/x86/x86/
  cpu_machdep.c
    275: KASSERT(atomic_load_int(state) == STATE_SLEEPING,    in acpi_cpu_idle_mwait()
    286: if (atomic_load_int(state) == STATE_MWAIT)    in acpi_cpu_idle_mwait()
    533: KASSERT(atomic_load_int(statep) == STATE_RUNNING,    in cpu_idle_enter()
    534: ("%s: state %d", __func__, atomic_load_int(statep)));    in cpu_idle_enter()
    617: if (atomic_load_int(state) == STATE_MWAIT)    in cpu_idle_mwait()
    703: switch (atomic_load_int(state)) {    in cpu_idle_wakeup()
    942: while (atomic_load_int(&hp->running) != 0)    in nmi_remove_handler()
  stack_machdep.c
    133: cpuid = atomic_load_int(&td->td_oncpu);    in stack_save_td()
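
The cpu_machdep.c hits above are the per-CPU idle-state word that makes monitor/mwait wakeups cheap: the idle loop publishes STATE_MWAIT while it is monitoring that word, so a waker can often skip the IPI and just store to it. A heavily simplified sketch of the wakeup-side decision (three-state model only; the real cpu_idle_wakeup() and the KASSERTs around it carry more detail):

    #include <sys/types.h>
    #include <machine/atomic.h>

    #define STATE_RUNNING  0        /* CPU is out of the idle loop */
    #define STATE_MWAIT    1        /* idle and monitoring the state word */
    #define STATE_SLEEPING 2        /* idle in a non-monitoring sleep */

    /*
     * Returns 1 if storing to the monitored word was enough to wake the
     * target CPU, 0 if the caller still has to send an IPI.
     */
    static int
    idle_wakeup(volatile u_int *state)
    {
        switch (atomic_load_int(state)) {
        case STATE_RUNNING:
            return (1);             /* already awake */
        case STATE_MWAIT:
            /* The CPU is mwait'ing on this cacheline; the store wakes it. */
            atomic_store_int(state, STATE_RUNNING);
            return (1);
        default:
            return (0);             /* STATE_SLEEPING: needs an interrupt */
        }
    }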

/freebsd/sys/i386/include/
  pmap_nopae.h
     86: #define pte_load(ptep) atomic_load_int(ptep)

/freebsd/sys/arm64/arm64/
  mp_machdep.c
    233: while (!atomic_load_int(&aps_ready))    in init_secondary()
    488: naps = atomic_load_int(&aps_started);    in start_cpu()
    518: while (atomic_load_int(&aps_started) < naps + 1)    in start_cpu()

/freebsd/sys/kern/
  subr_smr.c
    373: c_seq = atomic_load_int(&c->c_seq);    in smr_poll_cpu()
    453: s_rd_seq = atomic_load_int(&s->s_rd_seq);    in smr_poll_scan()
  vfs_default.c
    1161: return ((int)atomic_load_int(&ap->a_vp->v_writecount) < 0);    in vop_stdis_text()
    1173: n = atomic_load_int(&vp->v_writecount);    in vop_stdset_text()
    1216: n = atomic_load_int(&vp->v_writecount);    in vop_stdunset_text()
    1263: n = atomic_load_int(&vp->v_writecount);    in vop_stdadd_writecount_impl()
  subr_syscall.c
     66: if (__predict_false(td->td_cowgen != atomic_load_int(&p->p_cowgen)))    in syscallenter()
  tty_info.c
    369: kstacks_val = atomic_load_int(&tty_info_kstacks);    in tty_info()
  subr_trap.c
    201: if (td->td_cowgen != atomic_load_int(&td->td_proc->p_cowgen))    in ast_prep()
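
The vfs_default.c hits above rely on the sign convention behind v_writecount: positive values count writers, negative values count executable (text) mappings, and zero means neither, so vop_stdis_text() reduces to a sign test and the setters only need a load-then-fcmpset loop that refuses the transition while the other use is active. A toy version of the writer-side check on a bare counter (names invented; not the vnode code):

    #include <sys/types.h>
    #include <sys/errno.h>          /* ETXTBSY */
    #include <machine/atomic.h>

    /*
     * usecount > 0: writers, usecount < 0: text mappings, 0: neither.
     * Adding a writer must fail while the object is mapped for execute.
     */
    static int
    try_add_writer(volatile u_int *usecount)
    {
        u_int n;

        n = atomic_load_int(usecount);
        for (;;) {
            if ((int)n < 0)
                return (ETXTBSY);   /* currently in use as text */
            if (atomic_fcmpset_int(usecount, &n, n + 1))
                return (0);
            /* fcmpset failed and refreshed n; retry with the new value. */
        }
    }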

/freebsd/sys/netinet6/
  frag6.c
    539: else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)    in frag6_input()
    594: atomic_load_int(&V_frag6_nfragpackets) >=    in frag6_input()
    940: if (atomic_load_int(&frag6_nfrags) == 0)    in frag6_slowtimo()
    983: atomic_load_int(&V_frag6_nfragpackets) >    in frag6_slowtimo()

/freebsd/sys/compat/linuxkpi/common/include/linux/
  seqlock.h
    102: return (atomic_load_int(seqcp));    in lkpi_seqprop_sequence()
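
The linuxkpi seqlock shim bottoms out in an atomic_load_int() of the sequence word; the reader-side idea is the standard sequence-counter protocol: sample the counter, copy the data, and retry if the counter was odd (a writer was mid-update) or changed underneath the copy. A generic sketch of that protocol, with ordering roughly following the kernel's own seqc(9) helpers (this is not the linuxkpi implementation):

    #include <sys/types.h>
    #include <machine/atomic.h>

    struct snapshot {
        volatile u_int seq;         /* even: stable, odd: writer in progress */
        int            a, b;        /* data guarded by the sequence counter */
    };

    /* Writer (assumed serialized by its own lock): make seq odd, update, make it even. */
    static void
    snapshot_update(struct snapshot *sp, int a, int b)
    {
        sp->seq++;                                      /* odd: update begins */
        atomic_thread_fence_rel();
        sp->a = a;
        sp->b = b;
        atomic_store_rel_int(&sp->seq, sp->seq + 1);    /* even: update done */
    }

    /* Reader: lock-free; retry until an even, unchanged counter brackets the copy. */
    static void
    snapshot_read(struct snapshot *sp, int *a, int *b)
    {
        u_int seq;

        for (;;) {
            seq = atomic_load_acq_int(&sp->seq);
            if (seq & 1)
                continue;                   /* writer active, try again */
            *a = sp->a;
            *b = sp->b;
            atomic_thread_fence_acq();
            if (atomic_load_int(&sp->seq) == seq)
                break;                      /* copy is consistent */
        }
    }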

/freebsd/sys/dev/cxgbe/
  t4_netmap.c
    527: nm_state = atomic_load_int(&nm_rxq->nm_state);    in cxgbe_netmap_simple_rss()
    546: nm_state = atomic_load_int(&nm_rxq->nm_state);    in cxgbe_netmap_simple_rss()
    596: nm_state = atomic_load_int(&nm_rxq->nm_state);    in cxgbe_netmap_split_rss()
    628: nm_state = atomic_load_int(&nm_rxq[j].nm_state);    in cxgbe_netmap_split_rss()
    646: nm_state = atomic_load_int(&nm_rxq[j].nm_state);    in cxgbe_netmap_split_rss()
    819: nm_state = atomic_load_int(&nm_rxq->nm_state);    in cxgbe_netmap_off()

/freebsd/lib/libc/gen/
  dlfcn.c
    330: r = atomic_load_int(&ret);    in _rtld_get_stack_prot()

/freebsd/sys/contrib/openzfs/lib/libspl/include/
  atomic.h
    253: #define atomic_load_int(p) (*(volatile uint_t *)(p))    (macro)
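
In userland the OpenZFS libspl compatibility header supplies the same name, but there atomic_load_int() is just a dereference through a volatile pointer rather than the kernel's __atomic_load_generic() machinery; either way the result is a single, unordered load. A small sketch showing the libspl spelling next to its C11 equivalent (the variables are only for illustration):

    #include <stdatomic.h>
    #include <sys/types.h>

    static volatile u_int legacy_counter;   /* read the libspl way */
    static _Atomic u_int  c11_counter;      /* read the C11 way */

    static void
    read_both(u_int *a, u_int *b)
    {
        /* libspl: #define atomic_load_int(p) (*(volatile uint_t *)(p)) */
        *a = *(volatile u_int *)&legacy_counter;

        /* The C11 spelling of the same no-ordering-implied load. */
        *b = atomic_load_explicit(&c11_counter, memory_order_relaxed);
    }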

/freebsd/sys/geom/eli/
  g_eli_privacy.c
    312: batch = atomic_load_int(&g_eli_batch) != 0;    in g_eli_crypto_run()