/linux/rust/kernel/sync/

atomic.rs:
    3: //! Atomic primitives.
    13: //! - A normal write from C side is treated as an atomic write if
    15: //! - Mixed-size atomic accesses don't cause data races.
    35: /// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
    37: /// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
    38: /// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
    39: /// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
    48: /// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
    50: pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);  [struct]
    52: // SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
    [all …]

refcount.rs:
    3: //! Atomic reference counting.
    8: use crate::sync::atomic::Atomic;
    11: /// Atomic reference counter.
    13: /// This type is conceptually an atomic integer, but provides saturation semantics compared to
    14: /// normal atomic integers. Values in the negative range when viewed as a signed integer are
    38: /// Get the underlying atomic counter that backs the refcount.
    44: pub fn as_atomic(&self) -> &Atomic<i32> {  [in as_atomic()]
    46: // SAFETY: `refcount_t` is a transparent wrapper of `atomic_t`, which is an atomic 32-bit  [in as_atomic()]
    47: // integer that is layout-wise compatible with `Atomic<i32>`. All values are valid for  [in as_atomic()]

/linux/arch/mips/include/asm/octeon/

cvmx-fau.h:
    123: * @reg: FAU atomic register to access. 0 <= reg < 2048.
    127: * Returns Address to store for atomic update
    139: * @tagwait: Should the atomic add wait for the current tag switch
    143: * @reg: FAU atomic register to access. 0 <= reg < 2048.
    150: * Returns Address to read from for atomic update
    162: * Perform an atomic 64 bit add
    164: * @reg: FAU atomic register to access. 0 <= reg < 2048.
    177: * Perform an atomic 32 bit add
    179: * @reg: FAU atomic register to access. 0 <= reg < 2048.
    193: * Perform an atomic 16 bit add
    [all …]

/linux/lib/

dec_and_lock.c:
    4: #include <linux/atomic.h>
    12: * if (atomic_dec_and_test(&atomic)) {
    19: * "atomic".
    21: int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)  [in _atomic_dec_and_lock() argument]
    24: if (atomic_add_unless(atomic, -1, 1))  [in _atomic_dec_and_lock()]
    29: if (atomic_dec_and_test(atomic))  [in _atomic_dec_and_lock()]
    37: int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,  [in _atomic_dec_and_lock_irqsave() argument]
    41: if (atomic_add_unless(atomic, -1, 1))  [in _atomic_dec_and_lock_irqsave()]
    46: if (atomic_dec_and_test(atomic))  [in _atomic_dec_and_lock_irqsave()]
    53: int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)  [in _atomic_dec_and_raw_lock() argument]
    [all …]
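
These helpers implement "decrement a reference count and, if it reached zero, return with the lock held", closing the race between the final atomic_dec_and_test() and taking the lock. A minimal caller sketch, assuming a hypothetical object with a refcount and a list node (my_obj, my_obj_lock and my_obj_put() are illustrative names, not taken from the file above):

    #include <linux/atomic.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_obj {
            atomic_t refcnt;
            struct list_head node;
    };

    static DEFINE_SPINLOCK(my_obj_lock);    /* protects the list the object lives on */

    static void my_obj_put(struct my_obj *obj)
    {
            /* Fast path: just decrement while the count stays above 1.
             * Slow path: take the lock only when this put drops the count to
             * zero, so the unlink and the final decrement appear atomic. */
            if (atomic_dec_and_lock(&obj->refcnt, &my_obj_lock)) {
                    list_del(&obj->node);
                    spin_unlock(&my_obj_lock);
                    kfree(obj);
            }
    }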

percpu-refcount.c:
    57: * change the start state to atomic with the latter setting the initial refcount
    193: * time is equivalent and saves us atomic operations:  [in percpu_ref_switch_to_atomic_rcu()]
    198: "percpu ref (%ps) <= 0 (%ld) after switching to atomic",  [in percpu_ref_switch_to_atomic_rcu()]
    222: /* switching from percpu to atomic */  [in __percpu_ref_switch_to_atomic()]
    273: * If the previous ATOMIC switching hasn't finished yet, wait for  [in __percpu_ref_switch_mode()]
    274: * its completion. If the caller ensures that ATOMIC switching  [in __percpu_ref_switch_mode()]
    287: * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
    288: * @ref: percpu_ref to switch to atomic mode
    294: * Schedule switching of @ref to atomic mode. All its percpu counts will
    295: * be collected to the main atomic counter. On completion, when all CPUs
    [all …]
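
A hedged sketch of the mode switch those comments describe; the ref, its percpu_ref_init() setup and the reason for draining are assumptions and not part of the file above, while the switch calls themselves are the documented API:

    #include <linux/percpu-refcount.h>

    /* Assumed to have been set up elsewhere with percpu_ref_init(). */
    extern struct percpu_ref my_ref;

    static void my_ref_drain_window(void)
    {
            /* Collect all per-CPU counts into the single atomic counter and
             * wait until the switch has completed on every CPU. */
            percpu_ref_switch_to_atomic_sync(&my_ref);

            /* ... every percpu_ref_get()/put() now hits the shared counter ... */

            /* Return to the fast per-CPU mode afterwards. */
            percpu_ref_switch_to_percpu(&my_ref);
    }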

/linux/scripts/atomic/

gen-atomics.sh:
    4: # Generate atomic headers
    11: gen-atomic-instrumented.sh linux/atomic/atomic-instrumented.h
    12: gen-atomic-long.sh linux/atomic/atomic-long.h
    13: gen-atomic-fallback.sh linux/atomic/atomic-arch-fallback.h
    14: gen-rust-atomic-helpers.sh ../rust/helpers/atomic.c

atomic-tbl.sh:
    112: # gen_param_type(arg, int, atomic)
    117: local atomic="$1"; shift
    122: v) type="${atomic}_t *";;
    123: cv) type="const ${atomic}_t *";;
    129: #gen_param(arg, int, atomic)
    134: local atomic="$1"; shift
    136: local type="$(gen_param_type "${arg}" "${int}" "${atomic}")"
    141: #gen_params(int, atomic, arg...)
    145: local atomic="$1"; shift
    148: gen_param "$1" "${int}" "${atomic}"
    [all …]

gen-rust-atomic-helpers.sh:
    6: . ${ATOMICDIR}/atomic-tbl.sh
    8: #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
    16: local atomic="$1"; shift
    19: local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
    22: local params="$(gen_params "${int}" "${atomic}" "$@")"
    43: * This file provides helpers for the various atomic functions for Rust.
    48: #include <linux/atomic.h>
    58: gen_proto "${meta}" "${name}" "atomic" "int" ${args}

gen-atomic-long.sh:
    6: . ${ATOMICDIR}/atomic-tbl.sh
    8: #gen_cast(arg, int, atomic)
    13: local atomic="$1"; shift
    17: printf "($(gen_param_type "${arg}" "${int}" "${atomic}"))"
    20: #gen_args_cast(int, atomic, arg...)
    24: local atomic="$1"; shift
    27: local cast="$(gen_cast "$1" "${int}" "${atomic}")"
    48: local argscast_32="$(gen_args_cast "int" "atomic" "$@")"

gen-atomic-fallback.sh:
    6: . ${ATOMICDIR}/atomic-tbl.sh
    8: #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
    17: local atomic="$1"; shift
    22: local params="$(gen_params "${int}" "${atomic}" "$@")"
    28: #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    42: #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    55: #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, args...)
    63: local atomic="$1"; shift
    66: local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
    67: local basename="${atomic}_${pfx}${name}${sfx}"
    [all …]

gen-atomic-instrumented.sh:
    6: . ${ATOMICDIR}/atomic-tbl.sh
    25: # An atomic RMW: if this parameter is not a constant, and this atomic is
    52: #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
    60: local atomic="$1"; shift
    63: local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
    66: local params="$(gen_params "${int}" "${atomic}" "$@")"
    71: gen_kerneldoc "" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
    138: * This file provoides atomic operations with explicit instrumentation (e.g.
    153: gen_proto "${meta}" "${name}" "atomic" "int" ${args}
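
These scripts generate the headers under include/linux/atomic/ (and the Rust helper file); the generated output is not meant to be edited by hand. As a rough illustration of what gen-atomic-instrumented.sh emits, an instrumented wrapper has approximately this shape (a sketch, not the verbatim generated text):

    /* Sketch of one generated wrapper: instrument the access for the sanitizers
     * (KASAN/KCSAN), then forward to the raw architecture/fallback op. */
    static __always_inline void
    atomic_add(int i, atomic_t *v)
    {
            instrument_atomic_read_write(v, sizeof(*v));
            raw_atomic_add(i, v);
    }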

/linux/drivers/firmware/arm_scmi/transports/

Kconfig:
    52: bool "Enable atomic mode support for SCMI SMC transport"
    55: Enable support of atomic operation for SCMI SMC based transport.
    57: If you want the SCMI SMC based transport to operate in atomic
    60: Enabling atomic mode operations allows any SCMI driver using this
    61: transport to optionally ask for atomic SCMI transactions and operate
    62: in atomic context too, at the price of using a number of busy-waiting
    109: bool "Enable atomic mode for SCMI VirtIO transport"
    112: Enable support of atomic operation for SCMI VirtIO based transport.
    114: If you want the SCMI VirtIO based transport to operate in atomic
    118: Enabling atomic mode operations allows any SCMI driver using this
    [all …]

/linux/Documentation/

atomic_bitops.txt:
    2: Atomic bitops
    5: While our bitmap_{}() functions are non-atomic, we have a number of operations
    6: operating on single bits in a bitmap that are atomic.
    18: RMW atomic operations without return value:
    23: RMW atomic operations with return value:
    33: All RMW atomic operations have a '__' prefixed variant which is non-atomic.
    39: Non-atomic ops:
    67: Since a platform only has a single means of achieving atomic operations
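
A small sketch contrasting the families listed in that document; the bitmap and bit indices are made up for illustration:

    #include <linux/bitops.h>

    static unsigned long my_flags[BITS_TO_LONGS(64)];

    static void bitops_example(void)
    {
            /* RMW atomic, no return value, no ordering implied. */
            set_bit(3, my_flags);

            /* '__' prefixed variant: non-atomic, only safe when all access to
             * this word is serialized by other means. */
            __set_bit(4, my_flags);

            /* RMW atomic with return value: fully ordered. */
            if (test_and_set_bit(5, my_flags))
                    return;         /* bit 5 was already set */
    }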

atomic_t.txt:
    2: On atomic types (atomic_t atomic64_t and atomic_long_t).
    4: The atomic type provides an interface to the architecture's means of atomic
    5: RMW operations between CPUs (atomic operations on MMIO are not supported and
    20: RMW atomic operations:
    67: Therefore, an explicitly unsigned variant of the atomic ops is strictly
    91: C Atomic-RMW-ops-are-atomic-WRT-atomic_set
    118: The obvious case where this is not so is when we need to implement atomic ops
    155: All these operations are SMP atomic; that is, the operations (for a single
    156: atomic variable) can be fully ordered and no intermediate state is lost or
    192: only apply to the RMW atomic ops and can be used to augment/upgrade the
    [all …]
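
A short sketch of the API families that document describes, with the ordering each call provides noted in comments; the counter and the values are illustrative:

    #include <linux/atomic.h>

    static atomic_t cnt = ATOMIC_INIT(0);

    static int atomic_t_example(void)
    {
            int old;

            atomic_set(&cnt, 1);                    /* non-RMW op: no ordering implied */
            atomic_inc(&cnt);                       /* RMW without return value: unordered */
            old = atomic_fetch_add(2, &cnt);        /* RMW with return value: fully ordered */
            old = atomic_fetch_add_relaxed(2, &cnt);/* _relaxed variant: no ordering */

            if (atomic_dec_and_test(&cnt))          /* RMW with return value: fully ordered */
                    old = 0;                        /* counter reached zero */

            return old;
    }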

/linux/rust/kernel/sync/atomic/

internal.rs:
    3: //! Atomic internal implementations.
    5: //! Provides 1:1 mapping to the C atomic operations.
    12: /// Sealed trait marker to disable customized impls on atomic implementation traits.
    16: // `i32` and `i64` are only supported atomic implementations.
    20: /// A marker trait for types that implement atomic operations with C side primitives.
    31: /// [`Self`], but it may be different for the atomic pointer type.
    35: // `atomic_t` implements atomic operations on `i32`.
    40: // `atomic64_t` implements atomic operations on `i64`.
    45: /// Atomic representation.
    50: /// Creates a new atomic representation `T`.
    [all …]

/linux/include/linux/

atomic.h:
    2: /* Atomic operations usable in machine independent code */
    7: #include <asm/atomic.h>
    11: * Relaxed variants of xchg, cmpxchg and some atomic operations.
    80: #include <linux/atomic/atomic-arch-fallback.h>
    81: #include <linux/atomic/atomic-long.h>
    82: #include <linux/atomic/atomic-instrumented.h>

async.h:
    50: * Note: This function may be called from atomic or non-atomic contexts.
    66: * Note: This function may be called from atomic or non-atomic contexts.
    85: * Note: This function may be called from atomic or non-atomic contexts.
    108: * Note: This function may be called from atomic or non-atomic contexts.

spinlock.h:
    170: * architectures imply an smp_mb() for each atomic instruction and equally don't
    489: * (asm-mips/atomic.h needs above definitions)
    491: #include <linux/atomic.h>
    494: * @atomic: the atomic counter
    497: * Decrements @atomic by 1. If the result is 0, returns true and locks
    500: extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
    501: #define atomic_dec_and_lock(atomic, lock) \  [argument]
    502: __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
    504: extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
    506: atomic_dec_and_lock_irqsave(atomic,lock,flags)  [global() argument]
    510: atomic_dec_and_raw_lock(atomic,lock)  [global() argument]
    515: atomic_dec_and_raw_lock_irqsave(atomic,lock,flags)  [global() argument]
    [all …]

/linux/net/rds/

rdma.c:
    866: * Fill in rds_message for an atomic request.
    876: || rm->atomic.op_active)  [in rds_cmsg_atomic()]
    884: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;  [in rds_cmsg_atomic()]
    885: rm->atomic.op_m_fadd.add = args->fadd.add;  [in rds_cmsg_atomic()]
    886: rm->atomic.op_m_fadd.nocarry_mask = 0;  [in rds_cmsg_atomic()]
    889: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;  [in rds_cmsg_atomic()]
    890: rm->atomic.op_m_fadd.add = args->m_fadd.add;  [in rds_cmsg_atomic()]
    891: rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;  [in rds_cmsg_atomic()]
    894: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;  [in rds_cmsg_atomic()]
    895: rm->atomic.op_m_cswp.compare = args->cswp.compare;  [in rds_cmsg_atomic()]
    [all …]

/linux/Documentation/core-api/

local_ops.rst:
    5: Semantics and Behavior of Local Atomic Operations
    11: This document explains the purpose of the local atomic operations, how
    26: Purpose of local atomic operations
    29: Local atomic operations are meant to provide fast and highly reentrant per CPU
    30: counters. They minimize the performance cost of standard atomic operations by
    34: Having fast per CPU atomic counters is interesting in many cases: it does not
    39: Local atomic operations only guarantee variable modification atomicity wrt the
    50: It can be done by slightly modifying the standard atomic operations: only
    63: Rules to follow when using local atomic operations
    82: "``long``", aligned, variables are always atomic. Since no memory
    [all …]
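
One possible shape of such a per-CPU counter, sketched under the rules that document lays out; the counter name and helpers are illustrative, and only the owning CPU ever writes its copy:

    #include <linux/percpu.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, hit_count);

    static void count_hit(void)
    {
            /* get_cpu_ptr() disables preemption so the update stays on one CPU;
             * local_inc() is then atomic wrt interrupts on that CPU. */
            local_inc(get_cpu_ptr(&hit_count));
            put_cpu_ptr(&hit_count);
    }

    static long read_hits(int cpu)
    {
            /* Other CPUs may only read; the value they see can be slightly stale. */
            return local_read(per_cpu_ptr(&hit_count, cpu));
    }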

/linux/include/asm-generic/bitops/

instrumented-non-atomic.h:
    4: * This file provides wrappers with sanitizer instrumentation for non-atomic
    21: * Unlike set_bit(), this function is non-atomic. If it is called on the same
    37: * Unlike clear_bit(), this function is non-atomic. If it is called on the same
    53: * Unlike change_bit(), this function is non-atomic. If it is called on the same
    68: * We treat non-atomic read-write bitops a little more special.  [in __instrument_read_write_bitop()]
    72: * assume-plain-writes-atomic rule):  [in __instrument_read_write_bitop()]
    75: * races with unmarked readers -> check "atomic" write.  [in __instrument_read_write_bitop()]
    93: * This operation is non-atomic. If two instances of this operation race, one
    108: * This operation is non-atomic. If two instances of this operation race, one
    123: * This operation is non-atomic. If two instances of this operation race, one

instrumented-atomic.h:
    4: * This file provides wrappers with sanitizer instrumentation for atomic bit
    21: * This is a relaxed atomic operation (no implied memory barriers).
    37: * This is a relaxed atomic operation (no implied memory barriers).
    50: * This is a relaxed atomic operation (no implied memory barriers).
    66: * This is an atomic fully-ordered operation (implied full memory barrier).
    80: * This is an atomic fully-ordered operation (implied full memory barrier).
    94: * This is an atomic fully-ordered operation (implied full memory barrier).

/linux/

Kbuild:
    57: # Check the manual modification of atomic headers
    72: atomic-checks += $(addprefix $(obj)/.checked-, \
    73: atomic-arch-fallback.h \
    74: atomic-instrumented.h \
    75: atomic-long.h)
    77: targets += $(atomic-checks)
    78: $(atomic-checks): $(obj)/.checked-%: include/linux/atomic/% FORCE
    84: prepare: $(offsets-file) missing-syscalls $(atomic-checks)

/linux/Documentation/litmus-tests/

README:
    12: atomic (/atomic directory)
    15: Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus
    16: Test that an atomic RMW followed by a smp_mb__after_atomic() is
    20: Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus
    21: Test that atomic_set() cannot break the atomicity of atomic RMWs.

/linux/arch/sh/include/asm/

atomic.h:
    7: #include <asm-generic/atomic.h>
    12: * Atomic operations that C can't guarantee us. Useful for
    26: #include <asm/atomic-grb.h>
    28: #include <asm/atomic-llsc.h>
    30: #include <asm/atomic-irq.h>