Lines Matching +full:- +full:42

1 // SPDX-License-Identifier: GPL-2.0
13 //! - A normal write from the C side is treated as an atomic write if
   //!   CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
15 //! - Mixed-size atomic accesses don't cause data races.
17 //! [`LKMM`]: srctree/tools/memory-model/
38 /// With the help of [`Atomic::from_ptr()`] and [`Atomic::as_ptr()`], this provides a way to
   /// interact with [C-side atomic operations] (e.g. `READ_ONCE()`, `WRITE_ONCE()`,
   /// `smp_load_acquire()` and `smp_store_release()`).
47 /// [LKMM]: srctree/tools/memory-model/
48 /// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
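The struct documentation above (file lines 38-48) describes `Atomic<T>` as an LKMM-modeled atomic that interoperates with the C-side accessors. A minimal usage sketch of the basic API, assuming the `kernel::sync::atomic` import paths and the ordering names (`Relaxed`, etc.) used by the doc examples later in this file (file lines 257-289):

    use kernel::sync::atomic::{Atomic, Relaxed};

    let x = Atomic::new(42i32);
    // A relaxed load/store corresponds to READ_ONCE()/WRITE_ONCE() on the C side.
    assert_eq!(42, x.load(Relaxed));
    x.store(43, Relaxed);
    assert_eq!(43, x.load(Relaxed));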
57 /// # Round-trip transmutability
59 /// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
61 /// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
62 /// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
   ///   yields a value that is in all aspects equivalent to the original value.
67 /// - [`Self`] must have the same size and alignment as [`Self::Repr`].
68 /// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
70 /// Note that this is more relaxed than requiring bi-directional transmutability (i.e.
   /// [`transmute()`] being sound in both directions between [`Self`] and [`Self::Repr`])
   /// because of the support for atomic
72 /// variables over unit-only enums; see [Examples].
77 /// Because a C function requires a valid object of the type to operate on (i.e. no
   /// `MaybeUninit<_>`), at the Rust <-> C boundary only types without uninitialized
   /// bits can be passed.
83 /// A unit-only enum that implements [`AtomicType`]:
96 /// // SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
   /// // transmutable to `i32`, so implementing `AtomicType` for it is sound.
107 /// [round-trip transmutable]: AtomicType#round-trip-transmutability
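File line 83 introduces the [Examples] section's unit-only enum. A sketch along those lines, assuming illustrative variant names (the example actually in the file may differ in detail):

    use kernel::sync::atomic::{Atomic, AtomicType, Relaxed};

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    #[repr(i32)]
    enum State {
        Uninit = 0,
        Working = 1,
        Done = 2,
    }

    // SAFETY: `State` and `i32` have the same size and alignment, and every
    // `State` value round-trips through `i32`. The reverse need not hold:
    // not every `i32` is a valid `State`, which is exactly why only
    // round-trip (not bi-directional) transmutability is required.
    unsafe impl AtomicType for State {
        type Repr = i32;
    }

    let s = Atomic::new(State::Uninit);
    s.store(State::Working, Relaxed);
    assert_eq!(State::Working, s.load(Relaxed));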
124 fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
128 const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
129 // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
    // `T::Repr`, therefore the conversion is sound.
138 const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
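`into_repr()`/`from_repr()` convert between `T` and `T::Repr`. Because the two types only agree in size and alignment per the `AtomicType` safety requirements, a plain `transmute()` cannot be written across the generic parameter; a sketch of the idea using `transmute_copy()` (an assumption about the implementation, not necessarily the file's exact body; it also presumes `AtomicType` implies `Copy`):

    use core::mem::transmute_copy;
    use kernel::sync::atomic::AtomicType;

    const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
        // SAFETY: `T` is round-trip transmutable to `T::Repr` and the two
        // types have identical size, so reinterpreting the bits is sound.
        unsafe { transmute_copy(&v) }
    }

    // SAFETY requirement on callers: `r` must hold a bit pattern valid for
    // `T`, e.g. one produced by `into_repr()`.
    const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
        // SAFETY: Guaranteed by the caller.
        unsafe { transmute_copy(&r) }
    }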
145 pub const fn new(v: T) -> Self {
157 /// - `ptr` is aligned to `align_of::<T>()`.
158 /// - `ptr` is valid for reads and writes for `'a`.
159 /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against the atomic operations performed through the returned reference.
163 /// [`LKMM`]: srctree/tools/memory-model/
192 /// // a = READ_ONCE(foo_ptr->a);
199 /// // smp_store_release(&foo_ptr->a, 2);
205 pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
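The comments at file lines 192 and 199 map the Rust calls onto their C equivalents. A sketch of the surrounding usage, assuming a hypothetical shared C struct `struct foo { int a; }` (the `Foo`/`foo_ptr`/`access` names are illustrative):

    use kernel::sync::atomic::{Atomic, Relaxed, Release};

    // Illustrative stand-in for a C struct shared with Rust.
    #[repr(C)]
    struct Foo {
        a: i32,
    }

    unsafe fn access(foo_ptr: *mut Foo) {
        // SAFETY: Per the requirements at file lines 157-159, `(*foo_ptr).a`
        // is aligned, valid for reads and writes for the derived lifetime,
        // and all concurrent accesses to it are free of data races.
        let a = unsafe { Atomic::from_ptr(core::ptr::addr_of_mut!((*foo_ptr).a)) };

        // a = READ_ONCE(foo_ptr->a);
        let _v = a.load(Relaxed);

        // smp_store_release(&foo_ptr->a, 2);
        a.store(2, Release);
    }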
224 /// [`LKMM`]: srctree/tools/memory-model/
226 pub const fn as_ptr(&self) -> *mut T {
236 pub fn get_mut(&mut self) -> &mut T {
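`get_mut()` hands out a plain `&mut T`: exclusive access rules out concurrent accesses, so no atomic instructions are needed. A small sketch under the same assumed imports:

    use kernel::sync::atomic::{Atomic, Relaxed};

    let mut x = Atomic::new(42i32);

    // Exclusive (`&mut`) access allows a plain, non-atomic update.
    *x.get_mut() += 1;

    assert_eq!(43, x.load(Relaxed));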
257 /// let x = Atomic::new(42i32);
259 /// assert_eq!(42, x.load(Relaxed));
261 /// let x = Atomic::new(42i64);
263 /// assert_eq!(42, x.load(Relaxed));
267 pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
287 /// let x = Atomic::new(42i32);
289 /// assert_eq!(42, x.load(Relaxed));
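The `load()` signature at file line 267 restricts the ordering to `AcquireOrRelaxed`; the bound on `store()` would presumably be the mirror image (release-or-relaxed). The canonical use of the stronger orderings is message passing, sketched below (the `Acquire`/`Release` names follow the orderings already used by this file's examples; the helper functions are illustrative):

    use kernel::sync::atomic::{Acquire, Atomic, Relaxed, Release};

    fn publish(data: &Atomic<i32>, ready: &Atomic<i32>) {
        data.store(42, Relaxed);
        // Release orders the `data` store before the `ready` store.
        ready.store(1, Release);
    }

    fn consume(data: &Atomic<i32>, ready: &Atomic<i32>) -> Option<i32> {
        // Acquire pairs with the Release store in `publish()`: a reader that
        // observes `ready == 1` is guaranteed to observe `data == 42`.
        (ready.load(Acquire) == 1).then(|| data.load(Relaxed))
    }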
322 /// let x = Atomic::new(42);
324 /// assert_eq!(42, x.xchg(52, Acquire));
329 pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
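Because `xchg()` unconditionally swaps in the new value and returns the previous one, it is enough to build a toy test-and-set flag; a sketch (illustrative only, not a replacement for the kernel's real locking primitives):

    use kernel::sync::atomic::{Acquire, Atomic, Release};

    // 0 = unlocked, 1 = locked.
    fn try_lock(flag: &Atomic<i32>) -> bool {
        // We acquired the flag iff the old value was 0.
        flag.xchg(1, Acquire) == 0
    }

    fn unlock(flag: &Atomic<i32>) {
        flag.store(0, Release);
    }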
366 /// let x = Atomic::new(42);
376 /// // On failure, use the returned old value and possibly retry the cmpxchg.
381 /// # assert_eq!(old, 42);
386 /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
387 /// # assert_eq!(42, latest);
405 ) -> Result<T, T> {
415 // let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
447 fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
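`cmpxchg()` returns `Ok(old)` on success and `Err(latest)` on failure (which is what the `unwrap_or_else(|old| old)` idiom at file line 386 exploits), while `try_cmpxchg()` writes the observed value back through `old` and returns a `bool`. Either form supports the classic retry loop; a sketch of a bounded increment built on `cmpxchg()` (the `saturating_inc` function is illustrative):

    use kernel::sync::atomic::{Atomic, Full, Relaxed};

    // Increment `x`, but never past `limit`.
    fn saturating_inc(x: &Atomic<i32>, limit: i32) -> i32 {
        let mut cur = x.load(Relaxed);
        loop {
            if cur >= limit {
                return cur; // already saturated
            }
            match x.cmpxchg(cur, cur + 1, Full) {
                Ok(old) => return old,  // won the race
                Err(old) => cur = old,  // lost the race: retry from the observed value
            }
        }
    }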
488 /// let x = Atomic::new(42);
490 /// assert_eq!(42, x.load(Relaxed));
518 /// let x = Atomic::new(42);
520 /// assert_eq!(42, x.load(Relaxed));
524 /// let x = Atomic::new(42);
526 /// assert_eq!(42, x.load(Relaxed));
531 pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
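`fetch_add()` is generic over `Rhs`, with `rhs_into_delta()` (file line 124) converting the right-hand side into the representation's delta type, and it returns the value from before the addition, like C's `atomic_fetch_add()`. A short sketch in the style of the doc examples above:

    use kernel::sync::atomic::{Atomic, Full, Relaxed};

    let hits = Atomic::new(42i64);

    // The old value is returned; the new value is visible to later loads.
    assert_eq!(42, hits.fetch_add(8, Full));
    assert_eq!(50, hits.load(Relaxed));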