// SPDX-License-Identifier: GPL-2.0

//! Atomic primitives.
//!
//! These primitives have the same semantics as their C counterparts; the precise definitions can
//! be found at [`LKMM`]. Note that the Linux Kernel Memory (Consistency) Model is the only memory
//! model for Rust code in the kernel, and Rust's own atomics should be avoided.
//!
//! # Data races
//!
//! [`LKMM`] atomics have different rules regarding data races:
//!
//! - A normal write from the C side is treated as an atomic write if
//!   CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
//! - Mixed-size atomic accesses don't cause data races.
//!
//! [`LKMM`]: srctree/tools/memory-model/
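//!
//! # Examples
//!
//! A minimal sketch of the API; `i32` is one of the types with predefined atomic support (see the
//! per-method examples below for more):
//!
//! ```
//! use kernel::sync::atomic::{Atomic, Relaxed};
//!
//! let x = Atomic::new(42i32);
//!
//! x.store(43, Relaxed);
//! assert_eq!(43, x.load(Relaxed));
//! ```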

mod internal;
pub mod ordering;
mod predefine;

pub use internal::AtomicImpl;
pub use ordering::{Acquire, Full, Relaxed, Release};

use crate::build_error;
use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
use ordering::OrderingType;

/// A memory location which can be safely modified from multiple execution contexts.
///
/// This has the same size, alignment and bit validity as the underlying type `T`. It also
/// disables niche optimization, for the same reason as [`UnsafeCell`].
///
/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
/// `smp_load_acquire()` and `smp_store_release()`).
///
/// # Invariants
///
/// `self.0` is a valid `T`.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [LKMM]: srctree/tools/memory-model/
/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
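///
/// # Examples
///
/// A short sketch of a shared counter; since `Atomic<T>` is `Sync`, it can live in a `static`
/// (the name `COUNTER` is arbitrary):
///
/// ```
/// use kernel::sync::atomic::{Atomic, Relaxed};
///
/// static COUNTER: Atomic<i32> = Atomic::new(0);
///
/// COUNTER.fetch_add(1, Relaxed);
/// assert_eq!(1, COUNTER.load(Relaxed));
/// ```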
#[repr(transparent)]
pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);

// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
unsafe impl<T: AtomicType> Sync for Atomic<T> {}

/// Types that support basic atomic operations.
///
/// # Round-trip transmutability
///
/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
///
/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
///   yields a value that is in all aspects equivalent to the original value.
///
/// # Safety
///
/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
///
/// Note that this is more relaxed than requiring bi-directional transmutability (i.e.
/// [`transmute()`] is always sound between `U` and `T`) because of the support for atomic
/// variables over unit-only enums; see [Examples].
///
/// # Limitations
///
/// Because C primitives are used to implement the atomic operations, and a C function requires a
/// valid object of a type to operate on (i.e. no `MaybeUninit<_>`), only types with all bits
/// initialized can be passed at the Rust <-> C surface. As a result, types like `(u8, u16)`
/// (which have uninitialized padding bytes) are currently not supported.
///
/// # Examples
///
/// A unit-only enum that implements [`AtomicType`]:
///
/// ```
/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
///
/// #[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// #[repr(i32)]
/// enum State {
///     Uninit = 0,
///     Working = 1,
///     Done = 2,
/// }
///
/// // SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for State {
///     type Repr = i32;
/// }
///
/// let s = Atomic::new(State::Uninit);
///
/// assert_eq!(State::Uninit, s.load(Relaxed));
/// ```
///
/// [`transmute()`]: core::mem::transmute
/// [round-trip transmutable]: AtomicType#round-trip-transmutability
/// [Examples]: AtomicType#examples
pub unsafe trait AtomicType: Sized + Send + Copy {
    /// The backing atomic implementation type.
    type Repr: AtomicImpl;
}

/// Types that support atomic add operations.
///
/// # Safety
///
// TODO: Properly define `wrapping_add` in the following comment.
/// Adding (via `wrapping_add`) any value of type `Self::Repr::Delta` obtained by
/// [`Self::rhs_into_delta()`] to any value of type `Self::Repr` obtained through transmuting a
/// value of type `Self` must yield a value with a bit pattern also valid for `Self`.
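///
/// # Examples
///
/// An illustrative sketch (the `Milliseconds` type is hypothetical, not part of the kernel) of
/// implementing [`AtomicAdd`] over a transparent `i32` wrapper; every `i32` bit pattern is a
/// valid `Milliseconds`, so any `wrapping_add` result is also valid for it:
///
/// ```
/// use kernel::sync::atomic::{Atomic, AtomicAdd, AtomicType, Relaxed};
///
/// #[derive(Clone, Copy)]
/// #[repr(transparent)]
/// struct Milliseconds(i32);
///
/// // SAFETY: `Milliseconds` is `repr(transparent)` over `i32`, so it has the same size and
/// // alignment as `i32` and is round-trip transmutable to `i32`.
/// unsafe impl AtomicType for Milliseconds {
///     type Repr = i32;
/// }
///
/// // SAFETY: Every `i32` bit pattern is a valid `Milliseconds`, hence `wrapping_add` always
/// // yields a bit pattern valid for `Milliseconds`.
/// unsafe impl AtomicAdd<i32> for Milliseconds {
///     fn rhs_into_delta(rhs: i32) -> i32 {
///         rhs
///     }
/// }
///
/// let x = Atomic::new(Milliseconds(100));
/// x.add(20, Relaxed);
/// assert_eq!(120, x.load(Relaxed).0);
/// ```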
pub unsafe trait AtomicAdd<Rhs = Self>: AtomicType {
    /// Converts `Rhs` into the `Delta` type of the atomic implementation.
    fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
}

#[inline(always)]
const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
    // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
    // `T::Repr`, therefore the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&v) }
}

/// # Safety
///
/// `r` must be a valid bit pattern of `T`.
#[inline(always)]
const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
    // SAFETY: Per the safety requirement of the function, the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&r) }
}

impl<T: AtomicType> Atomic<T> {
    /// Creates a new atomic `T`.
    pub const fn new(v: T) -> Self {
        // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
        Self(AtomicRepr::new(into_repr(v)))
    }

    /// Creates a reference to an atomic `T` from a pointer of `T`.
    ///
    /// This is usually used when communicating with the C side or manipulating a C struct; see
    /// the examples below.
    ///
    /// # Safety
    ///
    /// - `ptr` is aligned to `align_of::<T>()`.
    /// - `ptr` is valid for reads and writes for `'a`.
    /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against atomic operations on the returned reference. Note that if all other
    ///   accesses are atomic, then this safety requirement is trivially fulfilled.
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    ///
    /// # Examples
    ///
    /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
    /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
    /// `WRITE_ONCE()`/`smp_store_release()` on the C side:
    ///
    /// ```
    /// # use kernel::types::Opaque;
    /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
    ///
    /// // Assume there is a C struct `foo`.
    /// mod cbindings {
    ///     #[repr(C)]
    ///     pub(crate) struct foo {
    ///         pub(crate) a: i32,
    ///         pub(crate) b: i32
    ///     }
    /// }
    ///
    /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
    ///
    /// // struct foo *foo_ptr = ..;
    /// let foo_ptr = tmp.get();
    ///
    /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
    /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
    ///
    /// // a = READ_ONCE(foo_ptr->a);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for reads, and all other accesses to it are atomic, so no
    /// // data race.
    /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
    /// # assert_eq!(a, 1);
    ///
    /// // smp_store_release(&foo_ptr->a, 2);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses to it are atomic, so
    /// // no data race.
    /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
    /// ```
    pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
    where
        T: Sync,
    {
        // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
        // SAFETY: Per the function safety requirements, `ptr` is a valid pointer and the object
        // will live long enough. It's safe to return a `&Atomic<T>` because the function safety
        // requirements guarantee that other accesses won't cause data races.
        unsafe { &*ptr.cast::<Self>() }
    }

    /// Returns a pointer to the underlying atomic `T`.
    ///
    /// Note that use of the returned pointer must not cause data races defined by [`LKMM`].
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    /// [`align_of::<T>()`]: core::mem::align_of
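    ///
    /// # Examples
    ///
    /// A minimal sketch; in practice the returned pointer would typically be passed to C code
    /// that performs atomic or `READ_ONCE()`-style accesses on it:
    ///
    /// ```
    /// use kernel::sync::atomic::Atomic;
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// // Valid and properly aligned per the guarantees above.
    /// let ptr = x.as_ptr();
    /// # assert!(!ptr.is_null());
    /// ```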
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, `self.0.as_ptr()` must
        // be a valid and properly aligned pointer for `T::Repr`, and per the safety guarantee of
        // `AtomicType`, it's a valid and properly aligned pointer of `T`.
        self.0.as_ptr().cast()
    }

    /// Returns a mutable reference to the underlying atomic `T`.
    ///
    /// This is safe because the mutable reference to the atomic `T` guarantees exclusive access.
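    ///
    /// # Examples
    ///
    /// A simple sketch; with a mutable reference, the value can be updated without atomic
    /// operations:
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let mut x = Atomic::new(42i32);
    ///
    /// // No other access can happen through `&mut x`, so a plain update is fine.
    /// *x.get_mut() += 1;
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```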
    pub fn get_mut(&mut self) -> &mut T {
        // CAST: `T` and `T::Repr` have the same size and alignment per the safety requirement of
        // `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the cast
        // results in a valid pointer of `T`.
        // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
        // guarantees exclusive access.
        unsafe { &mut *self.0.as_ptr().cast() }
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    /// Loads the value from the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42i64);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_read", "atomic64_read"))]
    #[inline(always)]
    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
        let v = {
            match Ordering::TYPE {
                OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
                OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
                _ => build_error!("Wrong ordering"),
            }
        };

        // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
        unsafe { from_repr(v) }
    }

    /// Stores a value to the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.store(43, Relaxed);
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_set", "atomic64_set"))]
    #[inline(always)]
    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
        let v = into_repr(v);

        // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
        match Ordering::TYPE {
            OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
            OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
            _ => build_error!("Wrong ordering"),
        }
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicExchangeOps,
{
    /// Atomic exchange.
    ///
    /// Atomically updates `*self` to `v` and returns the old value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.xchg(52, Acquire));
    /// assert_eq!(52, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
    #[inline(always)]
    pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
        let v = into_repr(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable
        // to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }

    /// Atomic compare and exchange.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified.
    ///
    /// Compare: The comparison is done via a byte-level comparison between `*self` and `old`.
    ///
    /// Ordering: When it succeeds, it provides the ordering indicated by the `Ordering` type
    /// parameter; a failed cmpxchg doesn't provide any ordering, and its load part is a
    /// [`Relaxed`] load.
    ///
    /// Returns `Ok(value)` if the cmpxchg succeeds, in which case `value` is guaranteed to be
    /// equal to `old`; otherwise returns `Err(value)`, where `value` is the current value of
    /// `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// // Checks whether the cmpxchg succeeded.
    /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
    /// # assert!(!success);
    ///
    /// // Checks whether the cmpxchg failed.
    /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
    /// # assert!(failure);
    ///
    /// // Uses the old value if the cmpxchg failed, e.g. to retry it.
    /// match x.cmpxchg(52, 64, Relaxed) {
    ///     Ok(_) => { },
    ///     Err(old) => {
    ///         // do something with `old`.
    ///         # assert_eq!(old, 42);
    ///     }
    /// }
    ///
    /// // Uses the latest value regardless, same as atomic_cmpxchg() in C.
    /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
    /// # assert_eq!(42, latest);
    /// assert_eq!(64, x.load(Relaxed));
    /// ```
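    ///
    /// A common retry-loop sketch (illustrative; it atomically increments `x` even if other
    /// contexts update it concurrently):
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(64);
    ///
    /// let mut old = x.load(Relaxed);
    /// // Retries with the latest value until the cmpxchg succeeds.
    /// while let Err(cur) = x.cmpxchg(old, old + 1, Relaxed) {
    ///     old = cur;
    /// }
    ///
    /// assert_eq!(65, x.load(Relaxed));
    /// ```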
    ///
    /// [`Relaxed`]: ordering::Relaxed
    #[doc(alias(
        "atomic_cmpxchg",
        "atomic64_cmpxchg",
        "atomic_try_cmpxchg",
        "atomic64_try_cmpxchg",
        "compare_exchange"
    ))]
    #[inline(always)]
    pub fn cmpxchg<Ordering: ordering::Ordering>(
        &self,
        mut old: T,
        new: T,
        o: Ordering,
    ) -> Result<T, T> {
        // Note on code generation:
        //
        // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
        // the compiler is able to figure out that the branch is not needed if the users don't
        // care about whether the operation succeeds or not. One exception is x86: due to commit
        // 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"),
        // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
        // success of the cmpxchg and only wants to use the old value. For example, code like:
        //
        //     let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
        //
        // will still generate:
        //
        //     movl     $0x40, %ecx
        //     movl     $0x34, %eax
        //     lock
        //     cmpxchgl %ecx, 0x4(%rsp)
        //     jne      1f
        //  2:
        //     ...
        //  1: movl     %eax, %ecx
        //     jmp      2b
        //
        // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
        // location in the C function is always safe to write.
        if self.try_cmpxchg(&mut old, new, o) {
            Ok(old)
        } else {
            Err(old)
        }
    }

    /// Atomic compare and exchange, returning whether the operation succeeded.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified and `*old` is updated to the current value of `*self`.
    ///
    /// The "Compare" and "Ordering" parts are the same as for [`Atomic::cmpxchg()`].
    ///
    /// Returns `true` if the cmpxchg succeeds, otherwise `false`.
    #[inline(always)]
    fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
        let mut tmp = into_repr(*old);
        let new = into_repr(new);

        // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
        // transmutable to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
                OrderingType::Acquire => {
                    T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
                }
                OrderingType::Release => {
                    T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
                }
                OrderingType::Relaxed => {
                    T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
                }
            }
        };

        // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
        *old = unsafe { from_repr(tmp) };

        ret
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicArithmeticOps,
{
    /// Atomic add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.add(12, Relaxed);
    ///
    /// assert_eq!(54, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn add<Rhs>(&self, v: Rhs, _: ordering::Relaxed)
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_add()` due to safety requirement of
        // `AtomicAdd`.
        T::Repr::atomic_add(&self.0, v);
    }

    /// Atomic fetch and add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`, and returns the value of `*self`
    /// before the update.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Acquire); x.load(Relaxed) });
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Full); x.load(Relaxed) });
    /// ```
    #[inline(always)]
    pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_add*()` due to safety
        // requirement of `AtomicAdd`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_fetch_add(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_fetch_add_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_fetch_add_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_fetch_add_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }
}