// SPDX-License-Identifier: GPL-2.0

//! Atomic primitives.
//!
//! These primitives have the same semantics as their C counterparts; the precise definitions of
//! the semantics can be found at [`LKMM`]. Note that the Linux Kernel Memory (Consistency) Model
//! is the only memory model for Rust code in the kernel, and Rust's own atomics should be avoided.
//!
//! # Data races
//!
//! [`LKMM`] atomics have different rules regarding data races:
//!
//! - A normal write from the C side is treated as an atomic write if
//!   CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
//! - Mixed-size atomic accesses don't cause data races.
//!
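//! # Examples
//!
//! A minimal usage sketch:
//!
//! ```
//! use kernel::sync::atomic::{Atomic, Relaxed};
//!
//! let x = Atomic::new(42i32);
//!
//! x.store(43, Relaxed);
//!
//! assert_eq!(43, x.load(Relaxed));
//! ```
//!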
//! [`LKMM`]: srctree/tools/memory-model/

mod internal;
pub mod ordering;
mod predefine;

pub use internal::AtomicImpl;
pub use ordering::{Acquire, Full, Relaxed, Release};

pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};

use crate::build_error;
use internal::AtomicRepr;
use ordering::OrderingType;

/// A memory location which can be safely modified from multiple execution contexts.
///
/// This has the same size, alignment and bit validity as the underlying type `T`. It also
/// disables niche optimization, for the same reason as [`UnsafeCell`].
///
/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
/// `smp_load_acquire()` and `smp_store_release()`).
///
/// # Invariants
///
/// `self.0` is a valid `T`.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [LKMM]: srctree/tools/memory-model/
/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
#[repr(transparent)]
pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);

// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
unsafe impl<T: AtomicType> Sync for Atomic<T> {}

/// Types that support basic atomic operations.
///
/// # Round-trip transmutability
///
/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
///
/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
///   yields a value that is in all aspects equivalent to the original value.
///
/// # Safety
///
/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
///
/// Note that this is more relaxed than requiring bi-directional transmutability (i.e. that
/// [`transmute()`] is always sound between `U` and `T`) in order to support atomic variables
/// over unit-only enums; see [Examples].
///
/// # Limitations
///
/// Because C primitives are used to implement the atomic operations, and a C function requires a
/// valid object of a type to operate on (i.e. no `MaybeUninit<_>`), only types with all bits
/// initialized can be passed across the Rust <-> C surface. As a result, types like `(u8, u16)`
/// (whose padding bytes are uninitialized) are currently not supported.
///
/// # Examples
///
/// A unit-only enum that implements [`AtomicType`]:
///
/// ```
/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
///
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// #[repr(i32)]
/// enum State {
///     Uninit = 0,
///     Working = 1,
///     Done = 2,
/// }
///
/// // SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for State {
///     type Repr = i32;
/// }
///
/// let s = Atomic::new(State::Uninit);
///
/// assert_eq!(State::Uninit, s.load(Relaxed));
/// ```
///
/// [`transmute()`]: core::mem::transmute
/// [round-trip transmutable]: AtomicType#round-trip-transmutability
/// [Examples]: AtomicType#examples
pub unsafe trait AtomicType: Sized + Send + Copy {
    /// The backing atomic implementation type.
    type Repr: AtomicImpl;
}

/// Types that support atomic add operations.
///
/// # Safety
///
// TODO: Properly define `wrapping_add` in the following comment.
/// Adding (via `wrapping_add`) any value of type `Self::Repr::Delta` obtained by
/// [`Self::rhs_into_delta()`] to any value of type `Self::Repr` obtained by transmuting a value
/// of type `Self` must yield a value with a bit pattern also valid for `Self`.
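///
/// # Examples
///
/// A sketch of a newtype counter that supports atomic add, assuming the predefined `i32`
/// implementation of [`AtomicImpl`] uses `Delta = i32`:
///
/// ```
/// use kernel::sync::atomic::{Atomic, AtomicAdd, AtomicType, Relaxed};
///
/// #[derive(Clone, Copy)]
/// #[repr(transparent)]
/// struct Counter(i32);
///
/// // SAFETY: `Counter` has the same size and alignment as `i32`, and is round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for Counter {
///     type Repr = i32;
/// }
///
/// // SAFETY: Wrapping-adding any `i32` delta to any bit pattern valid for `Counter` yields
/// // a bit pattern that is still valid for `Counter`.
/// unsafe impl AtomicAdd<i32> for Counter {
///     fn rhs_into_delta(rhs: i32) -> i32 {
///         rhs
///     }
/// }
///
/// let c = Atomic::new(Counter(0));
///
/// c.add(5, Relaxed);
///
/// assert_eq!(5, c.load(Relaxed).0);
/// ```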
pub unsafe trait AtomicAdd<Rhs = Self>: AtomicType {
    /// Converts `Rhs` into the `Delta` type of the atomic implementation.
    fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
}

#[inline(always)]
const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
    // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
    // `T::Repr`, therefore the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&v) }
}

/// # Safety
///
/// `r` must be a valid bit pattern of `T`.
#[inline(always)]
const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
    // SAFETY: Per the safety requirement of the function, the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&r) }
}

impl<T: AtomicType> Atomic<T> {
    /// Creates a new atomic `T`.
    pub const fn new(v: T) -> Self {
        // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
        Self(AtomicRepr::new(into_repr(v)))
    }

    /// Creates a reference to an atomic `T` from a pointer of `T`.
    ///
    /// This usually is used when communicating with the C side or manipulating a C struct; see
    /// examples below.
    ///
    /// # Safety
    ///
    /// - `ptr` is aligned to `align_of::<T>()`.
    /// - `ptr` is valid for reads and writes for `'a`.
    /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against atomic operations on the returned reference. Note that if all
    ///   other accesses are atomic, then this safety requirement is trivially fulfilled.
    ///
    /// [`LKMM`]: srctree/tools/memory-model/
    ///
    /// # Examples
    ///
    /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
    /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
    /// `WRITE_ONCE()`/`smp_store_release()` on the C side:
    ///
    /// ```
    /// # use kernel::types::Opaque;
    /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
    ///
    /// // Assume there is a C struct `foo`.
    /// mod cbindings {
    ///     #[repr(C)]
    ///     pub(crate) struct foo {
    ///         pub(crate) a: i32,
    ///         pub(crate) b: i32,
    ///     }
    /// }
    ///
    /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
    ///
    /// // struct foo *foo_ptr = ..;
    /// let foo_ptr = tmp.get();
    ///
    /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
    /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
    ///
    /// // a = READ_ONCE(foo_ptr->a);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for reads, and all other accesses to it are atomic, so
    /// // no data race.
    /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
    /// # assert_eq!(a, 1);
    ///
    /// // smp_store_release(&foo_ptr->a, 2);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses to it are atomic, so
    /// // no data race.
    /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
    /// ```
    pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
    where
        T: Sync,
    {
        // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
        // SAFETY: Per the function safety requirements, `ptr` is a valid pointer and the object
        // will live long enough. It's safe to return a `&Atomic<T>` because the function safety
        // requirements guarantee that other accesses won't cause data races.
        unsafe { &*ptr.cast::<Self>() }
    }

    /// Returns a pointer to the underlying atomic `T`.
    ///
    /// Note that uses of the returned pointer must not cause data races as defined by [`LKMM`].
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    ///
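    /// # Examples
    ///
    /// A sketch showing that the returned pointer can round-trip through
    /// [`Atomic::from_ptr()`]:
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// // SAFETY: The pointer returned by `as_ptr()` is valid and properly aligned, it is valid
    /// // for reads and writes while `x` is live, and all accesses through it are atomic.
    /// let y = unsafe { Atomic::from_ptr(x.as_ptr()) };
    ///
    /// assert_eq!(42, y.load(Relaxed));
    /// ```
    ///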
    /// [`LKMM`]: srctree/tools/memory-model/
    /// [`align_of::<T>()`]: core::mem::align_of
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, `self.0.as_ptr()` is
        // a valid and properly aligned pointer for `T::Repr`, and per the safety guarantee of
        // `AtomicType`, it's a valid and properly aligned pointer of `T`.
        self.0.as_ptr().cast()
    }

    /// Returns a mutable reference to the underlying atomic `T`.
    ///
    /// This is safe because the mutable reference to the atomic `T` guarantees exclusive access.
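    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let mut x = Atomic::new(42i32);
    ///
    /// *x.get_mut() += 1;
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```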
    pub fn get_mut(&mut self) -> &mut T {
        // CAST: `T` and `T::Repr` have the same size and alignment per the safety requirement
        // of `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the
        // casting result is a valid pointer of `T`.
        // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
        // guarantees exclusive access.
        unsafe { &mut *self.0.as_ptr().cast() }
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    /// Loads the value from the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42i64);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_read", "atomic64_read"))]
    #[inline(always)]
    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
        let v = {
            match Ordering::TYPE {
                OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
                OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
                _ => build_error!("Wrong ordering"),
            }
        };

        // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
        unsafe { from_repr(v) }
    }

    /// Stores a value to the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.store(43, Relaxed);
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_set", "atomic64_set"))]
    #[inline(always)]
    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
        let v = into_repr(v);

        // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
        match Ordering::TYPE {
            OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
            OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
            _ => build_error!("Wrong ordering"),
        }
    }
}

impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Debug::fmt(&self.load(Relaxed), f)
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicExchangeOps,
{
    /// Atomic exchange.
    ///
    /// Atomically updates `*self` to `v` and returns the old value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.xchg(52, Acquire));
    /// assert_eq!(52, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
    #[inline(always)]
    pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
        let v = into_repr(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable
        // to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }

    /// Atomic compare and exchange.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified.
    ///
    /// Compare: The comparison is done via byte-level comparison between `*self` and `old`.
    ///
    /// Ordering: When the operation succeeds, it provides the ordering indicated by the
    /// `Ordering` type parameter; a failed operation doesn't provide any ordering, and the load
    /// part of a failed cmpxchg is a [`Relaxed`] load.
    ///
    /// Returns `Ok(value)` if the cmpxchg succeeds, and `value` is guaranteed to be equal to
    /// `old`; otherwise returns `Err(value)`, and `value` is the current value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// // Checks whether the cmpxchg succeeded.
    /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
    /// # assert!(!success);
    ///
    /// // Checks whether the cmpxchg failed.
    /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
    /// # assert!(failure);
    ///
    /// // Uses the old value if the cmpxchg failed, possibly to retry it.
    /// match x.cmpxchg(52, 64, Relaxed) {
    ///     Ok(_) => { },
    ///     Err(old) => {
    ///         // do something with `old`.
    ///         # assert_eq!(old, 42);
    ///     }
    /// }
    ///
    /// // Uses the latest value regardless of success, same as atomic_cmpxchg() in C.
    /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
    /// # assert_eq!(42, latest);
    /// assert_eq!(64, x.load(Relaxed));
    /// ```
    ///
    /// [`Relaxed`]: ordering::Relaxed
    #[doc(alias(
        "atomic_cmpxchg",
        "atomic64_cmpxchg",
        "atomic_try_cmpxchg",
        "atomic64_try_cmpxchg",
        "compare_exchange"
    ))]
    #[inline(always)]
    pub fn cmpxchg<Ordering: ordering::Ordering>(
        &self,
        mut old: T,
        new: T,
        o: Ordering,
    ) -> Result<T, T> {
        // Note on code generation:
        //
        // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
        // the compiler is able to figure out that the branch is not needed if the caller doesn't
        // care about whether the operation succeeds or not. One exception is on x86: due to
        // commit 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"), the
        // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
        // success of cmpxchg and only wants to use the old value. For example, for code like:
        //
        //     let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
        //
        // It will still generate code:
        //
        //     movl     $0x40, %ecx
        //     movl     $0x34, %eax
        //     lock
        //     cmpxchgl %ecx, 0x4(%rsp)
        //     jne      1f
        // 2:
        //     ...
        // 1:  movl     %eax, %ecx
        //     jmp      2b
        //
        // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
        // location in the C function is always safe to write.
        if self.try_cmpxchg(&mut old, new, o) {
            Ok(old)
        } else {
            Err(old)
        }
    }

    /// Atomic compare and exchange, returning whether the operation succeeded.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified, and `*old` is updated to the current value of `*self`.
    ///
    /// The "Compare" and "Ordering" parts are the same as in [`Atomic::cmpxchg()`].
    ///
    /// Returns `true` if the cmpxchg succeeds, `false` otherwise.
    #[inline(always)]
    fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
        let mut tmp = into_repr(*old);
        let new = into_repr(new);

        // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
        // transmutable to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
                OrderingType::Acquire => {
                    T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
                }
                OrderingType::Release => {
                    T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
                }
                OrderingType::Relaxed => {
                    T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
                }
            }
        };

        // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
        *old = unsafe { from_repr(tmp) };

        ret
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicArithmeticOps,
{
    /// Atomic add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.add(12, Relaxed);
    ///
    /// assert_eq!(54, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn add<Rhs>(&self, v: Rhs, _: ordering::Relaxed)
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_add()` due to the safety requirement
        // of `AtomicAdd`.
        T::Repr::atomic_add(&self.0, v);
    }

    /// Atomic fetch and add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`, and returns the value of `*self`
    /// before the update.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Acquire); x.load(Relaxed) });
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Full); x.load(Relaxed) });
    /// ```
    #[inline(always)]
    pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_add*()` due to the safety
        // requirement of `AtomicAdd`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_fetch_add(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_fetch_add_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_fetch_add_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_fetch_add_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }
}