xref: /linux/rust/kernel/sync/atomic/internal.rs (revision b91d5d4bcf1266257a9e0199e1b4ad7fa8771baa)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Atomic internal implementations.
4 //!
5 //! Provides 1:1 mapping to the C atomic operations.
6 
7 use crate::bindings;
8 use crate::macros::paste;
9 use core::cell::UnsafeCell;
10 use ffi::c_void;
11 
mod private {
    /// Sealed trait marker to disable customized impls on atomic implementation traits.
    ///
    /// Because this trait lives in a private module, only types in this file can implement
    /// it, and therefore only those types can implement the traits bounded by it.
    pub trait Sealed {}
}
16 
// The C side supports atomic primitives only for `i32` and `i64` (`atomic_t` and `atomic64_t`),
// while the Rust side also provides atomic support for `i8`, `i16` and `*const c_void` on top of
// lower-level C primitives.
//
// These five impls are the complete set: no other type can be sealed, hence no other type can
// implement `AtomicImpl` below.
impl private::Sealed for i8 {}
impl private::Sealed for i16 {}
impl private::Sealed for *const c_void {}
impl private::Sealed for i32 {}
impl private::Sealed for i64 {}
25 
/// A marker trait for types that implement atomic operations with C side primitives.
///
/// This trait is sealed, and only types that map directly to the C side atomics
/// or can be implemented with lower-level C primitives are allowed to implement this:
///
/// - `i8`, `i16` and `*const c_void` are implemented with lower-level C primitives.
/// - `i32` maps to `atomic_t`.
/// - `i64` maps to `atomic64_t`.
pub trait AtomicImpl: Sized + Copy + private::Sealed {
    /// The type of the delta in arithmetic or logical operations.
    ///
    /// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type of
    /// [`Self`], but it may be different for the atomic pointer type.
    type Delta;
}
41 
42 // The current helpers of load/store of atomic `i8`, `i16` and pointers use `{WRITE,READ}_ONCE()`
43 // hence the atomicity is only guaranteed against read-modify-write operations if the architecture
44 // supports native atomic RmW.
45 //
46 // In the future when a CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=n architecture plans to support Rust, the
47 // load/store helpers that guarantee atomicity against RmW operations (usually via a lock) need to
48 // be added.
49 crate::static_assert!(
50     cfg!(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW),
51     "The current implementation of atomic i8/i16/ptr relies on the architecure being \
52     ARCH_SUPPORTS_ATOMIC_RMW"
53 );
54 
// `i8` and `i16` atomics are built on lower-level C primitives (see the comments and static
// assert above); the delta type of their arithmetic operations is the value type itself.
impl AtomicImpl for i8 {
    type Delta = Self;
}

impl AtomicImpl for i16 {
    type Delta = Self;
}

// The atomic pointer type is the one case where `Delta` differs from `Self`: deltas are `isize`
// offsets rather than pointers.
impl AtomicImpl for *const c_void {
    type Delta = isize;
}

// `atomic_t` implements atomic operations on `i32`.
impl AtomicImpl for i32 {
    type Delta = Self;
}

// `atomic64_t` implements atomic operations on `i64`.
impl AtomicImpl for i64 {
    type Delta = Self;
}
76 
/// Atomic representation.
///
/// A `#[repr(transparent)]` wrapper around an [`UnsafeCell<T>`], so a pointer to the inner `T`
/// can be passed to the C atomic primitives while the wrapper itself is shared by reference.
#[repr(transparent)]
pub struct AtomicRepr<T: AtomicImpl>(UnsafeCell<T>);
80 
impl<T: AtomicImpl> AtomicRepr<T> {
    /// Creates a new atomic representation `T`.
    pub const fn new(v: T) -> Self {
        Self(UnsafeCell::new(v))
    }

    /// Returns a pointer to the underlying `T`.
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: `self.0` is an `UnsafeCell<T>`, therefore the pointer returned by `.get()`
        // must be valid and properly aligned.
        self.0.get()
    }
}
98 
// This macro generates the function signature with given argument list and return type.
//
// Three arms, forming a small recursion over the `[$variant ...]` list:
// 1. no bracket list: emit the `atomic_$func(...)` signature itself (base of the recursion);
// 2. non-empty list: emit `atomic_$func_$variant` for the head, then recurse on the tail with
//    `$func` unchanged;
// 3. empty list `[]`: emit the unsuffixed `atomic_$func` by delegating to arm 1.
macro_rules! declare_atomic_method {
    (
        $(#[doc=$doc:expr])*
        $func:ident($($arg:ident : $arg_type:ty),*) $(-> $ret:ty)?
    ) => {
        paste!(
            $(#[doc = $doc])*
            fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)?;
        );
    };
    (
        $(#[doc=$doc:expr])*
        $func:ident [$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)?
    ) => {
        paste!(
            declare_atomic_method!(
                $(#[doc = $doc])*
                [< $func _ $variant >]($($arg_sig)*) $(-> $ret)?
            );
        );

        declare_atomic_method!(
            $(#[doc = $doc])*
            $func [$($rest)*]($($arg_sig)*) $(-> $ret)?
        );
    };
    (
        $(#[doc=$doc:expr])*
        $func:ident []($($arg_sig:tt)*) $(-> $ret:ty)?
    ) => {
        declare_atomic_method!(
            $(#[doc = $doc])*
            $func($($arg_sig)*) $(-> $ret)?
        );
    }
}
136 
// This macro generates the function implementation with given argument list and return type, and it
// will replace "call(...)" expression with "$ctype _ $func" to call the real C function.
//
// The arm structure mirrors `declare_atomic_method!`:
// 1. no bracket list: emit the `atomic_$func` body, calling `bindings::$ctype_$func`;
// 2. non-empty list: emit the `$func_$variant` implementation for the head, then recurse on the
//    tail;
// 3. empty list `[]`: emit the unsuffixed implementation by delegating to arm 1.
macro_rules! impl_atomic_method {
    (
        ($ctype:ident) $func:ident($($arg:ident: $arg_type:ty),*) $(-> $ret:ty)? {
            $unsafe:tt { call($($c_arg:expr),*) }
        }
    ) => {
        paste!(
            #[inline(always)]
            fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)? {
                // TODO: Ideally we want to use the SAFETY comments written at the macro invocation
                // (e.g. in `declare_and_impl_atomic_methods!()`, however, since SAFETY comments
                // are just comments, and they are not passed to macros as tokens, therefore we
                // cannot use them here. One potential improvement is that if we support using
                // attributes as an alternative for SAFETY comments, then we can use that for macro
                // generating code.
                //
                // SAFETY: specified on macro invocation.
                $unsafe { bindings::[< $ctype _ $func >]($($c_arg,)*) }
            }
        );
    };
    (
        ($ctype:ident) $func:ident[$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)? {
            $unsafe:tt { call($($arg:tt)*) }
        }
    ) => {
        paste!(
            impl_atomic_method!(
                ($ctype) [< $func _ $variant >]($($arg_sig)*) $( -> $ret)? {
                    $unsafe { call($($arg)*) }
            }
            );
        );
        impl_atomic_method!(
            ($ctype) $func [$($rest)*]($($arg_sig)*) $( -> $ret)? {
                $unsafe { call($($arg)*) }
            }
        );
    };
    (
        ($ctype:ident) $func:ident[]($($arg_sig:tt)*) $( -> $ret:ty)? {
            $unsafe:tt { call($($arg:tt)*) }
        }
    ) => {
        impl_atomic_method!(
            ($ctype) $func($($arg_sig)*) $(-> $ret)? {
                $unsafe { call($($arg)*) }
            }
        );
    }
}
190 
// Generates the `$ops` trait declaration: one `declare_atomic_method!` expansion per method.
//
// Note the `$unsafe { bindings::#call(...) }` body in the input is matched here only so the same
// invocation syntax works for both this macro and `impl_atomic_ops_for_one!`; for the trait
// declaration the body tokens are discarded.
macro_rules! declare_atomic_ops_trait {
    (
        $(#[$attr:meta])* $pub:vis trait $ops:ident {
            $(
                $(#[doc=$doc:expr])*
                fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
                    $unsafe:tt { bindings::#call($($arg:tt)*) }
                }
            )*
        }
    ) => {
        $(#[$attr])*
        $pub trait $ops: AtomicImpl {
            $(
                declare_atomic_method!(
                    $(#[doc=$doc])*
                    $func[$($variant)*]($($arg_sig)*) $(-> $ret)?
                );
            )*
        }
    }
}
213 
// Implements the `$ops` trait for a single `$ty`, routing each method through
// `impl_atomic_method!` so the generated bodies call `bindings::$ctype_*`.
macro_rules! impl_atomic_ops_for_one {
    (
        $ty:ty => $ctype:ident,
        $(#[$attr:meta])* $pub:vis trait $ops:ident {
            $(
                $(#[doc=$doc:expr])*
                fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
                    $unsafe:tt { bindings::#call($($arg:tt)*) }
                }
            )*
        }
    ) => {
        impl $ops for $ty {
            $(
                impl_atomic_method!(
                    ($ctype) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
                        $unsafe { call($($arg)*) }
                    }
                );
            )*
        }
    }
}
237 
// Declares $ops trait with methods and implements the trait.
//
// Internal rules:
// - entry arm: packages the trait definition tokens and forwards to `@with_ops_def`;
// - `@with_ops_def`: declares the trait once via `declare_atomic_ops_trait!`, then starts the
//   `@munch` recursion over the `$ty => $ctype` map;
// - `@munch`: implements the trait for the head `$ty => $ctype` pair via
//   `impl_atomic_ops_for_one!` and recurses on the rest; the empty-map arm terminates.
macro_rules! declare_and_impl_atomic_methods {
    (
        [ $($map:tt)* ]
        $(#[$attr:meta])* $pub:vis trait $ops:ident { $($body:tt)* }
    ) => {
        declare_and_impl_atomic_methods!(
            @with_ops_def
            [ $($map)* ]
            ( $(#[$attr])* $pub trait $ops { $($body)* } )
        );
    };

    (@with_ops_def [ $($map:tt)* ] ( $($ops_def:tt)* )) => {
        declare_atomic_ops_trait!( $($ops_def)* );

        declare_and_impl_atomic_methods!(
            @munch
            [ $($map)* ]
            ( $($ops_def)* )
        );
    };

    (@munch [] ( $($ops_def:tt)* )) => {};

    (@munch [ $ty:ty => $ctype:ident $(, $($rest:tt)*)? ] ( $($ops_def:tt)* )) => {
        impl_atomic_ops_for_one!(
            $ty => $ctype,
            $($ops_def)*
        );

        declare_and_impl_atomic_methods!(
            @munch
            [ $($($rest)*)? ]
            ( $($ops_def)* )
        );
    };
}
276 
// Generates `AtomicBasicOps` with `atomic_read`/`atomic_read_acquire` and
// `atomic_set`/`atomic_set_release` (the bracket list names the extra ordering variants), and
// implements it for all five `AtomicImpl` types.
declare_and_impl_atomic_methods!(
    [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
    /// Basic atomic operations
    pub trait AtomicBasicOps {
        /// Atomic read (load).
        fn read[acquire](a: &AtomicRepr<Self>) -> Self {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast()) }
        }

        /// Atomic set (store).
        fn set[release](a: &AtomicRepr<Self>, v: Self) {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), v) }
        }
    }
);
294 
// Generates `AtomicExchangeOps` with `atomic_xchg`/`atomic_try_cmpxchg` plus their
// `_acquire`/`_release`/`_relaxed` ordering variants, implemented for all five `AtomicImpl` types.
declare_and_impl_atomic_methods!(
    [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
    /// Exchange and compare-and-exchange atomic operations
    pub trait AtomicExchangeOps {
        /// Atomic exchange.
        ///
        /// Atomically updates `*a` to `v` and returns the old value.
        fn xchg[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self) -> Self {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), v) }
        }

        /// Atomic compare and exchange.
        ///
        /// If `*a` == `*old`, atomically updates `*a` to `new`. Otherwise, `*a` is not
        /// modified, `*old` is updated to the current value of `*a`.
        ///
        /// Return `true` if the update of `*a` occurred, `false` otherwise.
        fn try_cmpxchg[acquire, release, relaxed](
            a: &AtomicRepr<Self>, old: &mut Self, new: Self
        ) -> bool {
            // SAFETY: `a.as_ptr()` is valid and properly aligned. `core::ptr::from_mut(old)`
            // is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), core::ptr::from_mut(old), new) }
        }
    }
);
322 
// Generates `AtomicArithmeticOps` only for the types with native C atomics (`atomic_t` and
// `atomic64_t`); `i8`/`i16`/pointer types do not get arithmetic operations here.
//
// Note the argument order of the C functions: the delta comes first, the pointer second.
declare_and_impl_atomic_methods!(
    [ i32 => atomic, i64 => atomic64 ]
    /// Atomic arithmetic operations
    pub trait AtomicArithmeticOps {
        /// Atomic add (wrapping).
        ///
        /// Atomically updates `*a` to `(*a).wrapping_add(v)`.
        fn add[](a: &AtomicRepr<Self>, v: Self::Delta) {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(v, a.as_ptr().cast()) }
        }

        /// Atomic fetch and add (wrapping).
        ///
        /// Atomically updates `*a` to `(*a).wrapping_add(v)`, and returns the value of `*a`
        /// before the update.
        fn fetch_add[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
            // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned.
            unsafe { bindings::#call(v, a.as_ptr().cast()) }
        }

        /// Atomic fetch and sub (wrapping).
        ///
        /// Atomically updates `*a` to `(*a).wrapping_sub(v)`, and returns the value of `*a`
        /// before the update.
        fn fetch_sub[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
            // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned.
            unsafe { bindings::#call(v, a.as_ptr().cast()) }
        }
    }
);
350