// xref: /linux/rust/kernel/sync/lock.rs (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{
    str::CStr,
    types::{NotThreadSafe, Opaque, ScopeGuard},
};
use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
use pin_init::{pin_data, pin_init, PinInit, Wrapper};

pub mod mutex;
pub mod spinlock;

pub(super) mod global;
pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }

    /// Asserts that the lock is held using lockdep.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn assert_is_held(ptr: *mut Self::State);
}
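
// A minimal, illustrative sketch of what implementing [`Backend`] looks like. `backend_example`
// and `ExampleSpinBackend` are hypothetical names that only busy-wait on an `AtomicBool`; they
// exist purely to show the shape of an implementation. The real backends (see the `mutex` and
// `spinlock` submodules) delegate to the C locking primitives and integrate with lockdep instead.
#[allow(dead_code)]
mod backend_example {
    use super::Backend;
    use core::sync::atomic::{AtomicBool, Ordering};

    struct ExampleSpinBackend;

    // SAFETY: `lock` and `try_lock` only succeed after atomically flipping the flag from `false`
    // to `true`, so at most one owner exists between `lock`/`try_lock` and `unlock`. `relock`
    // (the trait's default) reuses `lock`, satisfying the "same locking method" requirement.
    unsafe impl Backend for ExampleSpinBackend {
        type State = AtomicBool;
        type GuardState = ();

        unsafe fn init(
            ptr: *mut Self::State,
            _name: *const crate::ffi::c_char,
            _key: *mut bindings::lock_class_key,
        ) {
            // SAFETY: The caller guarantees that `ptr` is valid for write for this call.
            unsafe { ptr.write(AtomicBool::new(false)) };
        }

        unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
            // SAFETY: The caller guarantees that `init` was called, so `ptr` is valid.
            let flag = unsafe { &*ptr };
            // Spin until the flag transitions from unlocked (`false`) to locked (`true`).
            while flag
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_err()
            {
                core::hint::spin_loop();
            }
        }

        unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
            // SAFETY: The caller guarantees that `init` was called, so `ptr` is valid.
            let flag = unsafe { &*ptr };
            flag.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .ok()
                .map(|_| ())
        }

        unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
            // SAFETY: Per the trait contract, only the current owner calls `unlock`.
            let flag = unsafe { &*ptr };
            flag.store(false, Ordering::Release);
        }

        unsafe fn assert_is_held(ptr: *mut Self::State) {
            // SAFETY: The caller guarantees that `init` was called, so `ptr` is valid.
            let flag = unsafe { &*ptr };
            // A real backend asserts via lockdep; this sketch can only check the flag.
            debug_assert!(flag.load(Ordering::Relaxed));
        }
    }
}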

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    #[pin]
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    pub fn new(
        t: impl PinInit<T>,
        name: &'static CStr,
        key: Pin<&'static LockClassKey>,
    ) -> impl PinInit<Self> {
        pin_init!(Self {
            data <- UnsafeCell::pin_init(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}
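
// Usage sketch with a hypothetical `ExampleData` type: `new` only returns an initialiser, so a
// lock is normally embedded in a pinned struct and initialised through `pin_init!` together with
// the `new_spinlock!`/`new_mutex!` convenience macros, which supply the lock class key and name
// used by lockdep.
#[allow(dead_code)]
#[pin_data]
struct ExampleData {
    #[pin]
    value: crate::sync::SpinLock<u32>,
}

#[allow(dead_code)]
impl ExampleData {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            // `new_spinlock!` expands to `SpinLock::new(..)`, supplying a static lock class key
            // and a name for lockdep.
            value <- crate::new_spinlock!(0u32),
        })
    }
}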

impl<B: Backend> Lock<(), B> {
    /// Constructs a [`Lock`] from a raw pointer.
    ///
    /// This can be useful for interacting with a lock which was initialised outside of Rust.
    ///
    /// # Safety
    ///
    /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
    /// the whole lifetime of `'a`.
    ///
    /// [`State`]: Backend::State
    pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
        // SAFETY:
        // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
        // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
        //   the struct
        // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
        //   `B::State`.
        unsafe { &*ptr.cast() }
    }
}
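
// Illustrative sketch: a hypothetical helper that wraps a lock initialised on the C side, for
// example a `struct mutex` or `spinlock_t` embedded in a C struct. The data type is `()` because
// the protected data, if any, is not managed from Rust.
#[allow(dead_code)]
unsafe fn wrap_foreign_lock<'a, B: Backend>(ptr: *mut B::State) -> &'a Lock<(), B> {
    // SAFETY: Per this helper's (hypothetical) contract, `ptr` points to a lock that C code has
    // already initialised and that remains valid for all of `'a`.
    unsafe { Lock::<(), B>::from_raw(ptr) }
}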

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }

    /// Tries to acquire the lock.
    ///
    /// Returns a guard that can be used to access the data protected by the lock if successful.
    // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
    #[must_use = "if unused, the lock will be immediately unlocked"]
    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
    }
}
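
// Usage sketch (hypothetical helper): `try_lock` returns `None` instead of blocking when the lock
// is contended, so the caller must handle that case; dropping the guard releases the lock.
#[allow(dead_code)]
fn try_increment<B: Backend>(counter: &Lock<u32, B>) -> bool {
    match counter.try_lock() {
        Some(mut guard) => {
            // The guard dereferences to the protected `u32`; the lock is released when `guard`
            // goes out of scope at the end of this arm.
            *guard += 1;
            true
        }
        None => false,
    }
}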

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    _not_send: NotThreadSafe,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Returns the lock that this guard originates from.
    ///
    /// # Examples
    ///
    /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding
    /// lock is held.
    ///
    /// ```
    /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}};
    /// # use pin_init::stack_pin_init;
    ///
    /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
    ///     // Address-equal means the same lock.
    ///     assert!(core::ptr::eq(guard.lock_ref(), lock));
    /// }
    ///
    /// // Creates a new lock on the stack.
    /// stack_pin_init!{
    ///     let l = new_spinlock!(42)
    /// }
    ///
    /// let g = l.lock();
    ///
    /// // `g` originates from `l`.
    /// assert_held(&g, &l);
    /// ```
    pub fn lock_ref(&self) -> &'a Lock<T, B> {
        self.lock
    }

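    /// Temporarily releases the lock, runs `cb` while it is not held, and reacquires it (via
    /// [`Backend::relock`]) before returning `cb`'s result.
    ///
    /// This allows primitives built on top of a lock (for example, condition-variable-style
    /// waiters) to block without holding the lock.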
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        let _relock = ScopeGuard::new(||
                // SAFETY: The lock was just unlocked above and is being relocked now.
                unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }

    /// Returns a pinned mutable reference to the protected data.
    ///
    /// The guard implements [`DerefMut`] when `T: Unpin`, so for [`Unpin`]
    /// types [`DerefMut`] should be used instead of this function.
    ///
    /// [`DerefMut`]: core::ops::DerefMut
    /// [`Unpin`]: core::marker::Unpin
    ///
    /// # Examples
    ///
    /// ```
    /// # use kernel::sync::{Mutex, MutexGuard};
    /// # use core::{pin::Pin, marker::PhantomPinned};
    /// struct Data(PhantomPinned);
    ///
    /// fn example(mutex: &Mutex<Data>) {
    ///     let mut data: MutexGuard<'_, Data> = mutex.lock();
    ///     let mut data: Pin<&mut Data> = data.as_mut();
    /// }
    /// ```
    pub fn as_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: `self.lock.data` is structurally pinned.
        unsafe { Pin::new_unchecked(&mut *self.lock.data.get()) }
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
where
    T: Unpin,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new immutable lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
        unsafe { B::assert_is_held(lock.state.get()) };

        Self {
            lock,
            state,
            _not_send: NotThreadSafe,
        }
    }
}
318