xref: /linux/rust/kernel/sync/lock.rs (revision 5bb6ba448fe3598a7668838942db1f008beb581b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Generic kernel lock and guard.
4 //!
5 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
6 //! spinlocks, raw spinlocks) to be provided with minimal effort.
7 
8 use super::LockClassKey;
9 use crate::{
10     init::PinInit,
11     pin_init,
12     str::CStr,
13     types::{NotThreadSafe, Opaque, ScopeGuard},
14 };
15 use core::{cell::UnsafeCell, marker::PhantomPinned};
16 use macros::pin_data;
17 
18 pub mod mutex;
19 pub mod spinlock;
20 
21 /// The "backend" of a lock.
22 ///
23 /// It is the actual implementation of the lock, without the need to repeat patterns used in all
24 /// locks.
25 ///
26 /// # Safety
27 ///
28 /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
29 ///   is owned, that is, between calls to [`lock`] and [`unlock`].
30 /// - Implementers must also ensure that [`relock`] uses the same locking method as the original
31 ///   lock operation.
32 ///
33 /// [`lock`]: Backend::lock
34 /// [`unlock`]: Backend::unlock
35 /// [`relock`]: Backend::relock
pub unsafe trait Backend {
    /// The state required by the lock.
    ///
    /// This is the C-side lock object (e.g. a `struct mutex` or `spinlock_t`); it is
    /// stored inside an [`Opaque`] by [`Lock`] and only ever accessed by raw pointer.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// For example, a spinlock backend may record here whether interrupts were enabled
    /// at acquisition time so that [`unlock`] can restore them. -- NOTE(review): exact
    /// contents are backend-defined; this trait imposes no structure on them.
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// The default implementation simply performs an ordinary [`Backend::lock`];
    /// backends whose relock must differ (per the trait-level safety requirement that
    /// relock use the same locking method as the original acquisition) override this.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }
}
84 
85 /// A mutual exclusion primitive.
86 ///
87 /// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
88 /// [`Backend`] specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    ///
    /// Wrapped in [`Opaque`] because it is initialised and mutated by C code through
    /// raw pointers, outside of Rust's aliasing rules.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    ///
    /// `UnsafeCell` grants interior mutability; all access is serialised by holding
    /// the lock (see [`Guard`]'s `Deref`/`DerefMut` implementations).
    pub(crate) data: UnsafeCell<T>,
}
104 
// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can,
// because moving the lock moves ownership of the protected `T` along with it.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}
107 
// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`. (Only `Send` is needed, not `Sync`: mutual exclusion guarantees
// that at most one thread touches the `T` at a time.)
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
111 
112 impl<T, B: Backend> Lock<T, B> {
113     /// Constructs a new lock initialiser.
114     pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
115         pin_init!(Self {
116             data: UnsafeCell::new(t),
117             _pin: PhantomPinned,
118             // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
119             // static lifetimes so they live indefinitely.
120             state <- Opaque::ffi_init(|slot| unsafe {
121                 B::init(slot, name.as_char_ptr(), key.as_ptr())
122             }),
123         })
124     }
125 }
126 
127 impl<T: ?Sized, B: Backend> Lock<T, B> {
128     /// Acquires the lock and gives the caller access to the data protected by it.
129     pub fn lock(&self) -> Guard<'_, T, B> {
130         // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
131         // that `init` was called.
132         let state = unsafe { B::lock(self.state.get()) };
133         // SAFETY: The lock was just acquired.
134         unsafe { Guard::new(self, state) }
135     }
136 }
137 
138 /// A lock guard.
139 ///
140 /// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
141 /// when a guard goes out of scope. It also provides a safe and convenient way to access the data
142 /// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    // The lock this guard was acquired from; used for data access and for unlocking on drop.
    pub(crate) lock: &'a Lock<T, B>,
    // Backend-specific state captured at lock time and handed back at unlock time.
    pub(crate) state: B::GuardState,
    // Makes the guard `!Send`: the `Backend::unlock` contract requires the unlock to be
    // performed by the lock's current owner, so the guard must stay on the acquiring
    // thread/CPU rather than being sent elsewhere and dropped there.
    _not_send: NotThreadSafe,
}
149 
// SAFETY: `Guard` is sync when the data protected by the lock is also sync. Sharing `&Guard`
// across threads only exposes `&T` (via `Deref`), which is safe precisely when `T: Sync`.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
152 
impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
    /// Temporarily releases the lock, runs `cb` while unlocked, then reacquires the
    /// lock before returning `cb`'s result.
    ///
    /// The relock happens via a [`ScopeGuard`], so it runs when the scope exits --
    /// guaranteeing the guard's "lock is held" invariant is restored on every path
    /// out of this function.
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        // SAFETY: The lock was just unlocked above and is being relocked now.
        let _relock =
            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}
165 
166 impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
167     type Target = T;
168 
169     fn deref(&self) -> &Self::Target {
170         // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
171         unsafe { &*self.lock.data.get() }
172     }
173 }
174 
175 impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
176     fn deref_mut(&mut self) -> &mut Self::Target {
177         // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
178         unsafe { &mut *self.lock.data.get() }
179     }
180 }
181 
impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    // Releases the lock when the guard goes out of scope.
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it. The guard is
        // `!Send`, so the drop happens on the thread/context that acquired the lock,
        // satisfying `Backend::unlock`'s current-owner requirement.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}
188 
189 impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
190     /// Constructs a new immutable lock guard.
191     ///
192     /// # Safety
193     ///
194     /// The caller must ensure that it owns the lock.
195     pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
196         Self {
197             lock,
198             state,
199             _not_send: NotThreadSafe,
200         }
201     }
202 }
203