// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;
pub mod spinlock;

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // Default implementation: relock with the plain `lock` method. Backends whose lock
        // variants differ (e.g., irq-saving spinlocks) must override this to use the original
        // locking method, per the trait's safety requirements.
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }
}

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }

    /// Tries to acquire the lock.
    ///
    /// Returns a guard that can be used to access the data protected by the lock if successful.
    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
    }
}

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    // `*mut ()` is neither `Send` nor `Sync`, so this field makes the guard `!Send`: it cannot be
    // moved to (and thus unlocked/dropped on) a thread other than the one that acquired the lock.
    _not_send: PhantomData<*mut ()>,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
    /// Releases the lock, runs `cb` while unlocked, then reacquires the lock before returning
    /// `cb`'s result. The relock is performed by the scope guard's destructor.
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        let _relock = ScopeGuard::new(||
            // SAFETY: The lock was just unlocked above and is being relocked now.
            unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new immutable lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        Self {
            lock,
            state,
            _not_send: PhantomData,
        }
    }
}