xref: /linux/rust/kernel/types.rs (revision 7a4ffec9fd54ea27395e24dff726dbf58e2fe06b)
// SPDX-License-Identifier: GPL-2.0

//! Kernel types.

use crate::init::{self, PinInit};
use alloc::boxed::Box;
use core::{
    cell::UnsafeCell,
    marker::{PhantomData, PhantomPinned},
    mem::{ManuallyDrop, MaybeUninit},
    ops::{Deref, DerefMut},
    pin::Pin,
    ptr::NonNull,
};

/// Used to transfer ownership to and from foreign (non-Rust) languages.
///
/// Ownership is transferred from Rust to a foreign language by calling [`Self::into_foreign`] and
/// later may be transferred back to Rust by calling [`Self::from_foreign`].
///
/// This trait is meant to be used in cases when Rust objects are stored in C objects and
/// eventually "freed" back to Rust.
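///
/// # Examples
///
/// The sketch below shows a full round trip, as might happen when a Rust object is stored in a C
/// structure's private data pointer. It assumes the `Box<T>` implementation provided later in this
/// file and the allocation-flag aware `Box::new` from the kernel prelude; it is illustrative only,
/// not a prescribed usage pattern.
///
/// ```
/// use kernel::types::ForeignOwnable;
///
/// let owned = Box::new(42u32, GFP_KERNEL)?;
///
/// // Transfer ownership to the foreign side: only an opaque pointer crosses the boundary.
/// let raw = owned.into_foreign();
///
/// // While the foreign side holds the pointer, Rust code may temporarily borrow the value.
/// {
///     // SAFETY: `raw` came from `into_foreign` and `from_foreign` has not been called on it yet.
///     let borrowed: &u32 = unsafe { <Box<u32> as ForeignOwnable>::borrow(raw) };
///     assert_eq!(*borrowed, 42);
/// }
///
/// // Eventually ownership is transferred back to Rust; `raw` must not be used afterwards.
/// // SAFETY: `raw` came from `into_foreign`, `from_foreign` has not been called on it before,
/// // and all borrows have been dropped.
/// let owned = unsafe { <Box<u32> as ForeignOwnable>::from_foreign(raw) };
/// assert_eq!(*owned, 42);
/// # Ok::<(), Error>(())
/// ```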
pub trait ForeignOwnable: Sized {
    /// Type of values borrowed between calls to [`ForeignOwnable::into_foreign`] and
    /// [`ForeignOwnable::from_foreign`].
    type Borrowed<'a>;

    /// Converts a Rust-owned object to a foreign-owned one.
    ///
    /// The foreign representation is a pointer to void. There are no guarantees for this pointer.
    /// For example, it might be invalid, dangling, or pointing to uninitialized memory. Using it in
    /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`], or
    /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
    fn into_foreign(self) -> *const core::ffi::c_void;

    /// Borrows a foreign-owned object.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
    /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;

    /// Converts a foreign-owned object back to a Rust-owned one.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
    /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
    /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for
    /// this object must have been dropped.
    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;

    /// Tries to convert a foreign-owned object back to a Rust-owned one.
    ///
    /// A convenience wrapper over [`ForeignOwnable::from_foreign`] that returns [`None`] if `ptr`
    /// is null.
    ///
    /// # Safety
    ///
    /// `ptr` must either be null or satisfy the safety requirements for
    /// [`ForeignOwnable::from_foreign`].
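    ///
    /// # Examples
    ///
    /// A minimal illustration of the null case, using the `Box<i32>` implementation below for
    /// concreteness:
    ///
    /// ```
    /// use kernel::types::ForeignOwnable;
    ///
    /// // SAFETY: A null pointer is explicitly allowed by the safety requirements above.
    /// let val = unsafe { <Box<i32> as ForeignOwnable>::try_from_foreign(core::ptr::null()) };
    /// assert!(val.is_none());
    /// ```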
    unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> {
        if ptr.is_null() {
            None
        } else {
            // SAFETY: Since `ptr` is not null here, `ptr` satisfies the safety requirements of
            // `from_foreign` given the safety requirements of this function.
            unsafe { Some(Self::from_foreign(ptr)) }
        }
    }
}

impl<T: 'static> ForeignOwnable for Box<T> {
    type Borrowed<'a> = &'a T;

    fn into_foreign(self) -> *const core::ffi::c_void {
        Box::into_raw(self) as _
    }

    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T {
        // SAFETY: The safety requirements for this function ensure that the object is still alive,
        // so it is safe to dereference the raw pointer.
        // The safety requirements of `from_foreign` also ensure that the object remains alive for
        // the lifetime of the returned value.
        unsafe { &*ptr.cast() }
    }

    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
        // call to `Self::into_foreign`.
        unsafe { Box::from_raw(ptr as _) }
    }
}

impl<T: 'static> ForeignOwnable for Pin<Box<T>> {
    type Borrowed<'a> = Pin<&'a T>;

    fn into_foreign(self) -> *const core::ffi::c_void {
        // SAFETY: We are still treating the box as pinned.
        Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
    }

    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> {
        // SAFETY: The safety requirements for this function ensure that the object is still alive,
        // so it is safe to dereference the raw pointer.
        // The safety requirements of `from_foreign` also ensure that the object remains alive for
        // the lifetime of the returned value.
        let r = unsafe { &*ptr.cast() };

        // SAFETY: This pointer originates from a `Pin<Box<T>>`.
        unsafe { Pin::new_unchecked(r) }
    }

    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
        // call to `Self::into_foreign`.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
    }
}

impl ForeignOwnable for () {
    type Borrowed<'a> = ();

    fn into_foreign(self) -> *const core::ffi::c_void {
        core::ptr::NonNull::dangling().as_ptr()
    }

    unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}

    unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
}

/// Runs a cleanup function/closure when dropped.
///
/// The [`ScopeGuard::dismiss`] function prevents the cleanup function from running.
///
/// # Examples
///
/// In the example below, we have multiple exit paths and we want to log regardless of which one is
/// taken:
///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example1(arg: bool) {
///     let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
///
///     if arg {
///         return;
///     }
///
///     pr_info!("Do something...\n");
/// }
///
/// # example1(false);
/// # example1(true);
/// ```
///
/// In the example below, we want to log the same message on all early exits but a different one on
/// the main exit path:
///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example2(arg: bool) {
///     let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
///
///     if arg {
///         return;
///     }
///
///     // (Other early returns...)
///
///     log.dismiss();
///     pr_info!("example2 no early return\n");
/// }
///
/// # example2(false);
/// # example2(true);
/// ```
///
/// In the example below, we need a mutable object (the vector) to be accessible within the log
/// function, so we wrap it in the [`ScopeGuard`]:
///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example3(arg: bool) -> Result {
///     let mut vec =
///         ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
///
///     vec.push(10u8, GFP_KERNEL)?;
///     if arg {
///         return Ok(());
///     }
///     vec.push(20u8, GFP_KERNEL)?;
///     Ok(())
/// }
///
/// # assert_eq!(example3(false), Ok(()));
/// # assert_eq!(example3(true), Ok(()));
/// ```
///
/// # Invariants
///
/// The value stored in the struct is nearly always `Some(_)`, except between
/// [`ScopeGuard::dismiss`] and [`ScopeGuard::drop`]: in this case, it will be `None` as the value
/// will have been returned to the caller. Since [`ScopeGuard::dismiss`] consumes the guard,
/// callers won't be able to use it anymore.
pub struct ScopeGuard<T, F: FnOnce(T)>(Option<(T, F)>);

impl<T, F: FnOnce(T)> ScopeGuard<T, F> {
    /// Creates a new guarded object wrapping the given data and with the given cleanup function.
    pub fn new_with_data(data: T, cleanup_func: F) -> Self {
        // INVARIANT: The struct is being initialised with `Some(_)`.
        Self(Some((data, cleanup_func)))
    }

    /// Prevents the cleanup function from running and returns the guarded data.
    pub fn dismiss(mut self) -> T {
        // INVARIANT: This is the exception case in the invariant; it is not visible to callers
        // because this function consumes `self`.
        self.0.take().unwrap().0
    }
}

impl ScopeGuard<(), fn(())> {
    /// Creates a new guarded object with the given cleanup function.
    pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
        ScopeGuard::new_with_data((), move |_| cleanup())
    }
}

impl<T, F: FnOnce(T)> Deref for ScopeGuard<T, F> {
    type Target = T;

    fn deref(&self) -> &T {
        // The type invariants guarantee that `unwrap` will succeed.
        &self.0.as_ref().unwrap().0
    }
}

impl<T, F: FnOnce(T)> DerefMut for ScopeGuard<T, F> {
    fn deref_mut(&mut self) -> &mut T {
        // The type invariants guarantee that `unwrap` will succeed.
        &mut self.0.as_mut().unwrap().0
    }
}

impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
    fn drop(&mut self) {
        // Run the cleanup function if one is still present.
        if let Some((data, cleanup)) = self.0.take() {
            cleanup(data)
        }
    }
}

/// Stores an opaque value.
///
/// This is meant to be used with FFI objects that are never interpreted by Rust code.
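///
/// # Examples
///
/// In the sketch below, a plain `u64` stands in for a C structure that would normally come from
/// `bindings`, and the raw-pointer accesses stand in for calls into C code (illustrative only):
///
/// ```
/// use kernel::types::Opaque;
///
/// // Rust never interprets the wrapped value directly; it only hands out raw pointers to it.
/// let state = Opaque::new(0u64);
/// let ptr: *mut u64 = state.get();
///
/// // SAFETY: `ptr` points to live, initialized memory and no other accesses overlap with these.
/// unsafe { *ptr = 42 };
/// // SAFETY: As above.
/// assert_eq!(unsafe { *ptr }, 42);
/// ```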
#[repr(transparent)]
pub struct Opaque<T> {
    value: UnsafeCell<MaybeUninit<T>>,
    _pin: PhantomPinned,
}

impl<T> Opaque<T> {
    /// Creates a new opaque value.
    pub const fn new(value: T) -> Self {
        Self {
            value: UnsafeCell::new(MaybeUninit::new(value)),
            _pin: PhantomPinned,
        }
    }

    /// Creates an uninitialised value.
    pub const fn uninit() -> Self {
        Self {
            value: UnsafeCell::new(MaybeUninit::uninit()),
            _pin: PhantomPinned,
        }
    }

    /// Creates a pin-initializer from the given initializer closure.
    ///
    /// The returned initializer calls the given closure with the pointer to the inner `T` of this
    /// `Opaque`. Since this memory is uninitialized, the closure is not allowed to read from it.
    ///
    /// This function is safe, because the `T` inside of an `Opaque` is allowed to be
    /// uninitialized. Additionally, access to the inner `T` requires `unsafe`, so the caller needs
    /// to verify at that point that the inner value is valid.
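    ///
    /// # Examples
    ///
    /// A minimal sketch, where writing through the raw pointer stands in for a call into a C
    /// initialization function (a real user would typically call `bindings::..._init(slot)` here):
    ///
    /// ```
    /// use kernel::stack_pin_init;
    /// use kernel::types::Opaque;
    ///
    /// stack_pin_init!(let state = Opaque::<u32>::ffi_init(|slot| {
    ///     // SAFETY: `slot` is valid for writes; the closure only writes to it.
    ///     unsafe { slot.write(42) };
    /// }));
    ///
    /// // SAFETY: The closure above fully initialized the inner `u32`.
    /// assert_eq!(unsafe { *state.get() }, 42);
    /// ```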
    pub fn ffi_init(init_func: impl FnOnce(*mut T)) -> impl PinInit<Self> {
        // SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
        // initialize the `T`.
        unsafe {
            init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
                init_func(Self::raw_get(slot));
                Ok(())
            })
        }
    }

    /// Returns a raw pointer to the opaque data.
    pub const fn get(&self) -> *mut T {
        UnsafeCell::get(&self.value).cast::<T>()
    }

    /// Gets the value behind `this`.
    ///
    /// This function is useful to get access to the value without creating intermediate
    /// references.
    pub const fn raw_get(this: *const Self) -> *mut T {
        UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
    }
}

/// Types that are _always_ reference counted.
///
/// It allows such types to define their own custom ref increment and decrement functions.
/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
/// [`ARef<T>`].
///
/// This is usually implemented by wrappers around structures on the C side of the code. For
/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
/// instances of a type.
///
/// # Safety
///
/// Implementers must ensure that increments to the reference count keep the object alive in memory
/// at least until matching decrements are performed.
///
/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
/// alive.)
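///
/// # Examples
///
/// A minimal sketch of an implementation whose count lives inside the object itself. It is
/// illustrative only: the object is a static that is never freed, and a real implementation would
/// usually forward to the C side's refcounting helpers instead of using an atomic directly.
///
/// ```
/// use core::ptr::NonNull;
/// use core::sync::atomic::{AtomicUsize, Ordering};
/// use kernel::types::{ARef, AlwaysRefCounted};
///
/// struct Counted {
///     refcount: AtomicUsize,
/// }
///
/// // SAFETY: Increments keep the object alive because, in this sketch, the object is never freed
/// // at all, and every instance carries a reference count.
/// unsafe impl AlwaysRefCounted for Counted {
///     fn inc_ref(&self) {
///         self.refcount.fetch_add(1, Ordering::Relaxed);
///     }
///
///     unsafe fn dec_ref(obj: NonNull<Self>) {
///         // SAFETY: The safety requirements of `dec_ref` guarantee that the object is still
///         // alive, so it may be dereferenced here.
///         let previous = unsafe { obj.as_ref() }.refcount.fetch_sub(1, Ordering::Release);
///         // A real implementation would free the object once `previous` reaches 1.
///         assert!(previous > 0);
///     }
/// }
///
/// static OBJ: Counted = Counted { refcount: AtomicUsize::new(1) };
///
/// // `From<&T>` increments the count; dropping the `ARef` decrements it again.
/// let r: ARef<Counted> = (&OBJ).into();
/// assert_eq!(OBJ.refcount.load(Ordering::Relaxed), 2);
/// drop(r);
/// assert_eq!(OBJ.refcount.load(Ordering::Relaxed), 1);
/// ```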
pub unsafe trait AlwaysRefCounted {
    /// Increments the reference count on the object.
    fn inc_ref(&self);

    /// Decrements the reference count on the object.
    ///
    /// Frees the object when the count reaches zero.
    ///
    /// # Safety
    ///
    /// Callers must ensure that there was a previous matching increment to the reference count,
    /// and that the object is no longer used after its reference count is decremented (as it may
    /// result in the object being freed), unless the caller owns another increment on the refcount
    /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
    /// [`AlwaysRefCounted::dec_ref`] once).
    unsafe fn dec_ref(obj: NonNull<Self>);
}

/// An owned reference to an always-reference-counted object.
///
/// The object's reference count is automatically decremented when an instance of [`ARef`] is
/// dropped. It is also automatically incremented when a new instance is created via
/// [`ARef::clone`].
///
/// # Invariants
///
/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
pub struct ARef<T: AlwaysRefCounted> {
    ptr: NonNull<T>,
    _p: PhantomData<T>,
}

// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}

// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
// example, when the reference count reaches zero and `T` is dropped.
unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}

impl<T: AlwaysRefCounted> ARef<T> {
    /// Creates a new instance of [`ARef`].
    ///
    /// It takes over an increment of the reference count on the underlying object.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the reference count was incremented at least once, and that they
    /// are properly relinquishing one increment. That is, if there is only one increment, callers
    /// must not use the underlying object anymore -- it is only safe to do so via the newly
    /// created [`ARef`].
    pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
        // INVARIANT: The safety requirements guarantee that the new instance now owns the
        // increment on the refcount.
        Self {
            ptr,
            _p: PhantomData,
        }
    }

    /// Consumes the `ARef`, returning a raw pointer.
    ///
    /// This function does not change the refcount. After calling this function, the caller is
    /// responsible for the refcount previously managed by the `ARef`.
    ///
    /// # Examples
    ///
    /// ```
    /// use core::ptr::NonNull;
    /// use kernel::types::{ARef, AlwaysRefCounted};
    ///
    /// struct Empty {}
    ///
    /// unsafe impl AlwaysRefCounted for Empty {
    ///     fn inc_ref(&self) {}
    ///     unsafe fn dec_ref(_obj: NonNull<Self>) {}
    /// }
    ///
    /// let mut data = Empty {};
    /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap();
    /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
    /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
    ///
    /// assert_eq!(ptr, raw_ptr);
    /// ```
    pub fn into_raw(me: Self) -> NonNull<T> {
        ManuallyDrop::new(me).ptr
    }
}

impl<T: AlwaysRefCounted> Clone for ARef<T> {
    fn clone(&self) -> Self {
        self.inc_ref();
        // SAFETY: We just incremented the refcount above.
        unsafe { Self::from_raw(self.ptr) }
    }
}

impl<T: AlwaysRefCounted> Deref for ARef<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The type invariants guarantee that the object is valid.
        unsafe { self.ptr.as_ref() }
    }
}

impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
    fn from(b: &T) -> Self {
        b.inc_ref();
        // SAFETY: We just incremented the refcount above.
        unsafe { Self::from_raw(NonNull::from(b)) }
    }
}

impl<T: AlwaysRefCounted> Drop for ARef<T> {
    fn drop(&mut self) {
        // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
        // decrement.
        unsafe { T::dec_ref(self.ptr) };
    }
}

/// A sum type that always holds either a value of type `L` or `R`.
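///
/// # Examples
///
/// A small illustration (the concrete types are arbitrary):
///
/// ```
/// use kernel::types::Either;
///
/// let values: [Either<u32, &str>; 2] = [Either::Left(7), Either::Right("seven")];
/// for value in values {
///     match value {
///         Either::Left(n) => assert_eq!(n, 7),
///         Either::Right(s) => assert_eq!(s, "seven"),
///     }
/// }
/// ```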
pub enum Either<L, R> {
    /// Constructs an instance of [`Either`] containing a value of type `L`.
    Left(L),

    /// Constructs an instance of [`Either`] containing a value of type `R`.
    Right(R),
}

/// Types for which any bit pattern is valid.
///
/// Not every bit pattern is valid for every type. For example, a `bool` must be either zero or
/// one, so reading arbitrary bytes into something that contains a `bool` is not okay.
///
/// It's okay for the type to have padding, as initializing those bytes has no effect.
///
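/// # Examples
///
/// A sketch of how a hypothetical, driver-defined plain-old-data struct might opt in. Whether the
/// implementation is sound has to be argued for each concrete type:
///
/// ```
/// use kernel::types::FromBytes;
///
/// #[repr(C)]
/// struct Header {
///     id: u32,
///     len: u32,
/// }
///
/// // SAFETY: `Header` only contains integers, for which every bit pattern is valid, and it has no
/// // interior mutability.
/// unsafe impl FromBytes for Header {}
///
/// let header = Header { id: 1, len: 0 };
/// assert_eq!((header.id, header.len), (1, 0));
/// ```
///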
/// # Safety
///
/// All bit patterns must be valid for this type. This type must not have interior mutability.
pub unsafe trait FromBytes {}

// SAFETY: All bit patterns are acceptable values of the types below.
unsafe impl FromBytes for u8 {}
unsafe impl FromBytes for u16 {}
unsafe impl FromBytes for u32 {}
unsafe impl FromBytes for u64 {}
unsafe impl FromBytes for usize {}
unsafe impl FromBytes for i8 {}
unsafe impl FromBytes for i16 {}
unsafe impl FromBytes for i32 {}
unsafe impl FromBytes for i64 {}
unsafe impl FromBytes for isize {}
// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
// patterns are also acceptable for arrays of that type.
unsafe impl<T: FromBytes> FromBytes for [T] {}
unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {}

/// Types that can be viewed as an immutable slice of initialized bytes.
///
/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This
/// means that it should not have any padding, as padding bytes are uninitialized. Reading
/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive
/// information on the stack to userspace.
///
/// The struct should also not hold kernel pointers, as kernel pointer addresses are considered
/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so
/// this is a correctness requirement, but not a safety requirement.
///
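/// # Examples
///
/// A sketch for a hypothetical, driver-defined struct. Note that the layout must be checked for
/// padding before implementing the trait:
///
/// ```
/// use kernel::types::AsBytes;
///
/// #[repr(C)]
/// struct Sample {
///     value: u32,
///     flags: u32,
/// }
///
/// // SAFETY: `Sample` consists of two `u32` fields with identical alignment, so it has no padding
/// // and therefore no uninitialized bytes, and it has no interior mutability.
/// unsafe impl AsBytes for Sample {}
///
/// let s = Sample { value: 7, flags: 0 };
/// assert_eq!((s.value, s.flags), (7, 0));
/// ```
///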
/// # Safety
///
/// Values of this type may not contain any uninitialized bytes. This type must not have interior
/// mutability.
pub unsafe trait AsBytes {}

// SAFETY: Instances of the following types have no uninitialized portions.
unsafe impl AsBytes for u8 {}
unsafe impl AsBytes for u16 {}
unsafe impl AsBytes for u32 {}
unsafe impl AsBytes for u64 {}
unsafe impl AsBytes for usize {}
unsafe impl AsBytes for i8 {}
unsafe impl AsBytes for i16 {}
unsafe impl AsBytes for i32 {}
unsafe impl AsBytes for i64 {}
unsafe impl AsBytes for isize {}
unsafe impl AsBytes for bool {}
unsafe impl AsBytes for char {}
unsafe impl AsBytes for str {}
// SAFETY: If individual values in an array have no uninitialized portions, then the array itself
// does not have any uninitialized portions either.
unsafe impl<T: AsBytes> AsBytes for [T] {}
unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {}
535