xref: /linux/rust/kernel/mm.rs (revision fd1f8473503e5bf897bd3e8efe3545c0352954e6)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2024 Google LLC.
4 
5 //! Memory management.
6 //!
7 //! This module deals with managing the address space of userspace processes. Each process has an
8 //! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA
9 //! corresponds to a region of memory that the userspace process can access, and the VMA lets you
10 //! control what happens when userspace reads or writes to that region of memory.
11 //!
12 //! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
13 
14 use crate::{
15     bindings,
16     types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
17 };
18 use core::{ops::Deref, ptr::NonNull};
19 
20 pub mod virt;
21 use virt::VmaRef;
22 
23 #[cfg(CONFIG_MMU)]
24 pub use mmput_async::MmWithUserAsync;
25 mod mmput_async;
26 
/// A wrapper for the kernel's `struct mm_struct`.
///
/// This represents the address space of a userspace process, so each process has one `Mm`
/// instance. It may hold many VMAs internally.
///
/// There is a counter called `mm_users` that counts the users of the address space; this includes
/// the userspace process itself, but can also include kernel threads accessing the address space.
/// Once `mm_users` reaches zero, this indicates that the address space can be destroyed. To access
/// the address space, you must prevent `mm_users` from reaching zero while you are accessing it.
/// The [`MmWithUser`] type represents an address space where this is guaranteed, and you can
/// create one using [`mmget_not_zero`].
///
/// The `ARef<Mm>` smart pointer holds an `mmgrab` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmgrab`.
///
/// [`mmget_not_zero`]: Mm::mmget_not_zero
#[repr(transparent)]
pub struct Mm {
    // The inner C `mm_struct`, wrapped in `Opaque` since it is created and mutated by the C
    // side; `repr(transparent)` makes `*mut mm_struct` and `*mut Mm` interchangeable.
    mm: Opaque<bindings::mm_struct>,
}
50 
// SAFETY: It is safe to call `mmdrop` on another thread than where `mmgrab` was called, so
// ownership of an `Mm` refcount may be transferred across threads.
unsafe impl Send for Mm {}
// SAFETY: All methods on `Mm` can be called in parallel from several threads.
unsafe impl Sync for Mm {}
55 
// SAFETY: By the type invariants, this type is always refcounted (via `mmgrab`/`mmdrop`).
unsafe impl AlwaysRefCounted for Mm {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmgrab(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount. The cast from `NonNull<Mm>` to
        // `*mut mm_struct` is okay because `Mm` is a `repr(transparent)` wrapper.
        unsafe { bindings::mmdrop(obj.cast().as_ptr()) };
    }
}
70 
/// A wrapper for the kernel's `struct mm_struct`.
///
/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can
/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget`
/// refcount. It can be used to access the associated address space.
///
/// The `ARef<MmWithUser>` smart pointer holds an `mmget` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
#[repr(transparent)]
pub struct MmWithUser {
    // Transparent wrapper over `Mm`, so the same raw `mm_struct` pointer is valid for both.
    mm: Mm,
}
86 
// SAFETY: It is safe to call `mmput` on another thread than where `mmget` was called, so
// ownership of an `mm_users` refcount may be transferred across threads.
unsafe impl Send for MmWithUser {}
// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads.
unsafe impl Sync for MmWithUser {}
91 
// SAFETY: By the type invariants, this type is always refcounted (via `mmget`/`mmput`).
unsafe impl AlwaysRefCounted for MmWithUser {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmget(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount. The cast from `NonNull<MmWithUser>` to
        // `*mut mm_struct` is okay because both wrappers are `repr(transparent)`.
        unsafe { bindings::mmput(obj.cast().as_ptr()) };
    }
}
106 
// Make all `Mm` methods available on `MmWithUser`. This is sound because a `MmWithUser` is a
// strictly stronger handle than an `Mm` (it additionally guarantees `mm_users > 0`).
impl Deref for MmWithUser {
    type Target = Mm;

    #[inline]
    fn deref(&self) -> &Mm {
        &self.mm
    }
}
116 
117 // These methods are safe to call even if `mm_users` is zero.
118 impl Mm {
119     /// Returns a raw pointer to the inner `mm_struct`.
120     #[inline]
121     pub fn as_raw(&self) -> *mut bindings::mm_struct {
122         self.mm.get()
123     }
124 
125     /// Obtain a reference from a raw pointer.
126     ///
127     /// # Safety
128     ///
129     /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated
130     /// during the lifetime 'a.
131     #[inline]
132     pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm {
133         // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to
134         // repr(transparent).
135         unsafe { &*ptr.cast() }
136     }
137 
138     /// Calls `mmget_not_zero` and returns a handle if it succeeds.
139     #[inline]
140     pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> {
141         // SAFETY: The pointer is valid since self is a reference.
142         let success = unsafe { bindings::mmget_not_zero(self.as_raw()) };
143 
144         if success {
145             // SAFETY: We just created an `mmget` refcount.
146             Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) })
147         } else {
148             None
149         }
150     }
151 }
152 
// These methods require `mm_users` to be non-zero, which holding a `&MmWithUser` guarantees.
impl MmWithUser {
    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains
    /// non-zero for the duration of the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
        // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due
        // to repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Attempt to access a vma using the vma read lock.
    ///
    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
    /// case, you should fall back to taking the mmap read lock.
    ///
    /// When per-vma locks are disabled, this always returns `None`.
    #[inline]
    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
        // Only attempt the per-vma lock when the kernel was built with CONFIG_PER_VMA_LOCK;
        // otherwise fall through to the unconditional `None` below.
        #[cfg(CONFIG_PER_VMA_LOCK)]
        {
            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
            // `mm_users` is non-zero.
            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
            if !vma.is_null() {
                return Some(VmaReadGuard {
                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
                    // valid vma. The vma is stable for as long as the vma read lock is held.
                    vma: unsafe { VmaRef::from_raw(vma) },
                    _nts: NotThreadSafe,
                });
            }
        }

        // Silence warnings about unused variables.
        #[cfg(not(CONFIG_PER_VMA_LOCK))]
        let _ = vma_addr;

        None
    }

    /// Lock the mmap read lock.
    ///
    /// Returns a guard that releases the lock when dropped.
    #[inline]
    pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmap_read_lock(self.as_raw()) };

        // INVARIANT: We just acquired the read lock.
        MmapReadGuard {
            mm: self,
            _nts: NotThreadSafe,
        }
    }

    /// Try to lock the mmap read lock.
    ///
    /// Returns `None` if the lock could not be acquired without blocking.
    #[inline]
    pub fn mmap_read_trylock(&self) -> Option<MmapReadGuard<'_>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) };

        if success {
            // INVARIANT: We just acquired the read lock.
            Some(MmapReadGuard {
                mm: self,
                _nts: NotThreadSafe,
            })
        } else {
            None
        }
    }
}
228 
/// A guard for the mmap read lock.
///
/// The lock is released when the guard is dropped.
///
/// # Invariants
///
/// This `MmapReadGuard` guard owns the mmap read lock.
pub struct MmapReadGuard<'a> {
    // The mm whose mmap read lock is held; the borrow keeps the `MmWithUser` alive for 'a.
    mm: &'a MmWithUser,
    // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread
    _nts: NotThreadSafe,
}
239 
240 impl<'a> MmapReadGuard<'a> {
241     /// Look up a vma at the given address.
242     #[inline]
243     pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
244         // SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
245         // method. Any value is okay for `vma_addr`.
246         let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };
247 
248         if vma.is_null() {
249             None
250         } else {
251             // SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
252             //
253             // Furthermore, the returned vma is still under the protection of the read lock guard
254             // and can be used while the mmap read lock is still held. That the vma is not used
255             // after the MmapReadGuard gets dropped is enforced by the borrow-checker.
256             unsafe { Some(virt::VmaRef::from_raw(vma)) }
257         }
258     }
259 }
260 
impl Drop for MmapReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants, so releasing it here is correct
        // and restores the pre-lock state.
        unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
    }
}
268 
/// A guard for the vma read lock.
///
/// The lock is released when the guard is dropped.
///
/// # Invariants
///
/// This `VmaReadGuard` guard owns the vma read lock.
pub struct VmaReadGuard<'a> {
    // The locked vma; the borrow keeps it accessible only while the lock is held.
    vma: &'a VmaRef,
    // `vma_end_read` must be called on the same thread as where the lock was taken
    _nts: NotThreadSafe,
}
279 
// Make all `VmaRef` methods available on `VmaReadGuard`, so the guard can be used directly
// wherever a vma reference is expected.
impl Deref for VmaReadGuard<'_> {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        self.vma
    }
}
289 
impl Drop for VmaReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the vma read lock by the type invariants; releasing it here ends the
        // guard's access to the vma.
        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
    }
}
297