// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Memory management.
//!
//! This module deals with managing the address space of userspace processes. Each process has an
//! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA
//! corresponds to a region of memory that the userspace process can access, and the VMA lets you
//! control what happens when userspace reads or writes to that region of memory.
//!
//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
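//!
//! # Examples
//!
//! A minimal sketch of the intended flow, going from an address space reference to a VMA lookup.
//! The function and the origin of the `mm` reference are illustrative, not part of this module:
//!
//! ```ignore
//! use kernel::mm::Mm;
//!
//! fn inspect(mm: &Mm, addr: usize) {
//!     // Keep `mm_users` non-zero while the address space is accessed.
//!     if let Some(mm) = mm.mmget_not_zero() {
//!         // Take the mmap read lock so that VMAs can be inspected safely.
//!         let guard = mm.mmap_read_lock();
//!         if let Some(_vma) = guard.vma_lookup(addr) {
//!             // The VMA may be used here; the borrow-checker ties it to `guard`.
//!         }
//!     } // The `ARef<MmWithUser>` handle is dropped here, calling `mmput`.
//! }
//! ```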
#![cfg(CONFIG_MMU)]

use crate::{
    bindings,
    types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
};
use core::{ops::Deref, ptr::NonNull};

pub mod virt;
use virt::VmaRef;

/// A wrapper for the kernel's `struct mm_struct`.
///
/// This represents the address space of a userspace process, so each process has one `Mm`
/// instance. It may hold many VMAs internally.
///
/// There is a counter called `mm_users` that counts the users of the address space; this includes
/// the userspace process itself, but can also include kernel threads accessing the address space.
/// Once `mm_users` reaches zero, the address space may be destroyed. To access the address space,
/// you must prevent `mm_users` from reaching zero while you are accessing it. The [`MmWithUser`]
/// type represents an address space where this is guaranteed, and you can create one using
/// [`mmget_not_zero`].
///
/// The `ARef<Mm>` smart pointer holds an `mmgrab` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmgrab`.
///
/// [`mmget_not_zero`]: Mm::mmget_not_zero
#[repr(transparent)]
pub struct Mm {
    mm: Opaque<bindings::mm_struct>,
}

// SAFETY: It is safe to call `mmdrop` from a thread other than the one that called `mmgrab`.
unsafe impl Send for Mm {}
// SAFETY: All methods on `Mm` can be called in parallel from several threads.
unsafe impl Sync for Mm {}

// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for Mm {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmgrab(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmdrop(obj.cast().as_ptr()) };
    }
}

/// A wrapper for the kernel's `struct mm_struct`.
///
/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can
/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget`
/// refcount. It can be used to access the associated address space.
///
/// The `ARef<MmWithUser>` smart pointer holds an `mmget` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
#[repr(transparent)]
pub struct MmWithUser {
    mm: Mm,
}

// SAFETY: It is safe to call `mmput` from a thread other than the one that called `mmget`.
unsafe impl Send for MmWithUser {}
// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads.
unsafe impl Sync for MmWithUser {}

// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for MmWithUser {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmget(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmput(obj.cast().as_ptr()) };
    }
}

// Make all `Mm` methods available on `MmWithUser`.
impl Deref for MmWithUser {
    type Target = Mm;

    #[inline]
    fn deref(&self) -> &Mm {
        &self.mm
    }
}

/// A wrapper for the kernel's `struct mm_struct`.
///
/// This type is identical to `MmWithUser` except that it uses `mmput_async` when dropping a
/// refcount. This means that the destructor of `ARef<MmWithUserAsync>` is safe to call in atomic
/// context.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
#[repr(transparent)]
pub struct MmWithUserAsync {
    mm: MmWithUser,
}

// SAFETY: It is safe to call `mmput_async` from a thread other than the one that called `mmget`.
unsafe impl Send for MmWithUserAsync {}
// SAFETY: All methods on `MmWithUserAsync` can be called in parallel from several threads.
unsafe impl Sync for MmWithUserAsync {}

// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for MmWithUserAsync {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmget(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmput_async(obj.cast().as_ptr()) };
    }
}

// Make all `MmWithUser` methods available on `MmWithUserAsync`.
impl Deref for MmWithUserAsync {
    type Target = MmWithUser;

    #[inline]
    fn deref(&self) -> &MmWithUser {
        &self.mm
    }
}

// These methods are safe to call even if `mm_users` is zero.
impl Mm {
    /// Returns a raw pointer to the inner `mm_struct`.
    #[inline]
    pub fn as_raw(&self) -> *mut bindings::mm_struct {
        self.mm.get()
    }

    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated
    /// during the lifetime 'a.
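    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `ptr` comes from C code that guarantees the `mm_struct` stays
    /// alive for the whole call (the surrounding function is hypothetical):
    ///
    /// ```ignore
    /// fn with_mm(ptr: *const bindings::mm_struct) {
    ///     // SAFETY: The (hypothetical) caller guarantees that `ptr` points at an `mm_struct`
    ///     // that is not deallocated while this function runs.
    ///     let mm = unsafe { Mm::from_raw(ptr) };
    ///     // Try to upgrade to a handle that allows accessing the address space.
    ///     let _handle = mm.mmget_not_zero();
    /// }
    /// ```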
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm {
        // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to
        // repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Calls `mmget_not_zero` and returns a handle if it succeeds.
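    ///
    /// # Examples
    ///
    /// A minimal sketch of upgrading an `&Mm` to an owned [`MmWithUser`] handle:
    ///
    /// ```ignore
    /// fn upgrade(mm: &Mm) {
    ///     if let Some(mm) = mm.mmget_not_zero() {
    ///         // `mm` is an `ARef<MmWithUser>`, so the address space cannot be destroyed
    ///         // while this handle exists.
    ///         let _guard = mm.mmap_read_lock();
    ///     } else {
    ///         // `mm_users` already reached zero; the address space is being torn down.
    ///     }
    /// }
    /// ```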
    #[inline]
    pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmget_not_zero(self.as_raw()) };

        if success {
            // SAFETY: We just created an `mmget` refcount.
            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) })
        } else {
            None
        }
    }
}

// These methods require `mm_users` to be non-zero.
impl MmWithUser {
    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains
    /// non-zero for the duration of the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
        // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due
        // to repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Use `mmput_async` when dropping this refcount.
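    ///
    /// # Examples
    ///
    /// A minimal sketch of converting a handle so that it may be dropped in atomic context:
    ///
    /// ```ignore
    /// fn for_atomic_drop(mm: ARef<MmWithUser>) -> ARef<MmWithUserAsync> {
    ///     // The returned handle calls `mmput_async` on drop instead of `mmput`.
    ///     MmWithUser::into_mmput_async(mm)
    /// }
    /// ```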
    #[inline]
    pub fn into_mmput_async(me: ARef<MmWithUser>) -> ARef<MmWithUserAsync> {
        // SAFETY: The layouts and invariants are compatible.
        unsafe { ARef::from_raw(ARef::into_raw(me).cast()) }
    }

    /// Attempt to access a vma using the vma read lock.
    ///
    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
    /// case, you should fall back to taking the mmap read lock.
    ///
    /// When per-vma locks are disabled, this always returns `None`.
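    ///
    /// # Examples
    ///
    /// A sketch of the trylock-then-fallback pattern described above; the `start` accessor on
    /// the returned VMA is assumed here for illustration:
    ///
    /// ```ignore
    /// fn vma_start(mm: &MmWithUser, addr: usize) -> Option<usize> {
    ///     // Fast path: per-vma read lock, without touching the mmap lock.
    ///     if let Some(vma) = mm.lock_vma_under_rcu(addr) {
    ///         return Some(vma.start());
    ///     }
    ///     // Slow path: fall back to the mmap read lock.
    ///     let guard = mm.mmap_read_lock();
    ///     guard.vma_lookup(addr).map(|vma| vma.start())
    /// }
    /// ```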
    #[inline]
    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
        #[cfg(CONFIG_PER_VMA_LOCK)]
        {
            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
            // `mm_users` is non-zero.
            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
            if !vma.is_null() {
                return Some(VmaReadGuard {
                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
                    // valid vma. The vma is stable for as long as the vma read lock is held.
                    vma: unsafe { VmaRef::from_raw(vma) },
                    _nts: NotThreadSafe,
                });
            }
        }

        // Silence warnings about unused variables.
        #[cfg(not(CONFIG_PER_VMA_LOCK))]
        let _ = vma_addr;

        None
    }

    /// Acquire the mmap read lock.
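    ///
    /// # Examples
    ///
    /// A minimal sketch; the lock is released when the guard goes out of scope:
    ///
    /// ```ignore
    /// fn has_vma_at(mm: &MmWithUser, addr: usize) -> bool {
    ///     let guard = mm.mmap_read_lock();
    ///     let found = guard.vma_lookup(addr).is_some();
    ///     // `guard` is dropped here, releasing the mmap read lock.
    ///     found
    /// }
    /// ```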
    #[inline]
    pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmap_read_lock(self.as_raw()) };

        // INVARIANT: We just acquired the read lock.
        MmapReadGuard {
            mm: self,
            _nts: NotThreadSafe,
        }
    }

    /// Try to acquire the mmap read lock.
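    ///
    /// # Examples
    ///
    /// A sketch of the non-blocking pattern; on contention the lookup is skipped instead of
    /// sleeping on the lock:
    ///
    /// ```ignore
    /// fn try_lookup(mm: &MmWithUser, addr: usize) -> bool {
    ///     match mm.mmap_read_trylock() {
    ///         Some(guard) => guard.vma_lookup(addr).is_some(),
    ///         // Contended; retry later or use `mmap_read_lock` if sleeping is allowed.
    ///         None => false,
    ///     }
    /// }
    /// ```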
    #[inline]
    pub fn mmap_read_trylock(&self) -> Option<MmapReadGuard<'_>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) };

        if success {
            // INVARIANT: We just acquired the read lock.
            Some(MmapReadGuard {
                mm: self,
                _nts: NotThreadSafe,
            })
        } else {
            None
        }
    }
}

/// A guard for the mmap read lock.
///
/// # Invariants
///
/// This `MmapReadGuard` guard owns the mmap read lock.
pub struct MmapReadGuard<'a> {
    mm: &'a MmWithUser,
    // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread
    _nts: NotThreadSafe,
}

impl<'a> MmapReadGuard<'a> {
    /// Look up a vma at the given address.
    #[inline]
    pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
        // SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
        // method. Any value is okay for `vma_addr`.
        let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };

        if vma.is_null() {
            None
        } else {
            // SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
            //
            // Furthermore, the returned vma is still under the protection of the read lock guard
            // and can be used while the mmap read lock is still held. That the vma is not used
            // after the MmapReadGuard gets dropped is enforced by the borrow-checker.
            unsafe { Some(virt::VmaRef::from_raw(vma)) }
        }
    }
}

impl Drop for MmapReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
    }
}

/// A guard for the vma read lock.
///
/// # Invariants
///
/// This `VmaReadGuard` guard owns the vma read lock.
pub struct VmaReadGuard<'a> {
    vma: &'a VmaRef,
    // `vma_end_read` must be called on the same thread as where the lock was taken
    _nts: NotThreadSafe,
}

// Make all `VmaRef` methods available on `VmaReadGuard`.
impl Deref for VmaReadGuard<'_> {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        self.vma
    }
}

impl Drop for VmaReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
    }
}
345