// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Virtual memory.
//!
//! This module deals with managing a single VMA in the address space of a userspace process. Each
//! VMA corresponds to a region of memory that the userspace process can access, and the VMA lets
//! you control what happens when userspace reads or writes to that region of memory.
//!
//! The module has several different Rust types that all correspond to the C type called
//! `vm_area_struct`. The different structs represent what kind of access you have to the VMA, e.g.
//! [`VmaRef`] is used when you hold the mmap or vma read lock. Using the appropriate struct
//! ensures that you can't, for example, accidentally call a function that requires holding the
//! write lock when you only hold the read lock.
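//!
//! # Examples
//!
//! A minimal sketch (illustrative, not part of the original documentation) of reading basic
//! properties through [`VmaRef`], which only requires the mmap or vma read lock:
//!
//! ```
//! use kernel::mm::virt::{flags, VmaRef};
//!
//! /// Returns the length of the area and whether userspace mapped it as writable.
//! fn inspect(vma: &VmaRef) -> (usize, bool) {
//!     // `start` is inclusive and `end` is exclusive, so the difference is the length.
//!     let len = vma.end() - vma.start();
//!     let writable = (vma.flags() & flags::WRITE) != 0;
//!     (len, writable)
//! }
//! ```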

use crate::{
    bindings,
    error::{code::EINVAL, to_result, Result},
    mm::MmWithUser,
    page::Page,
    types::Opaque,
};

use core::ops::Deref;

/// A wrapper for the kernel's `struct vm_area_struct` with read access.
///
/// It represents an area of virtual memory.
///
/// # Invariants
///
/// The caller must hold the mmap read lock or the vma read lock.
#[repr(transparent)]
pub struct VmaRef {
    vma: Opaque<bindings::vm_area_struct>,
}

// Methods you can call when holding the mmap or vma read lock (or stronger). They must be usable
// no matter what the vma flags are.
impl VmaRef {
    /// Access a virtual memory area given a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vma` is valid for the duration of 'a, and that the mmap or vma
    /// read lock (or stronger) is held for at least the duration of 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(vma: *const bindings::vm_area_struct) -> &'a Self {
        // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
        unsafe { &*vma.cast() }
    }

    /// Returns a raw pointer to this area.
    #[inline]
    pub fn as_ptr(&self) -> *mut bindings::vm_area_struct {
        self.vma.get()
    }

    /// Access the underlying `mm_struct`.
    #[inline]
    pub fn mm(&self) -> &MmWithUser {
        // SAFETY: By the type invariants, this `vm_area_struct` is valid and we hold the mmap/vma
        // read lock or stronger. This implies that the underlying mm has a non-zero value of
        // `mm_users`.
        unsafe { MmWithUser::from_raw((*self.as_ptr()).vm_mm) }
    }

    /// Returns the flags associated with the virtual memory area.
    ///
    /// The possible flags are a combination of the constants in [`flags`].
    #[inline]
    pub fn flags(&self) -> vm_flags_t {
        // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
        // access is not a data race.
        unsafe { (*self.as_ptr()).__bindgen_anon_2.vm_flags }
    }

    /// Returns the (inclusive) start address of the virtual memory area.
    #[inline]
    pub fn start(&self) -> usize {
        // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
        // access is not a data race.
        unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_start }
    }

    /// Returns the (exclusive) end address of the virtual memory area.
    #[inline]
    pub fn end(&self) -> usize {
        // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
        // access is not a data race.
        unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_end }
    }

    /// Zap pages in the given page range.
    ///
    /// This clears page table mappings for the range at the leaf level, leaving all other page
    /// tables intact, and freeing any memory referenced by the VMA in this range. That is,
    /// anonymous memory is completely freed, file-backed memory has its reference count on page
    /// cache folios dropped, and any dirty data will still be written back to disk as usual.
    ///
    /// It may seem odd that we clear at the leaf level; this is, however, a product of the page
    /// table structure used to map physical memory into a virtual address space: each virtual
    /// address is decomposed into a series of array indices into page tables, which form a
    /// hierarchical page table level structure.
    ///
    /// As a result, each page table level maps a multiple of the range mapped by the level below
    /// it, and thus spans ever-increasing ranges of pages. At the leaf or PTE level, we map the
    /// actual physical memory.
    ///
    /// It is here where a zap operates, as it is the only place we can be certain of clearing
    /// without impacting any other virtual mappings. It is an implementation detail as to whether
    /// the kernel goes further in freeing unused page tables, but for the purposes of this
    /// operation we must only assume that the leaf level is cleared.
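    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative, not from the original source) that zaps every page
    /// currently mapped by the VMA:
    ///
    /// ```
    /// use kernel::mm::virt::VmaRef;
    ///
    /// fn zap_all(vma: &VmaRef) {
    ///     // The range covers the whole VMA, so the internal bounds check cannot fail.
    ///     vma.zap_page_range_single(vma.start(), vma.end() - vma.start());
    /// }
    /// ```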
    #[inline]
    pub fn zap_page_range_single(&self, address: usize, size: usize) {
        let (end, did_overflow) = address.overflowing_add(size);
        if did_overflow || address < self.start() || self.end() < end {
            // TODO: call WARN_ONCE once Rust version of it is added
            return;
        }

        // SAFETY: By the type invariants, the caller has read access to this VMA, which is
        // sufficient for this method call. This method has no requirements on the vma flags. The
        // address range is checked to be within the vma.
        unsafe {
            bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
        };
    }

    /// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise
    /// returns `None`.
    ///
    /// This can be used to access methods that require [`VM_MIXEDMAP`] to be set.
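    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative, not from the original source) of inserting a page only
    /// when the VMA supports it:
    ///
    /// ```
    /// use kernel::error::{code::EINVAL, Result};
    /// use kernel::mm::virt::VmaRef;
    /// use kernel::page::Page;
    ///
    /// fn insert_if_mixedmap(vma: &VmaRef, addr: usize, page: &Page) -> Result {
    ///     match vma.as_mixedmap_vma() {
    ///         Some(mixed) => mixed.vm_insert_page(addr, page),
    ///         // Error choice is illustrative; a driver may prefer another code.
    ///         None => Err(EINVAL),
    ///     }
    /// }
    /// ```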
    ///
    /// [`VM_MIXEDMAP`]: flags::MIXEDMAP
    #[inline]
    pub fn as_mixedmap_vma(&self) -> Option<&VmaMixedMap> {
        if self.flags() & flags::MIXEDMAP != 0 {
            // SAFETY: We just checked that `VM_MIXEDMAP` is set. All other requirements are
            // satisfied by the type invariants of `VmaRef`.
            Some(unsafe { VmaMixedMap::from_raw(self.as_ptr()) })
        } else {
            None
        }
    }
}

/// A wrapper for the kernel's `struct vm_area_struct` with read access and [`VM_MIXEDMAP`] set.
///
/// It represents an area of virtual memory.
///
/// This struct is identical to [`VmaRef`] except that it must only be used when the
/// [`VM_MIXEDMAP`] flag is set on the vma.
///
/// # Invariants
///
/// The caller must hold the mmap read lock or the vma read lock. The `VM_MIXEDMAP` flag must be
/// set.
///
/// [`VM_MIXEDMAP`]: flags::MIXEDMAP
#[repr(transparent)]
pub struct VmaMixedMap {
    vma: VmaRef,
}

// Make all `VmaRef` methods available on `VmaMixedMap`.
impl Deref for VmaMixedMap {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        &self.vma
    }
}

impl VmaMixedMap {
    /// Access a virtual memory area given a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vma` is valid for the duration of 'a, and that the mmap read lock
    /// (or stronger) is held for at least the duration of 'a. The `VM_MIXEDMAP` flag must be set.
    #[inline]
    pub unsafe fn from_raw<'a>(vma: *const bindings::vm_area_struct) -> &'a Self {
        // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
        unsafe { &*vma.cast() }
    }

    /// Maps a single page at the given address within the virtual memory area.
    ///
    /// This operation does not take ownership of the page.
    #[inline]
    pub fn vm_insert_page(&self, address: usize, page: &Page) -> Result {
        // SAFETY: By the type invariants of `Self`, the caller has read access and has verified
        // that `VM_MIXEDMAP` is set. By the invariants of `Page`, the page has order 0.
        to_result(unsafe { bindings::vm_insert_page(self.as_ptr(), address, page.as_ptr()) })
    }
}

/// A configuration object for setting up a VMA in an `f_ops->mmap()` hook.
///
/// The `f_ops->mmap()` hook is called when a new VMA is being created, and the hook is able to
/// configure the VMA in various ways to fit the driver that owns it. Using `VmaNew` indicates that
/// you are allowed to perform operations on the VMA that can only be performed before the VMA is
/// fully initialized.
///
/// # Invariants
///
/// For the duration of 'a, the referenced vma must be undergoing initialization in an
/// `f_ops->mmap()` hook.
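///
/// # Examples
///
/// A minimal sketch (assuming a driver that shares a single read-only page with userspace; not
/// from the original source) of configuring a new VMA inside an `f_ops->mmap()` hook:
///
/// ```
/// use kernel::error::Result;
/// use kernel::mm::virt::VmaNew;
/// use kernel::page::Page;
///
/// fn setup_vma(vma: &VmaNew, page: &Page) -> Result {
///     // Tell the rest of the kernel not to touch the pages behind this mapping.
///     vma.set_io();
///     vma.set_dontexpand();
///     // Fails with `EINVAL` if userspace asked for a writable mapping.
///     vma.try_clear_maywrite()?;
///     // `set_mixedmap` returns a `&VmaMixedMap`, which provides `vm_insert_page`.
///     vma.set_mixedmap().vm_insert_page(vma.start(), page)
/// }
/// ```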
pub struct VmaNew {
    vma: VmaRef,
}

// Make all `VmaRef` methods available on `VmaNew`.
impl Deref for VmaNew {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        &self.vma
    }
}

impl VmaNew {
    /// Access a virtual memory area given a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vma` is undergoing initial vma setup for the duration of 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(vma: *mut bindings::vm_area_struct) -> &'a Self {
        // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
        unsafe { &*vma.cast() }
    }

    /// Internal method for updating the vma flags.
    ///
    /// # Safety
    ///
    /// This must not be used to set the flags to an invalid value.
    #[inline]
    unsafe fn update_flags(&self, set: vm_flags_t, unset: vm_flags_t) {
        let mut flags = self.flags();
        flags |= set;
        flags &= !unset;

        // SAFETY: This is not a data race: the vma is undergoing initial setup, so it's not yet
        // shared. Additionally, `VmaNew` is `!Sync`, so it cannot be used to write in parallel.
        // The caller promises that this does not set the flags to an invalid value.
        unsafe { (*self.as_ptr()).__bindgen_anon_2.__vm_flags = flags };
    }

    /// Set the `VM_MIXEDMAP` flag on this vma.
    ///
    /// This enables the vma to contain both `struct page` and pure PFN pages. Returns a reference
    /// that can be used to call `vm_insert_page` on the vma.
    #[inline]
    pub fn set_mixedmap(&self) -> &VmaMixedMap {
        // SAFETY: We don't yet provide a way to set VM_PFNMAP, so this cannot put the flags in an
        // invalid state.
        unsafe { self.update_flags(flags::MIXEDMAP, 0) };

        // SAFETY: We just set `VM_MIXEDMAP` on the vma.
        unsafe { VmaMixedMap::from_raw(self.vma.as_ptr()) }
    }

    /// Set the `VM_IO` flag on this vma.
    ///
    /// This is used for memory mapped IO and similar. The flag tells other parts of the kernel to
    /// avoid looking at the pages. For memory mapped IO this is useful as accesses to the pages
    /// could have side effects.
    #[inline]
    pub fn set_io(&self) {
        // SAFETY: Setting the VM_IO flag is always okay.
        unsafe { self.update_flags(flags::IO, 0) };
    }

    /// Set the `VM_DONTEXPAND` flag on this vma.
    ///
    /// This prevents the vma from being expanded with `mremap()`.
    #[inline]
    pub fn set_dontexpand(&self) {
        // SAFETY: Setting the VM_DONTEXPAND flag is always okay.
        unsafe { self.update_flags(flags::DONTEXPAND, 0) };
    }

    /// Set the `VM_DONTCOPY` flag on this vma.
    ///
    /// This prevents the vma from being copied on fork. This option is only permanent if `VM_IO`
    /// is set.
    #[inline]
    pub fn set_dontcopy(&self) {
        // SAFETY: Setting the VM_DONTCOPY flag is always okay.
        unsafe { self.update_flags(flags::DONTCOPY, 0) };
    }

    /// Set the `VM_DONTDUMP` flag on this vma.
    ///
    /// This prevents the vma from being included in core dumps. This option is only permanent if
    /// `VM_IO` is set.
    #[inline]
    pub fn set_dontdump(&self) {
        // SAFETY: Setting the VM_DONTDUMP flag is always okay.
        unsafe { self.update_flags(flags::DONTDUMP, 0) };
    }

    /// Returns whether `VM_READ` is set.
    ///
    /// This flag indicates whether userspace is mapping this vma as readable.
    #[inline]
    pub fn readable(&self) -> bool {
        (self.flags() & flags::READ) != 0
    }

    /// Try to clear the `VM_MAYREAD` flag, failing if `VM_READ` is set.
    ///
    /// This flag indicates whether userspace is allowed to make this vma readable with
    /// `mprotect()`.
    ///
    /// Note that this operation is irreversible. Once `VM_MAYREAD` has been cleared, it can never
    /// be set again.
    #[inline]
    pub fn try_clear_mayread(&self) -> Result {
        if self.readable() {
            return Err(EINVAL);
        }
        // SAFETY: Clearing `VM_MAYREAD` is okay when `VM_READ` is not set.
        unsafe { self.update_flags(0, flags::MAYREAD) };
        Ok(())
    }

    /// Returns whether `VM_WRITE` is set.
    ///
    /// This flag indicates whether userspace is mapping this vma as writable.
    #[inline]
    pub fn writable(&self) -> bool {
        (self.flags() & flags::WRITE) != 0
    }

    /// Try to clear the `VM_MAYWRITE` flag, failing if `VM_WRITE` is set.
    ///
    /// This flag indicates whether userspace is allowed to make this vma writable with
    /// `mprotect()`.
    ///
    /// Note that this operation is irreversible. Once `VM_MAYWRITE` has been cleared, it can never
    /// be set again.
    #[inline]
    pub fn try_clear_maywrite(&self) -> Result {
        if self.writable() {
            return Err(EINVAL);
        }
        // SAFETY: Clearing `VM_MAYWRITE` is okay when `VM_WRITE` is not set.
        unsafe { self.update_flags(0, flags::MAYWRITE) };
        Ok(())
    }

    /// Returns whether `VM_EXEC` is set.
    ///
    /// This flag indicates whether userspace is mapping this vma as executable.
    #[inline]
    pub fn executable(&self) -> bool {
        (self.flags() & flags::EXEC) != 0
    }

    /// Try to clear the `VM_MAYEXEC` flag, failing if `VM_EXEC` is set.
    ///
    /// This flag indicates whether userspace is allowed to make this vma executable with
    /// `mprotect()`.
    ///
    /// Note that this operation is irreversible. Once `VM_MAYEXEC` has been cleared, it can never
    /// be set again.
    #[inline]
    pub fn try_clear_mayexec(&self) -> Result {
        if self.executable() {
            return Err(EINVAL);
        }
        // SAFETY: Clearing `VM_MAYEXEC` is okay when `VM_EXEC` is not set.
        unsafe { self.update_flags(0, flags::MAYEXEC) };
        Ok(())
    }
}

/// The integer type used for vma flags.
#[doc(inline)]
pub use bindings::vm_flags_t;

/// All possible flags for [`VmaRef`].
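///
/// # Examples
///
/// A minimal sketch (illustrative, not from the original source) of testing a VMA's flags
/// against a mask built from these constants:
///
/// ```
/// use kernel::mm::virt::{flags, VmaRef};
///
/// /// Returns whether the area is a shared, writable mapping.
/// fn is_shared_writable(vma: &VmaRef) -> bool {
///     let mask = flags::SHARED | flags::WRITE;
///     (vma.flags() & mask) == mask
/// }
/// ```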
pub mod flags {
    use super::vm_flags_t;
    use crate::bindings;

    /// No flags are set.
    pub const NONE: vm_flags_t = bindings::VM_NONE as _;

    /// Mapping allows reads.
    pub const READ: vm_flags_t = bindings::VM_READ as _;

    /// Mapping allows writes.
    pub const WRITE: vm_flags_t = bindings::VM_WRITE as _;

    /// Mapping allows execution.
    pub const EXEC: vm_flags_t = bindings::VM_EXEC as _;

    /// Mapping is shared.
    pub const SHARED: vm_flags_t = bindings::VM_SHARED as _;

    /// Mapping may be updated to allow reads.
    pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as _;

    /// Mapping may be updated to allow writes.
    pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as _;

    /// Mapping may be updated to allow execution.
    pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as _;

    /// Mapping may be updated to be shared.
    pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as _;

    /// Page-ranges managed without `struct page`, just pure PFN.
    pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as _;

    /// Memory mapped I/O or similar.
    pub const IO: vm_flags_t = bindings::VM_IO as _;

    /// Do not copy this vma on fork.
    pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as _;

    /// Cannot expand with `mremap()`.
    pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as _;

    /// Lock the pages covered when they are faulted in.
    pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as _;

    /// Is a VM accounted object.
    pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as _;

    /// Should the VM suppress accounting.
    pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as _;

    /// Huge TLB page VM.
    pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as _;

    /// Synchronous page faults. (DAX-specific)
    pub const SYNC: vm_flags_t = bindings::VM_SYNC as _;

    /// Architecture-specific flag.
    pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as _;

    /// Wipe VMA contents in child on fork.
    pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as _;

    /// Do not include in the core dump.
    pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as _;

    /// Not soft dirty clean area.
    pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as _;

    /// Can contain `struct page` and pure PFN pages.
    pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as _;

    /// `MADV_HUGEPAGE` marked this vma.
    pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as _;

    /// `MADV_NOHUGEPAGE` marked this vma.
    pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as _;

    /// KSM may merge identical pages.
    pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as _;
}