xref: /linux/rust/kernel/alloc.rs (revision 9907e1df31c0f4bdcebe16de809121baa754e5b5)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Implementation of the kernel's memory allocation infrastructure.
4 
5 #[cfg(not(any(test, testlib)))]
6 pub mod allocator;
7 pub mod kbox;
8 pub mod kvec;
9 pub mod layout;
10 
11 #[cfg(any(test, testlib))]
12 pub mod allocator_test;
13 
14 #[cfg(any(test, testlib))]
15 pub use self::allocator_test as allocator;
16 
17 pub use self::kbox::Box;
18 pub use self::kbox::KBox;
19 pub use self::kbox::KVBox;
20 pub use self::kbox::VBox;
21 
22 pub use self::kvec::IntoIter;
23 pub use self::kvec::KVVec;
24 pub use self::kvec::KVec;
25 pub use self::kvec::VVec;
26 pub use self::kvec::Vec;
27 
/// Indicates an allocation error.
///
/// A zero-sized marker error type returned by [`Allocator`] methods when an
/// allocation request cannot be satisfied.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
31 
32 use crate::error::{code::EINVAL, Result};
33 use core::{alloc::Layout, ptr::NonNull};
34 
/// Flags to be used when allocating memory.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`flags`] module.
///
/// Internally this wraps the raw GFP bit mask handed to the kernel's
/// allocators (see [`Flags::as_raw`]).
#[derive(Clone, Copy, PartialEq)]
pub struct Flags(u32);
42 
43 impl Flags {
44     /// Get the raw representation of this flag.
45     pub(crate) fn as_raw(self) -> u32 {
46         self.0
47     }
48 
49     /// Check whether `flags` is contained in `self`.
50     pub fn contains(self, flags: Flags) -> bool {
51         (self & flags) == flags
52     }
53 }
54 
55 impl core::ops::BitOr for Flags {
56     type Output = Self;
57     fn bitor(self, rhs: Self) -> Self::Output {
58         Self(self.0 | rhs.0)
59     }
60 }
61 
62 impl core::ops::BitAnd for Flags {
63     type Output = Self;
64     fn bitand(self, rhs: Self) -> Self::Output {
65         Self(self.0 & rhs.0)
66     }
67 }
68 
69 impl core::ops::Not for Flags {
70     type Output = Self;
71     fn not(self) -> Self::Output {
72         Self(!self.0)
73     }
74 }
75 
/// Allocation flags.
///
/// These are meant to be used in functions that can allocate memory. Each
/// constant mirrors the corresponding C `GFP_*`/`__GFP_*` flag exposed via
/// `bindings`.
pub mod flags {
    use super::Flags;

    /// Zeroes out the allocated memory.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);

    /// Allow the allocation to be in high memory.
    ///
    /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
    /// be used with `kmalloc` and other similar methods.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);

    /// Users can not sleep and need the allocation to succeed.
    ///
    /// A lower watermark is applied to allow access to "atomic reserves". The current
    /// implementation doesn't support NMI and few other strict non-preemptive contexts (e.g.
    /// `raw_spin_lock`). The same applies to [`GFP_NOWAIT`].
    pub const GFP_ATOMIC: Flags = Flags(bindings::GFP_ATOMIC);

    /// Typical for kernel-internal allocations. The caller requires `ZONE_NORMAL` or a lower zone
    /// for direct access but can direct reclaim.
    pub const GFP_KERNEL: Flags = Flags(bindings::GFP_KERNEL);

    /// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
    pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);

    /// For kernel allocations that should not stall for direct reclaim, start physical IO or
    /// use any filesystem callback. It is very likely to fail to allocate memory, even for very
    /// small allocations.
    pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);

    /// Suppresses allocation failure reports.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
}
119 
/// Non Uniform Memory Access (NUMA) node identifier.
///
/// Holds either a validated, non-negative node id (constructed via
/// [`NumaNode::new`]) or the special [`NumaNode::NO_NODE`] value meaning
/// "no node preference".
#[derive(Clone, Copy, PartialEq)]
pub struct NumaNode(i32);
123 
124 impl NumaNode {
125     /// Create a new NUMA node identifier (non-negative integer).
126     ///
127     /// Returns [`EINVAL`] if a negative id or an id exceeding [`bindings::MAX_NUMNODES`] is
128     /// specified.
129     pub fn new(node: i32) -> Result<Self> {
130         // MAX_NUMNODES never exceeds 2**10 because NODES_SHIFT is 0..10.
131         if node < 0 || node >= bindings::MAX_NUMNODES as i32 {
132             return Err(EINVAL);
133         }
134         Ok(Self(node))
135     }
136 }
137 
/// Constant used to tell an [`Allocator`] that the caller does not care which
/// NUMA node the memory is allocated from.
impl NumaNode {
    /// No node preference.
    ///
    /// Pass this when the allocation may be satisfied from any NUMA node.
    pub const NO_NODE: NumaNode = NumaNode(bindings::NUMA_NO_NODE);
}
144 
/// The kernel's [`Allocator`] trait.
///
/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
/// via [`Layout`].
///
/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on
/// an object instance.
///
/// In order to be able to support `#[derive(CoercePointee)]` later on, we need to avoid a design
/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind
/// of `self` parameter.
///
/// # Safety
///
/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed.
///
/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`]
///   function of the same type.
///
/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
///   `# Guarantees` sections.
pub unsafe trait Allocator {
    /// Allocate memory based on `layout`, `flags` and `nid`.
    ///
    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
    /// constraints (i.e. minimum size and alignment as specified by `layout`).
    ///
    /// This function is equivalent to `realloc` when called with `None`.
    ///
    /// # Guarantees
    ///
    /// When the return value is `Ok(ptr)`, then `ptr` is
    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
    ///   [`Allocator::free`] or [`Allocator::realloc`],
    /// - aligned to `layout.align()`.
    ///
    /// Additionally, `Flags` are honored as documented in
    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
    fn alloc(layout: Layout, flags: Flags, nid: NumaNode) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
        // new memory allocation.
        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
    }

    /// Re-allocate an existing memory allocation to satisfy the requested `layout` and
    /// a specific NUMA node request to allocate the memory for.
    ///
    /// Systems employing a Non Uniform Memory Access (NUMA) architecture contain collections of
    /// hardware resources including processors, memory, and I/O buses, that comprise what is
    /// commonly known as a NUMA node.
    ///
    /// `nid` stands for NUMA id, i.e. NUMA node identifier, which is a non-negative integer
    /// if a node needs to be specified, or [`NumaNode::NO_NODE`] if the caller doesn't care.
    ///
    /// If the requested size is zero, `realloc` behaves equivalent to `free`.
    ///
    /// If the requested size is larger than the size of the existing allocation, a successful call
    /// to `realloc` guarantees that the new or grown buffer has at least `Layout::size` bytes, but
    /// may also be larger.
    ///
    /// If the requested size is smaller than the size of the existing allocation, `realloc` may or
    /// may not shrink the buffer; this is implementation specific to the allocator.
    ///
    /// On allocation failure, the existing buffer, if any, remains valid.
    ///
    /// The buffer is represented as `NonNull<[u8]>`.
    ///
    /// # Safety
    ///
    /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation
    ///   created by this [`Allocator`]; if `old_layout` is zero-sized `p` does not need to be a
    ///   pointer returned by this [`Allocator`].
    /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and
    ///   `old_layout` is ignored.
    /// - `old_layout` must match the `Layout` the allocation has been created with.
    ///
    /// # Guarantees
    ///
    /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then
    /// it additionally guarantees that:
    /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new
    ///   and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] ==
    ///   p[0..min(layout.size(), old_layout.size())]`.
    /// - when the return value is `Err(AllocError)`, then `ptr` is still valid.
    unsafe fn realloc(
        ptr: Option<NonNull<u8>>,
        layout: Layout,
        old_layout: Layout,
        flags: Flags,
        nid: NumaNode,
    ) -> Result<NonNull<[u8]>, AllocError>;

    /// Free an existing memory allocation.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to an existing and valid memory allocation created by this [`Allocator`];
    ///   if `layout` is zero-sized `ptr` does not need to be a pointer returned by this
    ///   [`Allocator`].
    /// - `layout` must match the `Layout` the allocation has been created with.
    /// - The memory allocation at `ptr` must never again be read from or written to.
    unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
        // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
        // smaller than or equal to the alignment previously used with this allocation.
        //
        // Freeing is implemented as a `realloc` to a zero-sized layout; the result (a dangling
        // pointer on success) is intentionally discarded.
        let _ = unsafe {
            Self::realloc(
                Some(ptr),
                Layout::new::<()>(),
                layout,
                Flags(0),
                NumaNode::NO_NODE,
            )
        };
    }
}
261 
/// Returns a properly aligned dangling pointer from the given `layout`.
///
/// The returned pointer carries the layout's alignment as its address, which
/// makes it correctly aligned without referring to any actual allocation.
pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> {
    // SAFETY: `layout.align()` is always a non-zero power of two, so the
    // resulting pointer is guaranteed to be non-null.
    unsafe { NonNull::new_unchecked(layout.align() as *mut u8) }
}
269