xref: /linux/rust/kernel/alloc.rs (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
// SPDX-License-Identifier: GPL-2.0

//! Implementation of the kernel's memory allocation infrastructure.

pub mod allocator;
pub mod kbox;
pub mod kvec;
pub mod layout;

pub use self::kbox::Box;
pub use self::kbox::KBox;
pub use self::kbox::KVBox;
pub use self::kbox::VBox;

pub use self::kvec::IntoIter;
pub use self::kvec::KVVec;
pub use self::kvec::KVec;
pub use self::kvec::VVec;
pub use self::kvec::Vec;

/// Indicates an allocation error.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;

use crate::error::{code::EINVAL, Result};
use core::{alloc::Layout, ptr::NonNull};

/// Flags to be used when allocating memory.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`flags`] module.
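///
/// # Examples
///
/// A minimal sketch of combining and querying flags (illustrative only; it uses the constants
/// from the [`flags`] module defined below):
///
/// ```
/// use kernel::alloc::flags;
///
/// // Request a zeroed, kernel-internal allocation.
/// let f = flags::GFP_KERNEL | flags::__GFP_ZERO;
///
/// // `contains` checks that every bit of its argument is also set in `f`.
/// assert!(f.contains(flags::__GFP_ZERO));
///
/// // `&` and `!` mask bits back out again.
/// assert!(!(f & !flags::__GFP_ZERO).contains(flags::__GFP_ZERO));
/// ```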
#[derive(Clone, Copy, PartialEq)]
pub struct Flags(u32);

impl Flags {
    /// Get the raw representation of this flag.
    pub(crate) fn as_raw(self) -> u32 {
        self.0
    }

    /// Check whether `flags` is contained in `self`.
    pub fn contains(self, flags: Flags) -> bool {
        (self & flags) == flags
    }
}

impl core::ops::BitOr for Flags {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Flags {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Flags {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

/// Allocation flags.
///
/// These are meant to be used in functions that can allocate memory.
pub mod flags {
    use super::Flags;

    /// Zeroes out the allocated memory.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);

    /// Allow the allocation to be in high memory.
    ///
    /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
    /// be used with `kmalloc` and other similar methods.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);

    /// Users cannot sleep and need the allocation to succeed.
    ///
    /// A lower watermark is applied to allow access to "atomic reserves". The current
    /// implementation doesn't support NMI and a few other strict non-preemptive contexts (e.g.
    /// `raw_spin_lock`). The same applies to [`GFP_NOWAIT`].
    pub const GFP_ATOMIC: Flags = Flags(bindings::GFP_ATOMIC);

    /// Typical for kernel-internal allocations. The caller requires `ZONE_NORMAL` or a lower zone
    /// for direct access but can direct reclaim.
    pub const GFP_KERNEL: Flags = Flags(bindings::GFP_KERNEL);

    /// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
    pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);

    /// For kernel allocations that should not stall for direct reclaim, start physical IO or
    /// use any filesystem callback. It is very likely to fail to allocate memory, even for very
    /// small allocations.
    pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);

    /// Suppresses allocation failure reports.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
}

/// Non-Uniform Memory Access (NUMA) node identifier.
#[derive(Clone, Copy, PartialEq)]
pub struct NumaNode(i32);

impl NumaNode {
    /// Create a new NUMA node identifier (non-negative integer).
    ///
    /// Returns [`EINVAL`] if a negative id or an id exceeding [`bindings::MAX_NUMNODES`] is
    /// specified.
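    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use kernel::alloc::NumaNode;
    ///
    /// // Node 0 is always in range, since `MAX_NUMNODES` is at least 1.
    /// let _node = NumaNode::new(0)?;
    ///
    /// // Negative identifiers are rejected; `NumaNode::NO_NODE` expresses "no preference".
    /// assert!(NumaNode::new(-1).is_err());
    /// # Ok::<(), kernel::error::Error>(())
    /// ```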
    pub fn new(node: i32) -> Result<Self> {
        // MAX_NUMNODES never exceeds 2**10 because NODES_SHIFT is 0..10.
        if node < 0 || node >= bindings::MAX_NUMNODES as i32 {
            return Err(EINVAL);
        }
        Ok(Self(node))
    }
}

/// Provides the constant used to tell an [`Allocator`] that the caller doesn't care which NUMA
/// node the memory is allocated from.
impl NumaNode {
    /// No node preference.
    pub const NO_NODE: NumaNode = NumaNode(bindings::NUMA_NO_NODE);
}

/// The kernel's [`Allocator`] trait.
///
/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
/// via [`Layout`].
///
/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on
/// an object instance.
///
/// In order to be able to support `#[derive(CoercePointee)]` later on, we need to avoid a design
/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind
/// of `self` parameter.
///
/// # Safety
///
/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed.
///
/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`]
///   function of the same type.
///
/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
///   `# Guarantees` sections.
pub unsafe trait Allocator {
    /// The minimum alignment satisfied by all allocations from this allocator.
    ///
    /// # Guarantees
    ///
    /// Any pointer allocated by this allocator is guaranteed to be aligned to `MIN_ALIGN` even if
    /// the requested layout has a smaller alignment.
    const MIN_ALIGN: usize;

    /// Allocate memory based on `layout`, `flags` and `nid`.
    ///
    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
    /// constraints (i.e. minimum size and alignment as specified by `layout`).
    ///
    /// This function is equivalent to calling `realloc` with `ptr == None`.
    ///
    /// # Guarantees
    ///
    /// When the return value is `Ok(ptr)`, then `ptr` is
    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
    ///   [`Allocator::free`] or [`Allocator::realloc`],
    /// - aligned to `layout.align()`.
    ///
    /// Additionally, `Flags` are honored as documented in
    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
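    ///
    /// # Examples
    ///
    /// A minimal allocation sketch (illustrative only; it assumes the `Kmalloc` allocator from
    /// the [`allocator`] module):
    ///
    /// ```
    /// use kernel::alloc::{allocator::Kmalloc, flags, Allocator, NumaNode};
    /// use core::alloc::Layout;
    ///
    /// let layout = Layout::new::<[u8; 16]>();
    ///
    /// // Allocate 16 zeroed bytes with no NUMA node preference.
    /// let ptr = Kmalloc::alloc(layout, flags::GFP_KERNEL | flags::__GFP_ZERO, NumaNode::NO_NODE)?;
    ///
    /// // SAFETY: `ptr` was allocated by `Kmalloc` with `layout` and is not used afterwards.
    /// unsafe { Kmalloc::free(ptr.cast(), layout) };
    /// # Ok::<(), kernel::alloc::AllocError>(())
    /// ```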
    fn alloc(layout: Layout, flags: Flags, nid: NumaNode) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
        // new memory allocation.
        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
    }

    /// Re-allocate an existing memory allocation to satisfy the requested `layout`, optionally
    /// allocating the memory from a specific NUMA node.
    ///
    /// Systems employing a Non-Uniform Memory Access (NUMA) architecture contain collections of
    /// hardware resources including processors, memory, and I/O buses, that comprise what is
    /// commonly known as a NUMA node.
    ///
    /// `nid` stands for NUMA id, i.e. NUMA node identifier, which is a non-negative integer
    /// if a node needs to be specified, or [`NumaNode::NO_NODE`] if the caller doesn't care.
    ///
    /// If the requested size is zero, `realloc` behaves equivalent to `free`.
    ///
    /// If the requested size is larger than the size of the existing allocation, a successful call
    /// to `realloc` guarantees that the new or grown buffer has at least `layout.size()` bytes, but
    /// may also be larger.
    ///
    /// If the requested size is smaller than the size of the existing allocation, `realloc` may or
    /// may not shrink the buffer; this is implementation specific to the allocator.
    ///
    /// On allocation failure, the existing buffer, if any, remains valid.
    ///
    /// The buffer is represented as `NonNull<[u8]>`.
    ///
    /// # Safety
    ///
    /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation
    ///   created by this [`Allocator`]; if `old_layout` is zero-sized, `p` does not need to be a
    ///   pointer returned by this [`Allocator`].
    /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and
    ///   `old_layout` is ignored.
    /// - `old_layout` must match the `Layout` the allocation has been created with.
    ///
    /// # Guarantees
    ///
    /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then
    /// it additionally guarantees that:
    /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new
    ///   and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] ==
    ///   p[0..min(layout.size(), old_layout.size())]`.
    /// - when the return value is `Err(AllocError)`, then `ptr` is still valid.
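    ///
    /// # Examples
    ///
    /// A growing re-allocation sketch (illustrative only; it assumes the `Kmalloc` allocator from
    /// the [`allocator`] module):
    ///
    /// ```
    /// use kernel::alloc::{allocator::Kmalloc, flags, Allocator, NumaNode};
    /// use core::alloc::Layout;
    ///
    /// let old_layout = Layout::new::<[u8; 16]>();
    /// let ptr = Kmalloc::alloc(old_layout, flags::GFP_KERNEL, NumaNode::NO_NODE)?;
    ///
    /// // Grow the buffer to 32 bytes; the first 16 bytes are preserved.
    /// let new_layout = Layout::new::<[u8; 32]>();
    /// // SAFETY: `ptr` was allocated by `Kmalloc` with `old_layout`.
    /// let ptr = unsafe {
    ///     Kmalloc::realloc(
    ///         Some(ptr.cast()),
    ///         new_layout,
    ///         old_layout,
    ///         flags::GFP_KERNEL,
    ///         NumaNode::NO_NODE,
    ///     )?
    /// };
    ///
    /// // SAFETY: `ptr` was re-allocated by `Kmalloc` with `new_layout` and is not used afterwards.
    /// unsafe { Kmalloc::free(ptr.cast(), new_layout) };
    /// # Ok::<(), kernel::alloc::AllocError>(())
    /// ```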
    unsafe fn realloc(
        ptr: Option<NonNull<u8>>,
        layout: Layout,
        old_layout: Layout,
        flags: Flags,
        nid: NumaNode,
    ) -> Result<NonNull<[u8]>, AllocError>;

    /// Free an existing memory allocation.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to an existing and valid memory allocation created by this
    ///   [`Allocator`]; if `layout` is zero-sized, `ptr` does not need to be a pointer returned by
    ///   this [`Allocator`].
    /// - `layout` must match the `Layout` the allocation has been created with.
    /// - The memory allocation at `ptr` must never again be read from or written to.
    unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
        // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
        // smaller than or equal to the alignment previously used with this allocation.
        let _ = unsafe {
            Self::realloc(
                Some(ptr),
                Layout::new::<()>(),
                layout,
                Flags(0),
                NumaNode::NO_NODE,
            )
        };
    }
}

/// Returns a properly aligned dangling pointer from the given `layout`.
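///
/// # Examples
///
/// A sketch of the resulting pointer (illustrative only; this helper is `pub(crate)`, so the
/// example is not compiled as a doctest):
///
/// ```ignore
/// let ptr = dangling_from_layout(Layout::new::<u64>());
///
/// // The pointer is non-null and aligned for `u64`, but dangling: its address equals the
/// // alignment (8) and it must not be dereferenced.
/// assert_eq!(ptr.as_ptr() as usize, 8);
/// ```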
pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> {
    let ptr = layout.align() as *mut u8;

    // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero.
    unsafe { NonNull::new_unchecked(ptr) }
}
270