1 // SPDX-License-Identifier: GPL-2.0
2
3 //! Maple trees.
4 //!
5 //! C header: [`include/linux/maple_tree.h`](srctree/include/linux/maple_tree.h)
6 //!
7 //! Reference: <https://docs.kernel.org/core-api/maple_tree.html>
8
9 use core::{
10 marker::PhantomData,
11 ops::{Bound, RangeBounds},
12 ptr,
13 };
14
15 use kernel::{
16 alloc::Flags,
17 error::to_result,
18 prelude::*,
19 types::{ForeignOwnable, Opaque},
20 };
21
/// A maple tree optimized for storing non-overlapping ranges.
///
/// # Invariants
///
/// Each range in the maple tree owns an instance of `T`.
#[pin_data(PinnedDrop)]
#[repr(transparent)]
pub struct MapleTree<T: ForeignOwnable> {
    /// The underlying C `maple_tree`, wrapped in [`Opaque`] since it is initialized and mutated
    /// through raw pointers by C code.
    #[pin]
    tree: Opaque<bindings::maple_tree>,
    /// Marks that this tree logically owns `T` values (stored as foreign pointers in the tree).
    _p: PhantomData<T>,
}
34
/// A maple tree with `MT_FLAGS_ALLOC_RANGE` set.
///
/// All methods on [`MapleTree`] are also accessible on this type.
#[pin_data]
#[repr(transparent)]
pub struct MapleTreeAlloc<T: ForeignOwnable> {
    /// The wrapped tree; initialized with the allocation-range flag in [`MapleTreeAlloc::new`].
    #[pin]
    tree: MapleTree<T>,
}
44
45 // Make MapleTree methods usable on MapleTreeAlloc.
46 impl<T: ForeignOwnable> core::ops::Deref for MapleTreeAlloc<T> {
47 type Target = MapleTree<T>;
48
49 #[inline]
deref(&self) -> &MapleTree<T>50 fn deref(&self) -> &MapleTree<T> {
51 &self.tree
52 }
53 }
54
/// Normalize a Rust range into the inclusive `(first, last)` pair used by the maple tree C API.
///
/// Returns `None` when a bound cannot be represented in `usize` (an exclusive bound at the
/// numeric limit) or when the resulting range would be empty.
#[inline]
fn to_maple_range(range: impl RangeBounds<usize>) -> Option<(usize, usize)> {
    let first = match range.start_bound() {
        Bound::Unbounded => 0,
        Bound::Included(&start) => start,
        Bound::Excluded(&start) => start.checked_add(1)?,
    };

    let last = match range.end_bound() {
        Bound::Unbounded => usize::MAX,
        Bound::Included(&end) => end,
        Bound::Excluded(&end) => end.checked_sub(1)?,
    };

    (first <= last).then_some((first, last))
}
75
impl<T: ForeignOwnable> MapleTree<T> {
    /// Create a new maple tree.
    ///
    /// The tree will use the regular implementation with a higher branching factor, rather than
    /// the allocation tree.
    #[inline]
    pub fn new() -> impl PinInit<Self> {
        pin_init!(MapleTree {
            // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
            // destroyed in Drop before the memory location becomes invalid.
            tree <- Opaque::ffi_init(|slot| unsafe { bindings::mt_init_flags(slot, 0) }),
            _p: PhantomData,
        })
    }

    /// Insert the value at the given index.
    ///
    /// # Errors
    ///
    /// If the maple tree already contains a range using the given index, then this call will
    /// return an [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let the_answer = KBox::new(42, GFP_KERNEL)?;
    ///
    /// // These calls will succeed.
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(101, twenty, GFP_KERNEL)?;
    ///
    /// // This will fail because the index is already in use.
    /// assert_eq!(
    ///     tree.insert(100, the_answer, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::Occupied,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn insert(&self, index: usize, value: T, gfp: Flags) -> Result<(), InsertError<T>> {
        // A single index is the degenerate inclusive range `index..=index`.
        self.insert_range(index..=index, value, gfp)
    }

    /// Insert a value to the specified range, failing on overlap.
    ///
    /// This accepts the usual types of Rust ranges using the `..` and `..=` syntax for exclusive
    /// and inclusive ranges respectively. The range must not be empty, and must not overlap with
    /// any existing range.
    ///
    /// # Errors
    ///
    /// If the maple tree already contains an overlapping range, then this call will return an
    /// [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails or if the
    /// requested range is invalid (e.g. empty).
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let the_answer = KBox::new(42, GFP_KERNEL)?;
    /// let hundred = KBox::new(100, GFP_KERNEL)?;
    ///
    /// // Insert the value 10 at the indices 100 to 499.
    /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
    ///
    /// // Insert the value 20 at the indices 500 to 1000.
    /// tree.insert_range(500..=1000, twenty, GFP_KERNEL)?;
    ///
    /// // This will fail due to overlap with the previous range on index 1000.
    /// assert_eq!(
    ///     tree.insert_range(1000..1200, the_answer, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::Occupied,
    /// );
    ///
    /// // When using .. to specify the range, you must be careful to ensure that the range is
    /// // non-empty.
    /// assert_eq!(
    ///     tree.insert_range(72..72, hundred, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::InvalidRequest,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    pub fn insert_range<R>(&self, range: R, value: T, gfp: Flags) -> Result<(), InsertError<T>>
    where
        R: RangeBounds<usize>,
    {
        // Reject unrepresentable or empty ranges up front; ownership of `value` is handed back
        // to the caller inside the error.
        let Some((first, last)) = to_maple_range(range) else {
            return Err(InsertError {
                value,
                cause: InsertErrorKind::InvalidRequest,
            });
        };

        // Ownership of `value` is transferred into the tree as a raw pointer; it is reclaimed
        // below if the C call fails.
        let ptr = T::into_foreign(value);

        // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
        let res = to_result(unsafe {
            bindings::mtree_insert_range(self.tree.get(), first, last, ptr, gfp.as_raw())
        });

        if let Err(err) = res {
            // SAFETY: As `mtree_insert_range` failed, it is safe to take back ownership.
            let value = unsafe { T::from_foreign(ptr) };

            // Translate the C errno into a typed cause; anything other than ENOMEM/EEXIST is
            // reported as an invalid request.
            let cause = if err == ENOMEM {
                InsertErrorKind::AllocError(kernel::alloc::AllocError)
            } else if err == EEXIST {
                InsertErrorKind::Occupied
            } else {
                InsertErrorKind::InvalidRequest
            };
            Err(InsertError { value, cause })
        } else {
            Ok(())
        }
    }

    /// Erase the range containing the given index.
    ///
    /// Returns ownership of the erased value, or `None` if no range contains the index.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    ///
    /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
    /// tree.insert(67, twenty, GFP_KERNEL)?;
    ///
    /// assert_eq!(tree.erase(67).map(|v| *v), Some(20));
    /// assert_eq!(tree.erase(275).map(|v| *v), Some(10));
    ///
    /// // The previous call erased the entire range, not just index 275.
    /// assert!(tree.erase(127).is_none());
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn erase(&self, index: usize) -> Option<T> {
        // SAFETY: `self.tree` contains a valid maple tree.
        let ret = unsafe { bindings::mtree_erase(self.tree.get(), index) };

        // SAFETY: If the pointer is not null, then we took ownership of a valid instance of `T`
        // from the tree.
        unsafe { T::try_from_foreign(ret) }
    }

    /// Lock the internal spinlock.
    ///
    /// The returned guard releases the lock when dropped.
    #[inline]
    pub fn lock(&self) -> MapleGuard<'_, T> {
        // SAFETY: It's safe to lock the spinlock in a maple tree.
        unsafe { bindings::spin_lock(self.ma_lock()) };

        // INVARIANT: We just took the spinlock.
        MapleGuard(self)
    }

    /// Return a raw pointer to the spinlock embedded in the C `maple_tree` struct.
    #[inline]
    fn ma_lock(&self) -> *mut bindings::spinlock_t {
        // SAFETY: This pointer offset operation stays in-bounds.
        let lock_ptr = unsafe { &raw mut (*self.tree.get()).__bindgen_anon_1.ma_lock };
        // Cast from the bindgen-generated union-field pointer type to the pointer type that the
        // spin_lock/spin_unlock helpers expect.
        lock_ptr.cast()
    }

    /// Free all `T` instances in this tree.
    ///
    /// # Safety
    ///
    /// This frees Rust data referenced by the maple tree without removing it from the maple tree,
    /// leaving it in an invalid state. The caller must ensure that this invalid state cannot be
    /// observed by the end-user.
    unsafe fn free_all_entries(self: Pin<&mut Self>) {
        // SAFETY: The caller provides exclusive access to the entire maple tree, so we have
        // exclusive access to the entire maple tree despite not holding the lock.
        let mut ma_state = unsafe { MaState::new_raw(self.into_ref().get_ref(), 0, usize::MAX) };

        loop {
            // This uses the raw accessor because we're destroying pointers without removing them
            // from the maple tree, which is only valid because this is the destructor.
            let ptr = ma_state.mas_find_raw(usize::MAX);
            if ptr.is_null() {
                break;
            }
            // SAFETY: By the type invariants, this pointer references a valid value of type `T`.
            // By the safety requirements, it is okay to free it without removing it from the maple
            // tree.
            drop(unsafe { T::from_foreign(ptr) });
        }
    }
}
279
#[pinned_drop]
impl<T: ForeignOwnable> PinnedDrop for MapleTree<T> {
    #[inline]
    fn drop(mut self: Pin<&mut Self>) {
        // `Pin<&mut Self>` provides exclusive access to the tree, so no locking is needed here.
        //
        // We only iterate the tree if the Rust value has a destructor.
        if core::mem::needs_drop::<T>() {
            // SAFETY: Other than the below `mtree_destroy` call, the tree will not be accessed
            // after this call.
            unsafe { self.as_mut().free_all_entries() };
        }

        // SAFETY: The tree is valid, and will not be accessed after this call.
        unsafe { bindings::mtree_destroy(self.tree.get()) };
    }
}
295
/// A reference to a [`MapleTree`] that owns the inner lock.
///
/// Dropping this guard releases the spinlock.
///
/// # Invariants
///
/// This guard owns the inner spinlock.
#[must_use = "if unused, the lock will be immediately unlocked"]
pub struct MapleGuard<'tree, T: ForeignOwnable>(&'tree MapleTree<T>);
303
impl<'tree, T: ForeignOwnable> Drop for MapleGuard<'tree, T> {
    // Releases the spinlock taken by `MapleTree::lock`.
    #[inline]
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we hold this spinlock.
        unsafe { bindings::spin_unlock(self.0.ma_lock()) };
    }
}
311
impl<'tree, T: ForeignOwnable> MapleGuard<'tree, T> {
    /// Create a [`MaState`] protected by this lock guard.
    ///
    /// The state is initialized with `index = first` and `last = end`; see [`MaState`].
    pub fn ma_state(&mut self, first: usize, end: usize) -> MaState<'_, T> {
        // SAFETY: The `MaState` borrows this `MapleGuard`, so it can also borrow the `MapleGuard`s
        // read/write permissions to the maple tree.
        unsafe { MaState::new_raw(self.0, first, end) }
    }

    /// Load the value at the given index.
    ///
    /// # Examples
    ///
    /// Read the value while holding the spinlock.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// let mut lock = tree.lock();
    /// assert_eq!(lock.load(100).map(|v| *v), Some(10));
    /// assert_eq!(lock.load(200).map(|v| *v), Some(20));
    /// assert_eq!(lock.load(300).map(|v| *v), None);
    /// # Ok::<_, Error>(())
    /// ```
    ///
    /// Increment refcount under the lock, to keep value alive afterwards.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    /// use kernel::sync::Arc;
    ///
    /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = Arc::new(10, GFP_KERNEL)?;
    /// let twenty = Arc::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// // Briefly take the lock to increment the refcount.
    /// let value = tree.lock().load(100).map(Arc::from);
    ///
    /// // At this point, another thread might remove the value.
    /// tree.erase(100);
    ///
    /// // But we can still access it because we took a refcount.
    /// assert_eq!(value.map(|v| *v), Some(10));
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn load(&mut self, index: usize) -> Option<T::BorrowedMut<'_>> {
        // SAFETY: `self.tree` contains a valid maple tree.
        let ret = unsafe { bindings::mtree_load(self.0.tree.get(), index) };
        if ret.is_null() {
            return None;
        }

        // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It is
        // safe to borrow the instance mutably because the signature of this function enforces that
        // the mutable borrow is not used after the spinlock is dropped.
        Some(unsafe { T::borrow_mut(ret) })
    }
}
380
impl<T: ForeignOwnable> MapleTreeAlloc<T> {
    /// Create a new allocation tree.
    pub fn new() -> impl PinInit<Self> {
        let tree = pin_init!(MapleTree {
            // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
            // destroyed in Drop before the memory location becomes invalid.
            tree <- Opaque::ffi_init(|slot| unsafe {
                bindings::mt_init_flags(slot, bindings::MT_FLAGS_ALLOC_RANGE)
            }),
            _p: PhantomData,
        });

        pin_init!(MapleTreeAlloc { tree <- tree })
    }

    /// Insert an entry with the given size somewhere in the given range.
    ///
    /// The maple tree will search for a location in the given range where there is space to insert
    /// the new range. If there is not enough available space, then an error will be returned.
    ///
    /// The index of the new range is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{MapleTreeAlloc, AllocErrorKind};
    ///
    /// let tree = KBox::pin_init(MapleTreeAlloc::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let thirty = KBox::new(30, GFP_KERNEL)?;
    /// let hundred = KBox::new(100, GFP_KERNEL)?;
    ///
    /// // Allocate three ranges.
    /// let idx1 = tree.alloc_range(100, ten, ..1000, GFP_KERNEL)?;
    /// let idx2 = tree.alloc_range(100, twenty, ..1000, GFP_KERNEL)?;
    /// let idx3 = tree.alloc_range(100, thirty, ..1000, GFP_KERNEL)?;
    ///
    /// assert_eq!(idx1, 0);
    /// assert_eq!(idx2, 100);
    /// assert_eq!(idx3, 200);
    ///
    /// // This will fail because the remaining space is too small.
    /// assert_eq!(
    ///     tree.alloc_range(800, hundred, ..1000, GFP_KERNEL).unwrap_err().cause,
    ///     AllocErrorKind::Busy,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    pub fn alloc_range<R>(
        &self,
        size: usize,
        value: T,
        range: R,
        gfp: Flags,
    ) -> Result<usize, AllocError<T>>
    where
        R: RangeBounds<usize>,
    {
        // Reject unrepresentable or empty ranges up front; ownership of `value` is handed back
        // to the caller inside the error.
        let Some((min, max)) = to_maple_range(range) else {
            return Err(AllocError {
                value,
                cause: AllocErrorKind::InvalidRequest,
            });
        };

        // Ownership of `value` is transferred into the tree as a raw pointer; it is reclaimed
        // below if the C call fails.
        let ptr = T::into_foreign(value);
        // Out parameter: `mtree_alloc_range` writes the chosen start index here.
        let mut index = 0;

        // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
        let res = to_result(unsafe {
            bindings::mtree_alloc_range(
                self.tree.tree.get(),
                &mut index,
                ptr,
                size,
                min,
                max,
                gfp.as_raw(),
            )
        });

        if let Err(err) = res {
            // SAFETY: As `mtree_alloc_range` failed, it is safe to take back ownership.
            let value = unsafe { T::from_foreign(ptr) };

            // Translate the C errno into a typed cause; anything other than ENOMEM/EBUSY is
            // reported as an invalid request.
            let cause = if err == ENOMEM {
                AllocErrorKind::AllocError(kernel::alloc::AllocError)
            } else if err == EBUSY {
                AllocErrorKind::Busy
            } else {
                AllocErrorKind::InvalidRequest
            };
            Err(AllocError { value, cause })
        } else {
            Ok(index)
        }
    }
}
481
/// A helper type used for navigating a [`MapleTree`].
///
/// # Invariants
///
/// For the duration of `'tree`:
///
/// * The `ma_state` references a valid `MapleTree<T>`.
/// * The `ma_state` has read/write access to the tree.
pub struct MaState<'tree, T: ForeignOwnable> {
    /// The C iterator state; `tree`, `index` and `last` are set in [`MaState::new_raw`].
    state: bindings::ma_state,
    /// Borrows the tree mutably so that this state cannot outlive it or coexist with another
    /// writer.
    _phantom: PhantomData<&'tree mut MapleTree<T>>,
}
494
impl<'tree, T: ForeignOwnable> MaState<'tree, T> {
    /// Initialize a new `MaState` with the given tree.
    ///
    /// # Safety
    ///
    /// The caller must ensure that this `MaState` has read/write access to the maple tree.
    #[inline]
    unsafe fn new_raw(mt: &'tree MapleTree<T>, first: usize, end: usize) -> Self {
        // INVARIANT:
        // * Having a reference ensures that the `MapleTree<T>` is valid for `'tree`.
        // * The caller ensures that we have read/write access.
        Self {
            state: bindings::ma_state {
                tree: mt.tree.get(),
                index: first,
                last: end,
                node: ptr::null_mut(),
                // `ma_start` marks the state as not yet positioned in the tree.
                status: bindings::maple_status_ma_start,
                min: 0,
                max: usize::MAX,
                alloc: ptr::null_mut(),
                mas_flags: 0,
                store_type: bindings::store_type_wr_invalid,
                ..Default::default()
            },
            _phantom: PhantomData,
        }
    }

    /// Return a raw pointer to the inner `ma_state` for passing to C helpers.
    #[inline]
    fn as_raw(&mut self) -> *mut bindings::ma_state {
        &raw mut self.state
    }

    /// Raw variant of [`MaState::find`] that returns the foreign pointer without borrowing it.
    #[inline]
    fn mas_find_raw(&mut self, max: usize) -> *mut c_void {
        // SAFETY: By the type invariants, the `ma_state` is active and we have read/write access
        // to the tree.
        unsafe { bindings::mas_find(self.as_raw(), max) }
    }

    /// Find the next entry in the maple tree.
    ///
    /// # Examples
    ///
    /// Iterate the maple tree.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    /// use kernel::sync::Arc;
    ///
    /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = Arc::new(10, GFP_KERNEL)?;
    /// let twenty = Arc::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// let mut ma_lock = tree.lock();
    /// let mut iter = ma_lock.ma_state(0, usize::MAX);
    ///
    /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(10));
    /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(20));
    /// assert!(iter.find(usize::MAX).is_none());
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn find(&mut self, max: usize) -> Option<T::BorrowedMut<'_>> {
        let ret = self.mas_find_raw(max);
        if ret.is_null() {
            return None;
        }

        // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It's
        // safe to access it mutably as the returned reference borrows this `MaState`, and the
        // `MaState` has read/write access to the maple tree.
        Some(unsafe { T::borrow_mut(ret) })
    }
}
574
/// Error type for failure to insert a new value.
pub struct InsertError<T> {
    /// The value that could not be inserted, returned so the caller can reuse or drop it.
    pub value: T,
    /// The reason for the failure to insert.
    pub cause: InsertErrorKind,
}
582
/// The reason for the failure to insert.
///
/// Convertible into a plain [`Error`] via `From`.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum InsertErrorKind {
    /// There is already a value in the requested range.
    Occupied,
    /// Failure to allocate memory.
    AllocError(kernel::alloc::AllocError),
    /// The insertion request was invalid.
    InvalidRequest,
}
593
594 impl From<InsertErrorKind> for Error {
595 #[inline]
from(kind: InsertErrorKind) -> Error596 fn from(kind: InsertErrorKind) -> Error {
597 match kind {
598 InsertErrorKind::Occupied => EEXIST,
599 InsertErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
600 InsertErrorKind::InvalidRequest => EINVAL,
601 }
602 }
603 }
604
605 impl<T> From<InsertError<T>> for Error {
606 #[inline]
from(insert_err: InsertError<T>) -> Error607 fn from(insert_err: InsertError<T>) -> Error {
608 Error::from(insert_err.cause)
609 }
610 }
611
/// Error type for a failed [`MapleTreeAlloc::alloc_range`] call.
pub struct AllocError<T> {
    /// The value that could not be inserted, returned so the caller can reuse or drop it.
    pub value: T,
    /// The reason for the failure to insert.
    pub cause: AllocErrorKind,
}
619
620 /// The reason for the failure to insert.
621 #[derive(PartialEq, Eq, Copy, Clone)]
622 pub enum AllocErrorKind {
623 /// There is not enough space for the requested allocation.
624 Busy,
625 /// Failure to allocate memory.
626 AllocError(kernel::alloc::AllocError),
627 /// The insertion request was invalid.
628 InvalidRequest,
629 }
630
631 impl From<AllocErrorKind> for Error {
632 #[inline]
from(kind: AllocErrorKind) -> Error633 fn from(kind: AllocErrorKind) -> Error {
634 match kind {
635 AllocErrorKind::Busy => EBUSY,
636 AllocErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
637 AllocErrorKind::InvalidRequest => EINVAL,
638 }
639 }
640 }
641
642 impl<T> From<AllocError<T>> for Error {
643 #[inline]
from(insert_err: AllocError<T>) -> Error644 fn from(insert_err: AllocError<T>) -> Error {
645 Error::from(insert_err.cause)
646 }
647 }
648