xref: /linux/rust/kernel/scatterlist.rs (revision 644672e93a1aa6bfc3ebc102cbf9b8efad16e786)
// SPDX-License-Identifier: GPL-2.0

//! Abstractions for scatter-gather lists.
//!
//! C header: [`include/linux/scatterlist.h`](srctree/include/linux/scatterlist.h)
//!
//! Scatter-gather (SG) I/O is a memory access technique that allows devices to perform DMA
//! operations on data buffers that are not physically contiguous in memory. It works by creating a
//! "scatter-gather list", an array where each entry specifies the address and length of a
//! physically contiguous memory segment.
//!
//! The device's DMA controller can then read this list and process the segments sequentially as
//! part of one logical I/O request. This avoids the need for a single, large, physically contiguous
//! memory buffer, which can be difficult or impossible to allocate.
//!
//! This module provides safe Rust abstractions over the kernel's `struct scatterlist` and
//! `struct sg_table` types.
//!
//! The main entry point is the [`SGTable`] type, which represents a complete scatter-gather table.
//! It can be either:
//!
//! - An owned table ([`SGTable<Owned<P>>`]), created from a Rust memory buffer (e.g., [`VVec`]).
//!   This type manages the allocation of the `struct sg_table`, the DMA mapping of the buffer, and
//!   the automatic cleanup of all resources.
//! - A borrowed reference (&[`SGTable`]), which provides safe, read-only access to a table that was
//!   allocated by other (e.g., C) code.
//!
//! Individual entries in the table are represented by [`SGEntry`], which can be accessed by
//! iterating over an [`SGTable`].
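//!
//! For example, a typical owned-table lifecycle looks roughly as follows (a minimal sketch; the
//! function name `f` and the bound device `dev` are placeholders provided by the caller):
//!
//! ```
//! use kernel::{
//!     device::{Bound, Device},
//!     dma, page,
//!     prelude::*,
//!     scatterlist::SGTable,
//! };
//!
//! fn f(dev: &Device<Bound>) -> Result {
//!     // Allocate a vmalloc buffer and map its pages for DMA to the device.
//!     let size = 4 * page::PAGE_SIZE;
//!     let pages = VVec::<u8>::with_capacity(size, GFP_KERNEL)?;
//!
//!     let sgt = KBox::pin_init(
//!         SGTable::new(dev, pages, dma::DataDirection::ToDevice, GFP_KERNEL),
//!         GFP_KERNEL,
//!     )?;
//!
//!     // Walk the DMA mapped segments of the table.
//!     for entry in sgt.iter() {
//!         let _ = (entry.dma_address(), entry.dma_len());
//!     }
//!
//!     Ok(())
//! }
//! ```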

use crate::{
    alloc,
    alloc::allocator::VmallocPageIter,
    bindings,
    device::{Bound, Device},
    devres::Devres,
    dma, error,
    io::ResourceSize,
    page,
    prelude::*,
    sync::aref::ARef,
    types::Opaque,
};
use core::{ops::Deref, ptr::NonNull};

/// A single entry in a scatter-gather list.
///
/// An `SGEntry` represents a single, physically contiguous segment of memory that has been mapped
/// for DMA.
///
/// Instances of this struct are obtained by iterating over an [`SGTable`]. Drivers do not create
/// or own [`SGEntry`] objects directly.
#[repr(transparent)]
pub struct SGEntry(Opaque<bindings::scatterlist>);

// SAFETY: `SGEntry` can be sent to any task.
unsafe impl Send for SGEntry {}

// SAFETY: `SGEntry` has no interior mutability and can be accessed concurrently.
unsafe impl Sync for SGEntry {}

impl SGEntry {
    /// Convert a raw `struct scatterlist *` to a `&'a SGEntry`.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the `struct scatterlist` pointed to by `ptr` is valid for the
    /// lifetime `'a`.
    #[inline]
    unsafe fn from_raw<'a>(ptr: *mut bindings::scatterlist) -> &'a Self {
        // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
        // to a `struct scatterlist` for the duration of `'a`.
        unsafe { &*ptr.cast() }
    }

    /// Obtain the raw `struct scatterlist *`.
    #[inline]
    fn as_raw(&self) -> *mut bindings::scatterlist {
        self.0.get()
    }

    /// Returns the DMA address of this SG entry.
    ///
    /// This is the address that the device should use to access the memory segment.
    #[inline]
    pub fn dma_address(&self) -> dma::DmaAddress {
        // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
        unsafe { bindings::sg_dma_address(self.as_raw()) }
    }

    /// Returns the length of this SG entry in bytes.
    #[inline]
    pub fn dma_len(&self) -> ResourceSize {
        #[allow(clippy::useless_conversion)]
        // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
        unsafe { bindings::sg_dma_len(self.as_raw()) }.into()
    }
}

/// The borrowed generic type of an [`SGTable`], representing a borrowed or externally managed
/// table.
#[repr(transparent)]
pub struct Borrowed(Opaque<bindings::sg_table>);

// SAFETY: `Borrowed` can be sent to any task.
unsafe impl Send for Borrowed {}

// SAFETY: `Borrowed` has no interior mutability and can be accessed concurrently.
unsafe impl Sync for Borrowed {}

/// A scatter-gather table.
///
/// This struct is a wrapper around the kernel's `struct sg_table`. It manages a list of DMA-mapped
/// memory segments that can be passed to a device for I/O operations.
///
/// The generic parameter `T` distinguishes between owned and borrowed tables:
///
///  - [`SGTable<Owned>`]: An owned table created and managed entirely by Rust code. It handles
///    allocation, DMA mapping, and cleanup of all associated resources. See [`SGTable::new`].
///  - [`SGTable<Borrowed>`] (or simply [`SGTable`]): Represents a table whose lifetime is managed
///    externally. It can be used safely via a borrowed reference `&'a SGTable`, where `'a` is the
///    external lifetime.
///
/// All [`SGTable`] variants can be iterated over to access the individual [`SGEntry`]s.
#[repr(transparent)]
#[pin_data]
pub struct SGTable<T: private::Sealed = Borrowed> {
    #[pin]
    inner: T,
}

impl SGTable {
    /// Creates a borrowed `&'a SGTable` from a raw `struct sg_table` pointer.
    ///
    /// This allows safe access to an `sg_table` that is managed elsewhere (for example, in C code).
    ///
    /// # Safety
    ///
    /// Callers must ensure that:
    ///
    /// - the `struct sg_table` pointed to by `ptr` is valid for the entire lifetime of `'a`,
    /// - the data behind `ptr` is not modified concurrently for the duration of `'a`.
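    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled here): `raw` stands for a `struct sg_table *` handed over
    /// by C code, which is also assumed to provide the guarantees listed in the safety
    /// requirements above.
    ///
    /// ```ignore
    /// // SAFETY: The C side keeps the table alive and unmodified while the reference is in use.
    /// let sgt: &SGTable = unsafe { SGTable::from_raw(raw) };
    ///
    /// for entry in sgt.iter() {
    ///     // Inspect the DMA mapped segments of the borrowed table.
    ///     let _ = (entry.dma_address(), entry.dma_len());
    /// }
    /// ```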
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *mut bindings::sg_table) -> &'a Self {
        // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
        // to a `struct sg_table` for the duration of `'a`.
        unsafe { &*ptr.cast() }
    }

    #[inline]
    fn as_raw(&self) -> *mut bindings::sg_table {
        self.inner.0.get()
    }

    /// Returns an [`SGTableIter`] bound to the lifetime of `self`.
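    ///
    /// # Examples
    ///
    /// A minimal sketch; the borrowed `sgt` reference is assumed to be obtained elsewhere, e.g.
    /// from an owned [`SGTable<Owned>`] or via [`SGTable::from_raw`]:
    ///
    /// ```
    /// use kernel::{prelude::*, scatterlist::SGTable};
    ///
    /// fn describe(sgt: &SGTable) {
    ///     // Count the DMA mapped entries of the table.
    ///     let nents = sgt.iter().count();
    ///
    ///     pr_info!("SG table with {} DMA mapped entries\n", nents);
    /// }
    /// ```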
    pub fn iter(&self) -> SGTableIter<'_> {
        // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
        let nents = unsafe { (*self.as_raw()).nents };

        let pos = if nents > 0 {
            // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
            let ptr = unsafe { (*self.as_raw()).sgl };

            // SAFETY: `ptr` is guaranteed to be a valid pointer to a `struct scatterlist`.
            Some(unsafe { SGEntry::from_raw(ptr) })
        } else {
            None
        };

        SGTableIter { pos, nents }
    }
}

/// Represents the DMA mapping state of a `struct sg_table`.
///
/// This is used as an inner type of [`Owned`] to manage the DMA mapping lifecycle.
///
/// # Invariants
///
/// - `sgt` is a valid pointer to a `struct sg_table` for the entire lifetime of the
///   [`DmaMappedSgt`].
/// - `sgt` is always DMA mapped.
struct DmaMappedSgt {
    sgt: NonNull<bindings::sg_table>,
    dev: ARef<Device>,
    dir: dma::DataDirection,
}

// SAFETY: `DmaMappedSgt` can be sent to any task.
unsafe impl Send for DmaMappedSgt {}

// SAFETY: `DmaMappedSgt` has no interior mutability and can be accessed concurrently.
unsafe impl Sync for DmaMappedSgt {}

impl DmaMappedSgt {
    /// # Safety
    ///
    /// - `sgt` must be a valid pointer to a `struct sg_table` for the entire lifetime of the
    ///   returned [`DmaMappedSgt`].
    /// - The caller must guarantee that `sgt` remains DMA mapped for the entire lifetime of
    ///   [`DmaMappedSgt`].
    unsafe fn new(
        sgt: NonNull<bindings::sg_table>,
        dev: &Device<Bound>,
        dir: dma::DataDirection,
    ) -> Result<Self> {
        // SAFETY:
        // - `dev.as_raw()` is a valid pointer to a `struct device`, which is guaranteed to be
        //   bound to a driver for the duration of this call.
        // - `sgt` is a valid pointer to a `struct sg_table`.
        error::to_result(unsafe {
            bindings::dma_map_sgtable(dev.as_raw(), sgt.as_ptr(), dir.into(), 0)
        })?;

        // INVARIANT: By the safety requirements of this function it is guaranteed that `sgt` is
        // valid for the entire lifetime of this object instance.
        Ok(Self {
            sgt,
            dev: dev.into(),
            dir,
        })
    }
}

impl Drop for DmaMappedSgt {
    #[inline]
    fn drop(&mut self) {
        // SAFETY:
        // - `self.dev.as_raw()` is a pointer to a valid `struct device`.
        // - `self.dev` is the same device the mapping has been created for in `Self::new()`.
        // - `self.sgt.as_ptr()` is a valid pointer to a `struct sg_table` by the type invariants
        //   of `Self`.
        // - `self.dir` is the same `dma::DataDirection` the mapping has been created with in
        //   `Self::new()`.
        unsafe {
            bindings::dma_unmap_sgtable(self.dev.as_raw(), self.sgt.as_ptr(), self.dir.into(), 0)
        };
    }
}

/// A transparent wrapper around a `struct sg_table`.
///
/// While we could also create the `struct sg_table` in the constructor of [`Owned`], we can't tear
/// down the `struct sg_table` in [`Owned::drop`]; the drop order in [`Owned`] matters.
#[repr(transparent)]
struct RawSGTable(Opaque<bindings::sg_table>);

// SAFETY: `RawSGTable` can be sent to any task.
unsafe impl Send for RawSGTable {}

// SAFETY: `RawSGTable` has no interior mutability and can be accessed concurrently.
unsafe impl Sync for RawSGTable {}

impl RawSGTable {
    /// # Safety
    ///
    /// - `pages` must be a slice of valid `struct page *`.
    /// - The pages pointed to by `pages` must remain valid for the entire lifetime of the returned
    ///   [`RawSGTable`].
    unsafe fn new(
        pages: &mut [*mut bindings::page],
        size: usize,
        max_segment: u32,
        flags: alloc::Flags,
    ) -> Result<Self> {
        // `sg_alloc_table_from_pages_segment()` expects at least one page, otherwise it
        // produces a NULL pointer dereference.
        if pages.is_empty() {
            return Err(EINVAL);
        }

        let sgt = Opaque::zeroed();
        // SAFETY:
        // - `sgt.get()` is a valid pointer to uninitialized memory.
        // - As per the check above, `pages` is not empty.
        error::to_result(unsafe {
            bindings::sg_alloc_table_from_pages_segment(
                sgt.get(),
                pages.as_mut_ptr(),
                pages.len().try_into()?,
                0,
                size,
                max_segment,
                flags.as_raw(),
            )
        })?;

        Ok(Self(sgt))
    }

    #[inline]
    fn as_raw(&self) -> *mut bindings::sg_table {
        self.0.get()
    }
}

impl Drop for RawSGTable {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: `sgt` is a valid and initialized `struct sg_table`.
        unsafe { bindings::sg_free_table(self.0.get()) };
    }
}

/// The [`Owned`] generic type of an [`SGTable`].
///
/// An [`SGTable<Owned>`] signifies that the [`SGTable`] owns all associated resources:
///
/// - The backing memory pages.
/// - The `struct sg_table` allocation (`sgt`).
/// - The DMA mapping, handled through a [`Devres`]-managed `DmaMappedSgt`.
///
/// Users interact with this type through the [`SGTable`] handle and do not need to manage
/// [`Owned`] directly.
#[pin_data]
pub struct Owned<P> {
    // Note: The drop order is relevant; we first have to unmap the `struct sg_table`, then free the
    // `struct sg_table` and finally free the backing pages.
    #[pin]
    dma: Devres<DmaMappedSgt>,
    sgt: RawSGTable,
    _pages: P,
}

// SAFETY: `Owned` can be sent to any task if `P` can be sent to any task.
unsafe impl<P: Send> Send for Owned<P> {}

// SAFETY: `Owned` has no interior mutability and can be accessed concurrently if `P` can be
// accessed concurrently.
unsafe impl<P: Sync> Sync for Owned<P> {}

impl<P> Owned<P>
where
    for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
{
    fn new(
        dev: &Device<Bound>,
        mut pages: P,
        dir: dma::DataDirection,
        flags: alloc::Flags,
    ) -> Result<impl PinInit<Self, Error> + '_> {
        let page_iter = pages.page_iter();
        let size = page_iter.size();

        let mut page_vec: KVec<*mut bindings::page> =
            KVec::with_capacity(page_iter.page_count(), flags)?;

        for page in page_iter {
            page_vec.push(page.as_ptr(), flags)?;
        }

        // `dma_max_mapping_size` returns `size_t`, but `sg_alloc_table_from_pages_segment()` takes
        // an `unsigned int`.
        //
        // SAFETY: `dev.as_raw()` is a valid pointer to a `struct device`.
        let max_segment = match unsafe { bindings::dma_max_mapping_size(dev.as_raw()) } {
            0 => u32::MAX,
            max_segment => u32::try_from(max_segment).unwrap_or(u32::MAX),
        };

        Ok(try_pin_init!(&this in Self {
            // SAFETY:
            // - `page_vec` is a `KVec` of valid `struct page *` obtained from `pages`.
            // - The pages contained in `pages` remain valid for the entire lifetime of the
            //   `RawSGTable`.
            sgt: unsafe { RawSGTable::new(&mut page_vec, size, max_segment, flags) }?,
            dma <- {
                // SAFETY: `this` is a valid pointer to uninitialized memory.
                let sgt = unsafe { &raw mut (*this.as_ptr()).sgt }.cast();

                // SAFETY: `sgt` is guaranteed to be non-null.
                let sgt = unsafe { NonNull::new_unchecked(sgt) };

                // SAFETY:
                // - It is guaranteed that the object returned by `DmaMappedSgt::new` won't out-live
                //   `sgt`.
                // - `sgt` is never DMA unmapped manually.
                Devres::new(dev, unsafe { DmaMappedSgt::new(sgt, dev, dir) })
            },
            _pages: pages,
        }))
    }
}

impl<P> SGTable<Owned<P>>
where
    for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
{
    /// Allocates a new scatter-gather table from the given pages and maps it for DMA.
    ///
    /// This constructor creates a new [`SGTable<Owned>`] that takes ownership of `P`.
    /// It allocates a `struct sg_table`, populates it with entries corresponding to the physical
    /// pages of `P`, and maps the table for DMA with the specified [`Device`] and
    /// [`dma::DataDirection`].
    ///
    /// The DMA mapping is managed through [`Devres`], ensuring that it is unmapped once the
    /// associated [`Device`] is unbound, or when the [`SGTable<Owned>`] is dropped.
    ///
    /// # Parameters
    ///
    /// * `dev`: The [`Device`] that will be performing the DMA.
    /// * `pages`: The entity providing the backing pages. It must implement [`page::AsPageIter`].
    ///   The ownership of this entity is moved into the new [`SGTable<Owned>`].
    /// * `dir`: The [`dma::DataDirection`] of the DMA transfer.
    /// * `flags`: Allocation flags for internal allocations (e.g., [`GFP_KERNEL`]).
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::{
    ///     device::{Bound, Device},
    ///     dma, page,
    ///     prelude::*,
    ///     scatterlist::{SGTable, Owned},
    /// };
    ///
    /// fn test(dev: &Device<Bound>) -> Result {
    ///     let size = 4 * page::PAGE_SIZE;
    ///     let pages = VVec::<u8>::with_capacity(size, GFP_KERNEL)?;
    ///
    ///     let sgt = KBox::pin_init(SGTable::new(
    ///         dev,
    ///         pages,
    ///         dma::DataDirection::ToDevice,
    ///         GFP_KERNEL,
    ///     ), GFP_KERNEL)?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn new(
        dev: &Device<Bound>,
        pages: P,
        dir: dma::DataDirection,
        flags: alloc::Flags,
    ) -> impl PinInit<Self, Error> + '_ {
        try_pin_init!(Self {
            inner <- Owned::new(dev, pages, dir, flags)?
        })
    }
}

impl<P> Deref for SGTable<Owned<P>> {
    type Target = SGTable;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY:
        // - `self.inner.sgt.as_raw()` is a valid pointer to a `struct sg_table` for the entire
        //   lifetime of `self`.
        // - The backing `struct sg_table` is not modified for the entire lifetime of `self`.
        unsafe { SGTable::from_raw(self.inner.sgt.as_raw()) }
    }
}

mod private {
    pub trait Sealed {}

    impl Sealed for super::Borrowed {}
    impl<P> Sealed for super::Owned<P> {}
}

/// An [`Iterator`] over the DMA mapped [`SGEntry`] items of an [`SGTable`].
///
/// Note that the existence of an [`SGTableIter`] does not guarantee that the [`SGEntry`] items
/// actually remain DMA mapped; they are unmapped once the corresponding device is unbound.
pub struct SGTableIter<'a> {
    pos: Option<&'a SGEntry>,
    /// The number of DMA mapped entries in a `struct sg_table`.
    nents: c_uint,
}

impl<'a> Iterator for SGTableIter<'a> {
    type Item = &'a SGEntry;

    fn next(&mut self) -> Option<Self::Item> {
        let entry = self.pos?;
        self.nents = self.nents.saturating_sub(1);

        // SAFETY: `entry.as_raw()` is a valid pointer to a `struct scatterlist`.
        let next = unsafe { bindings::sg_next(entry.as_raw()) };

        self.pos = (!next.is_null() && self.nents > 0).then(|| {
            // SAFETY: If `next` is not NULL, `sg_next()` guarantees to return a valid pointer to
            // the next `struct scatterlist`.
            unsafe { SGEntry::from_raw(next) }
        });

        Some(entry)
    }
}
493