xref: /linux/rust/kernel/dma.rs (revision 416f99c3b16f582a3fc6d64a1f77f39d94b76de5)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Direct memory access (DMA).
4 //!
5 //! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)
6 
7 use crate::{
8     bindings, build_assert, device,
9     device::{Bound, Core},
10     error::{to_result, Result},
11     prelude::*,
12     sync::aref::ARef,
13     transmute::{AsBytes, FromBytes},
14 };
15 use core::ptr::NonNull;
16 
17 /// DMA address type.
18 ///
19 /// Represents a bus address used for Direct Memory Access (DMA) operations.
20 ///
21 /// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on
22 /// `CONFIG_ARCH_DMA_ADDR_T_64BIT`.
23 ///
24 /// Note that this may be `u64` even on 32-bit architectures.
25 pub type DmaAddress = bindings::dma_addr_t;
26 
27 /// Trait to be implemented by DMA capable bus devices.
28 ///
29 /// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
30 /// where the underlying bus is DMA capable, such as [`pci::Device`](::kernel::pci::Device) or
31 /// [`platform::Device`](::kernel::platform::Device).
32 pub trait Device: AsRef<device::Device<Core>> {
33     /// Set up the device's DMA streaming addressing capabilities.
34     ///
35     /// This method is usually called once from `probe()` as soon as the device capabilities are
36     /// known.
37     ///
38     /// # Safety
39     ///
40     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
41     /// such as [`CoherentAllocation::alloc_attrs`].
42     unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
43         // SAFETY:
44         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
45         // - The safety requirement of this function guarantees that there are no concurrent calls
46         //   to DMA allocation and mapping primitives using this mask.
47         to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
48     }
49 
50     /// Set up the device's DMA coherent addressing capabilities.
51     ///
52     /// This method is usually called once from `probe()` as soon as the device capabilities are
53     /// known.
54     ///
55     /// # Safety
56     ///
57     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
58     /// such as [`CoherentAllocation::alloc_attrs`].
59     unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
60         // SAFETY:
61         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
62         // - The safety requirement of this function guarantees that there are no concurrent calls
63         //   to DMA allocation and mapping primitives using this mask.
64         to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
65     }
66 
67     /// Set up the device's DMA addressing capabilities.
68     ///
69     /// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
70     ///
71     /// This method is usually called once from `probe()` as soon as the device capabilities are
72     /// known.
73     ///
74     /// # Safety
75     ///
76     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
77     /// such as [`CoherentAllocation::alloc_attrs`].
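    ///
    /// # Examples
    ///
    /// A minimal sketch of a typical call from a driver's `probe()`; the 64-bit mask is
    /// illustrative and should match what the hardware actually supports:
    ///
    /// ```
    /// use kernel::dma::{Device as DmaDevice, DmaMask};
    ///
    /// # fn probe_setup(dev: &impl DmaDevice) -> Result {
    /// // SAFETY: Called from `probe()` before any DMA allocations or mappings exist for this
    /// // device, hence there are no concurrent calls to DMA allocation or mapping primitives.
    /// unsafe { dev.dma_set_mask_and_coherent(DmaMask::new::<64>())? };
    /// # Ok::<(), Error>(()) }
    /// ```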
78     unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
79         // SAFETY:
80         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
81         // - The safety requirement of this function guarantees that there are no concurrent calls
82         //   to DMA allocation and mapping primitives using this mask.
83         to_result(unsafe {
84             bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
85         })
86     }
87 }
88 
89 /// A DMA mask that holds a bitmask with the lowest `n` bits set.
90 ///
91 /// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
92 /// are guaranteed to never exceed the bit width of `u64`.
93 ///
94 /// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
95 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
96 pub struct DmaMask(u64);
97 
98 impl DmaMask {
99     /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
100     ///
101     /// For `n <= 64`, sets exactly the lowest `n` bits.
102     /// For `n > 64`, results in a build error.
103     ///
104     /// # Examples
105     ///
106     /// ```
107     /// use kernel::dma::DmaMask;
108     ///
109     /// let mask0 = DmaMask::new::<0>();
110     /// assert_eq!(mask0.value(), 0);
111     ///
112     /// let mask1 = DmaMask::new::<1>();
113     /// assert_eq!(mask1.value(), 0b1);
114     ///
115     /// let mask64 = DmaMask::new::<64>();
116     /// assert_eq!(mask64.value(), u64::MAX);
117     ///
118     /// // Build failure.
119     /// // let mask_overflow = DmaMask::new::<100>();
120     /// ```
121     #[inline]
122     pub const fn new<const N: u32>() -> Self {
123         let Ok(mask) = Self::try_new(N) else {
124             build_error!("Invalid DMA Mask.");
125         };
126 
127         mask
128     }
129 
130     /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
131     ///
132     /// For `n <= 64`, sets exactly the lowest `n` bits.
133     /// For `n > 64`, returns [`EINVAL`].
134     ///
135     /// # Examples
136     ///
137     /// ```
138     /// use kernel::dma::DmaMask;
139     ///
140     /// let mask0 = DmaMask::try_new(0)?;
141     /// assert_eq!(mask0.value(), 0);
142     ///
143     /// let mask1 = DmaMask::try_new(1)?;
144     /// assert_eq!(mask1.value(), 0b1);
145     ///
146     /// let mask64 = DmaMask::try_new(64)?;
147     /// assert_eq!(mask64.value(), u64::MAX);
148     ///
149     /// let mask_overflow = DmaMask::try_new(100);
150     /// assert!(mask_overflow.is_err());
151     /// # Ok::<(), Error>(())
152     /// ```
153     #[inline]
154     pub const fn try_new(n: u32) -> Result<Self> {
155         Ok(Self(match n {
156             0 => 0,
157             1..=64 => u64::MAX >> (64 - n),
158             _ => return Err(EINVAL),
159         }))
160     }
161 
162     /// Returns the underlying `u64` bitmask value.
163     #[inline]
164     pub const fn value(&self) -> u64 {
165         self.0
166     }
167 }
168 
169 /// Possible attributes associated with a DMA mapping.
170 ///
171 /// They can be combined with the operators `|`, `&`, and `!`.
172 ///
173 /// Values can be used from the [`attrs`] module.
174 ///
175 /// # Examples
176 ///
177 /// ```
178 /// # use kernel::device::{Bound, Device};
179 /// use kernel::dma::{attrs::*, CoherentAllocation};
180 ///
181 /// # fn test(dev: &Device<Bound>) -> Result {
182 /// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
183 /// let c: CoherentAllocation<u64> =
184 ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
185 /// # Ok::<(), Error>(()) }
186 /// ```
187 #[derive(Clone, Copy, PartialEq)]
188 #[repr(transparent)]
189 pub struct Attrs(u32);
190 
191 impl Attrs {
192     /// Get the raw representation of this attribute.
193     pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
194         self.0 as crate::ffi::c_ulong
195     }
196 
197     /// Check whether `flags` is contained in `self`.
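    ///
    /// # Examples
    ///
    /// A short sketch of checking which attributes are set in a combined value:
    ///
    /// ```
    /// use kernel::dma::attrs::*;
    ///
    /// let attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
    /// assert!(attrs.contains(DMA_ATTR_NO_WARN));
    /// assert!(!attrs.contains(DMA_ATTR_WEAK_ORDERING));
    /// ```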
198     pub fn contains(self, flags: Attrs) -> bool {
199         (self & flags) == flags
200     }
201 }
202 
203 impl core::ops::BitOr for Attrs {
204     type Output = Self;
205     fn bitor(self, rhs: Self) -> Self::Output {
206         Self(self.0 | rhs.0)
207     }
208 }
209 
210 impl core::ops::BitAnd for Attrs {
211     type Output = Self;
212     fn bitand(self, rhs: Self) -> Self::Output {
213         Self(self.0 & rhs.0)
214     }
215 }
216 
217 impl core::ops::Not for Attrs {
218     type Output = Self;
219     fn not(self) -> Self::Output {
220         Self(!self.0)
221     }
222 }
223 
224 /// DMA mapping attributes.
225 pub mod attrs {
226     use super::Attrs;
227 
228     /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads
229     /// and writes may pass each other.
230     pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);
231 
232     /// Specifies that writes to the mapping may be buffered to improve performance.
233     pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);
234 
235     /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
236     pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);
237 
238     /// Allows platform code to skip synchronization of the CPU cache for the given buffer,
239     /// assuming that it has already been transferred to the 'device' domain.
240     pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);
241 
242     /// Forces contiguous allocation of the buffer in physical memory.
243     pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);
244 
245     /// Hints to the DMA-mapping subsystem that it's probably not worth the time to try
246     /// to allocate memory in a way that gives better TLB efficiency.
247     pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);
248 
249     /// This tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
250     /// `__GFP_NOWARN`).
251     pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);
252 
253     /// Indicates that the buffer is fully accessible at an elevated privilege level (and
254     /// ideally inaccessible or at least read-only at lesser-privileged levels).
255     pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
256 
257     /// Indicates that the buffer is MMIO memory.
258     pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
259 }
260 
261 /// DMA data direction.
262 ///
263 /// Corresponds to the C [`enum dma_data_direction`].
264 ///
265 /// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
266 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
267 #[repr(u32)]
268 pub enum DataDirection {
269     /// The DMA mapping is for bidirectional data transfer.
270     ///
271     /// This is used when the buffer can be both read from and written to by the device.
272     /// The cache for the corresponding memory region is both flushed and invalidated.
273     Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),
274 
275     /// The DMA mapping is for data transfer from memory to the device (write).
276     ///
277     /// The CPU has prepared data in the buffer, and the device will read it.
278     /// The cache for the corresponding memory region is flushed before device access.
279     ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),
280 
281     /// The DMA mapping is for data transfer from the device to memory (read).
282     ///
283     /// The device will write data into the buffer for the CPU to read.
284     /// The cache for the corresponding memory region is invalidated before CPU access.
285     FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),
286 
287     /// The DMA mapping is not for data transfer.
288     ///
289     /// This is primarily for debugging purposes. With this direction, the DMA mapping API
290     /// will not perform any cache coherency operations.
291     None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
292 }
293 
294 impl DataDirection {
295     /// Casts the bindgen-generated enum type to a `u32` at compile time.
296     ///
297     /// This function will cause a compile-time error if the underlying value of the
298     /// C enum is out of bounds for `u32`.
299     const fn const_cast(val: bindings::dma_data_direction) -> u32 {
300         // CAST: The C standard allows compilers to choose different integer types for enums.
301         // To safely check the value, we cast it to a wide signed integer type (`i128`)
302         // which can hold any standard C integer enum type without truncation.
303         let wide_val = val as i128;
304 
305         // Check if the value is outside the valid range for the target type `u32`.
306         // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
307         if wide_val < 0 || wide_val > u32::MAX as i128 {
308             // Trigger a compile-time error in a const context.
309             build_error!("C enum value is out of bounds for the target type `u32`.");
310         }
311 
312         // CAST: This cast is valid because the check above guarantees that `wide_val`
313         // is within the representable range of `u32`.
314         wide_val as u32
315     }
316 }
317 
318 impl From<DataDirection> for bindings::dma_data_direction {
319     /// Returns the raw representation of [`enum dma_data_direction`].
320     fn from(direction: DataDirection) -> Self {
321         // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
322         // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
323         // with the enum variants of `DataDirection`, which is a valid assumption given our
324         // compile-time checks.
325         direction as u32 as Self
326     }
327 }
328 
329 /// An abstraction of the `dma_alloc_coherent` API.
330 ///
331 /// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
332 /// large coherent DMA regions.
333 ///
334 /// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
335 /// processor's virtual address space) and the device address which can be given to the device
336 /// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
337 /// is dropped.
338 ///
339 /// # Invariants
340 ///
341 /// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
342 ///   to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
343 ///   region.
344 /// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
345 /// - `size_of::<T> * count` fits into a `usize`.
346 // TODO
347 //
348 // DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
349 // reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
350 // that device resources can never survive device unbind.
351 //
352 // However, it is neither desirable nor necessary to protect the allocated memory of the DMA
353 // allocation from surviving device unbind; it would require RCU read side critical sections to
354 // access the memory, which may require subsequent unnecessary copies.
355 //
356 // Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
357 // entire `CoherentAllocation` including the allocated memory itself.
358 pub struct CoherentAllocation<T: AsBytes + FromBytes> {
359     dev: ARef<device::Device>,
360     dma_handle: DmaAddress,
361     count: usize,
362     cpu_addr: NonNull<T>,
363     dma_attrs: Attrs,
364 }
365 
366 impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
367     /// Allocates a region of `size_of::<T> * count` of coherent memory.
368     ///
369     /// # Examples
370     ///
371     /// ```
372     /// # use kernel::device::{Bound, Device};
373     /// use kernel::dma::{attrs::*, CoherentAllocation};
374     ///
375     /// # fn test(dev: &Device<Bound>) -> Result {
376     /// let c: CoherentAllocation<u64> =
377     ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
378     /// # Ok::<(), Error>(()) }
379     /// ```
380     pub fn alloc_attrs(
381         dev: &device::Device<Bound>,
382         count: usize,
383         gfp_flags: kernel::alloc::Flags,
384         dma_attrs: Attrs,
385     ) -> Result<CoherentAllocation<T>> {
386         build_assert!(
387             core::mem::size_of::<T>() > 0,
388             "It doesn't make sense for the allocated type to be a ZST"
389         );
390 
391         let size = count
392             .checked_mul(core::mem::size_of::<T>())
393             .ok_or(EOVERFLOW)?;
394         let mut dma_handle = 0;
395         // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
396         let addr = unsafe {
397             bindings::dma_alloc_attrs(
398                 dev.as_raw(),
399                 size,
400                 &mut dma_handle,
401                 gfp_flags.as_raw(),
402                 dma_attrs.as_raw(),
403             )
404         };
405         let addr = NonNull::new(addr).ok_or(ENOMEM)?;
406         // INVARIANT:
407         // - We just successfully allocated a coherent region which is accessible for
408         //   `count` elements, hence the cpu address is valid. We also hold a refcounted reference
409         //   to the device.
410         // - The allocated `size` is equal to `size_of::<T> * count`.
411         // - The allocated `size` fits into a `usize`.
412         Ok(Self {
413             dev: dev.into(),
414             dma_handle,
415             count,
416             cpu_addr: addr.cast(),
417             dma_attrs,
418         })
419     }
420 
421     /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
422     /// `dma_attrs` is 0 by default.
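    ///
    /// # Examples
    ///
    /// A minimal sketch mirroring the [`CoherentAllocation::alloc_attrs`] example, just without
    /// attributes:
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// use kernel::dma::CoherentAllocation;
    ///
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> = CoherentAllocation::alloc_coherent(dev, 4, GFP_KERNEL)?;
    /// # Ok::<(), Error>(()) }
    /// ```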
423     pub fn alloc_coherent(
424         dev: &device::Device<Bound>,
425         count: usize,
426         gfp_flags: kernel::alloc::Flags,
427     ) -> Result<CoherentAllocation<T>> {
428         CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
429     }
430 
431     /// Returns the number of elements `T` in this allocation.
432     ///
433     /// Note that this is not the size of the allocation in bytes, which is provided by
434     /// [`Self::size`].
435     pub fn count(&self) -> usize {
436         self.count
437     }
438 
439     /// Returns the size in bytes of this allocation.
440     pub fn size(&self) -> usize {
441         // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
442         // a `usize`.
443         self.count * core::mem::size_of::<T>()
444     }
445 
446     /// Returns the base address to the allocated region in the CPU's virtual address space.
447     pub fn start_ptr(&self) -> *const T {
448         self.cpu_addr.as_ptr()
449     }
450 
451     /// Returns the base address to the allocated region in the CPU's virtual address space as
452     /// a mutable pointer.
453     pub fn start_ptr_mut(&mut self) -> *mut T {
454         self.cpu_addr.as_ptr()
455     }
456 
457     /// Returns a DMA handle which may be given to the device as the DMA address base of
458     /// the region.
459     pub fn dma_handle(&self) -> DmaAddress {
460         self.dma_handle
461     }
462 
463     /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
464     /// device as the DMA address base of the region.
465     ///
466     /// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
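    ///
    /// # Examples
    ///
    /// A minimal sketch of computing a per-element device address; the element index is
    /// illustrative:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u64>) -> Result {
    /// // DMA address of the element at index 2, i.e. `dma_handle() + 2 * size_of::<u64>()`.
    /// let addr = alloc.dma_handle_with_offset(2)?;
    /// # Ok::<(), Error>(()) }
    /// ```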
467     pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
468         if offset >= self.count {
469             Err(EINVAL)
470         } else {
471             // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
472             //   into a `usize`, and `offset` is less than `count`.
473             Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
474         }
475     }
476 
477     /// Common helper to validate a range applied from the allocated region in the CPU's virtual
478     /// address space.
479     fn validate_range(&self, offset: usize, count: usize) -> Result {
480         if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
481             return Err(EINVAL);
482         }
483         Ok(())
484     }
485 
486     /// Returns the data from the region starting from `offset` as a slice.
487     /// `offset` and `count` are in units of `T`, not the number of bytes.
488     ///
489     /// For ring-buffer style read/write access, or for use cases where a pointer to the live
490     /// data is needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`]
491     /// can be used instead.
492     ///
493     /// # Safety
494     ///
495     /// * Callers must ensure that the device does not read/write to/from memory while the returned
496     ///   slice is live.
497     /// * Callers must ensure that this call does not race with a write to the same region while
498     ///   the returned slice is live.
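    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the device is idle with respect to this region:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
    /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to
    /// // the region while the slice is live.
    /// let data = unsafe { alloc.as_slice(0, 4)? };
    /// # Ok::<(), Error>(()) }
    /// ```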
499     pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
500         self.validate_range(offset, count)?;
501         // SAFETY:
502         // - The pointer is valid due to type invariant on `CoherentAllocation`,
503         //   we've just checked that the range and index is within bounds. The immutability of the
504         //   data is also guaranteed by the safety requirements of the function.
505         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
506         //   that `self.count` won't overflow early in the constructor.
507         Ok(unsafe { core::slice::from_raw_parts(self.start_ptr().add(offset), count) })
508     }
509 
510     /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
511     /// slice is returned.
512     ///
513     /// # Safety
514     ///
515     /// * Callers must ensure that the device does not read/write to/from memory while the returned
516     ///   slice is live.
517     /// * Callers must ensure that this call does not race with a read or write to the same region
518     ///   while the returned slice is live.
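    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the device is idle with respect to this region:
    ///
    /// ```
    /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
    /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to
    /// // the region while the slice is live.
    /// let buf = unsafe { alloc.as_slice_mut(0, 4)? };
    /// buf.fill(0);
    /// # Ok::<(), Error>(()) }
    /// ```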
519     pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
520         self.validate_range(offset, count)?;
521         // SAFETY:
522         // - The pointer is valid due to type invariant on `CoherentAllocation`,
523         //   we've just checked that the range and index is within bounds. Exclusive access to
524         //   the data is also guaranteed by the safety requirements of the function.
525         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
526         //   that `self.count` won't overflow early in the constructor.
527         Ok(unsafe { core::slice::from_raw_parts_mut(self.start_ptr_mut().add(offset), count) })
528     }
529 
530     /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
531     /// number of bytes.
532     ///
533     /// # Safety
534     ///
535     /// * Callers must ensure that the device does not read/write to/from the region being
536     ///   written while this call is in progress.
537     /// * Callers must ensure that this call does not race with a read or write to the same region
538     ///   that overlaps with this write.
539     ///
540     /// # Examples
541     ///
542     /// ```
543     /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
544     /// let somedata: [u8; 4] = [0xf; 4];
545     /// let buf: &[u8] = &somedata;
546     /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
547     /// // region.
548     /// unsafe { alloc.write(buf, 0)?; }
549     /// # Ok::<(), Error>(()) }
550     /// ```
551     pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
552         self.validate_range(offset, src.len())?;
553         // SAFETY:
554         // - The pointer is valid due to type invariant on `CoherentAllocation`
555         //   and we've just checked that the range and index is within bounds.
556         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
557         //   that `self.count` won't overflow early in the constructor.
558         unsafe {
559             core::ptr::copy_nonoverlapping(
560                 src.as_ptr(),
561                 self.start_ptr_mut().add(offset),
562                 src.len(),
563             )
564         };
565         Ok(())
566     }
567 
568     /// Returns a pointer to an element from the region with bounds checking. `offset` is in
569     /// units of `T`, not the number of bytes.
570     ///
571     /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros.
572     #[doc(hidden)]
573     pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
574         if offset >= self.count {
575             return Err(EINVAL);
576         }
577         // SAFETY:
578         // - The pointer is valid due to type invariant on `CoherentAllocation`
579         // and we've just checked that the range and index is within bounds.
580         // - `offset` can't overflow since it is smaller than `self.count` and we've checked
581         // that `self.count` won't overflow early in the constructor.
582         Ok(unsafe { self.cpu_addr.as_ptr().add(offset) })
583     }
584 
585     /// Reads the value of `field` and ensures that its type is [`FromBytes`].
586     ///
587     /// # Safety
588     ///
589     /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
590     /// validated beforehand.
591     ///
592     /// Public but hidden since it should only be used from [`dma_read`] macro.
593     #[doc(hidden)]
594     pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
595         // SAFETY:
596         // - By the safety requirements field is valid.
597         // - Using read_volatile() here is not sound as per the usual rules; the usage here is
598         // a special exception, with the following notes in place. When dealing with a potential
599         // race from hardware or from code outside the kernel (e.g. a user-space program), we need
600         // reads of valid memory to not be UB. Currently read_volatile() is used for this, and the
601         // rationale is that it should generate the same code as READ_ONCE(), which the
602         // kernel already relies on to avoid UB on data races. Note that this usage of
603         // read_volatile() is limited to this particular case; it cannot be used to prevent
604         // the UB caused by racing between two kernel functions, nor does it provide atomicity.
605         unsafe { field.read_volatile() }
606     }
607 
608     /// Writes a value to `field` and ensures that its type is [`AsBytes`].
609     ///
610     /// # Safety
611     ///
612     /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
613     /// validated beforehand.
614     ///
615     /// Public but hidden since it should only be used from [`dma_write`] macro.
616     #[doc(hidden)]
617     pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
618         // SAFETY:
619         // - By the safety requirements field is valid.
620         // - Using write_volatile() here is not sound as per the usual rules; the usage here is
621         // a special exception, with the following notes in place. When dealing with a potential
622         // race from hardware or from code outside the kernel (e.g. a user-space program), we need
623         // writes to valid memory to not be UB. Currently write_volatile() is used for this, and the
624         // rationale is that it should generate the same code as WRITE_ONCE(), which the
625         // kernel already relies on to avoid UB on data races. Note that this usage of
626         // write_volatile() is limited to this particular case; it cannot be used to prevent
627         // the UB caused by racing between two kernel functions, nor does it provide atomicity.
628         unsafe { field.write_volatile(val) }
629     }
630 }
631 
632 /// Note that the device configured to do DMA must be halted before this object is dropped.
633 impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
634     fn drop(&mut self) {
635         let size = self.count * core::mem::size_of::<T>();
636         // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
637         // The cpu address, and the dma handle are valid due to the type invariants on
638         // `CoherentAllocation`.
639         unsafe {
640             bindings::dma_free_attrs(
641                 self.dev.as_raw(),
642                 size,
643                 self.start_ptr_mut().cast(),
644                 self.dma_handle,
645                 self.dma_attrs.as_raw(),
646             )
647         }
648     }
649 }
650 
651 // SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
652 // can be sent to another thread.
653 unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
654 
655 /// Reads a field of an item from an allocated region of structs.
656 ///
657 /// # Examples
658 ///
659 /// ```
660 /// use kernel::device::Device;
661 /// use kernel::dma::{attrs::*, CoherentAllocation};
662 ///
663 /// struct MyStruct { field: u32, }
664 ///
665 /// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
666 /// unsafe impl kernel::transmute::FromBytes for MyStruct{};
667 /// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
668 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
669 ///
670 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
671 /// let whole = kernel::dma_read!(alloc[2]);
672 /// let field = kernel::dma_read!(alloc[1].field);
673 /// # Ok::<(), Error>(()) }
674 /// ```
675 #[macro_export]
676 macro_rules! dma_read {
677     ($dma:expr, $idx: expr, $($field:tt)*) => {{
678         (|| -> ::core::result::Result<_, $crate::error::Error> {
679             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
680             // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
681             // dereferenced. The compiler also further validates the expression on whether `field`
682             // is a member of `item` when expanded by the macro.
683             unsafe {
684                 let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
685                 ::core::result::Result::Ok(
686                     $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
687                 )
688             }
689         })()
690     }};
691     ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
692         $crate::dma_read!($dma, $idx, $($field)*)
693     };
694     ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
695         $crate::dma_read!($($dma).*, $idx, $($field)*)
696     };
697 }
698 
699 /// Writes to a field of an item from an allocated region of structs.
700 ///
701 /// # Examples
702 ///
703 /// ```
704 /// use kernel::device::Device;
705 /// use kernel::dma::{attrs::*, CoherentAllocation};
706 ///
707 /// struct MyStruct { member: u32, }
708 ///
709 /// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
710 /// unsafe impl kernel::transmute::FromBytes for MyStruct{};
711 /// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
712 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
713 ///
714 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
715 /// kernel::dma_write!(alloc[2].member = 0xf);
716 /// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
717 /// # Ok::<(), Error>(()) }
718 /// ```
719 #[macro_export]
720 macro_rules! dma_write {
721     ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
722         $crate::dma_write!($dma, $idx, $($field)*)
723     }};
724     ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
725         $crate::dma_write!($($dma).*, $idx, $($field)*)
726     }};
727     ($dma:expr, $idx: expr, = $val:expr) => {
728         (|| -> ::core::result::Result<_, $crate::error::Error> {
729             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
730             // SAFETY: `item_from_index` ensures that `item` is always a valid item.
731             unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
732             ::core::result::Result::Ok(())
733         })()
734     };
735     ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
736         (|| -> ::core::result::Result<_, $crate::error::Error> {
737             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
738             // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
739             // dereferenced. The compiler also further validates the expression on whether `field`
740             // is a member of `item` when expanded by the macro.
741             unsafe {
742                 let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
743                 $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
744             }
745             ::core::result::Result::Ok(())
746         })()
747     };
748 }
749