// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert};

pub mod resource;

pub use resource::Resource;

/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantee that
/// the represented MMIO region exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// `Io` instance providing the actual memory accessors. Only through the conversion into an `Io`
/// structure are any guarantees given.
pub struct IoRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, an error otherwise.
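    ///
    /// # Examples
    ///
    /// A minimal sketch of the size check below; the base address is an arbitrary placeholder
    /// rather than a real mapping, since `IoRaw` on its own does not access any memory.
    ///
    /// ```
    /// # use kernel::io::IoRaw;
    /// # fn example() -> Result<(), Error> {
    /// // A 16 byte region is large enough for a compile-time minimum size (`SIZE`) of 4 bytes.
    /// let raw = IoRaw::<4>::new(0x1000, 16)?;
    /// assert_eq!(raw.addr(), 0x1000);
    /// assert_eq!(raw.maxsize(), 16);
    ///
    /// // A region smaller than `SIZE` is rejected.
    /// assert!(IoRaw::<32>::new(0x1000, 16).is_err());
    /// # Ok(())
    /// # }
    /// ```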
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory, starting at the base address `addr` and spanning `maxsize` bytes.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as _) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
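///
/// // The `read*` accessors follow the same pattern; this is an illustrative sketch only (the
/// // bogus address above is assumed to expose a readable 32-bit register at offset 0x0).
/// // Bounds are checked at build time for `read32` and at runtime for `try_read32`.
/// let _value: u32 = iomem.read32(0x0);
/// assert!(iomem.try_read32(0x0).is_ok());
/// assert!(iomem.try_read32(0x4).is_err());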
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as _) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as _) })
        }
    };
}

macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as _) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as _) }
            Ok(())
        }
    };
}

impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
    /// `maxsize`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    /// Returns `true` if an access of `size_of::<U>()` bytes at `offset` is aligned and lies
    /// entirely within the first `size` bytes of the mapping.
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

    /// Computes the absolute address for an access of type `U` at `offset`, checking the offset
    /// against `maxsize()` at runtime.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::from_raw` guarantee
        // that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    /// Computes the absolute address for an access of type `U` at `offset`, checking the offset
    /// against `SIZE` at compile time.
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}