xref: /linux/rust/kernel/io.rs (revision d8407396f128d8bf4d06282b636df3f26db208c1)
// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::{
    bindings,
    prelude::*, //
};

pub mod mem;
pub mod poll;
pub mod resource;

pub use resource::Resource;

/// Raw representation of an MMIO region.
///
/// On its own, an instance of this structure does not guarantee that the represented MMIO region
/// exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// `Io` instance, which provides the actual memory accessors. Only the conversion into an `Io`
/// structure provides any guarantees.
pub struct IoRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, or `EINVAL` if `maxsize` is smaller than `SIZE`.
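    ///
    /// # Examples
    ///
    /// A minimal sketch; the base address below is purely illustrative, since `IoRaw` itself does
    /// not validate it.
    ///
    /// ```
    /// # use kernel::io::IoRaw;
    /// // `maxsize` must be at least `SIZE`, otherwise construction fails.
    /// assert!(IoRaw::<4>::new(0xBAAA_0000, 4).is_ok());
    /// assert!(IoRaw::<4>::new(0xBAAA_0000, 2).is_err());
    /// ```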
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the
///     /// CPU's virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}

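// For illustration only (not part of the build): a rough sketch of the methods that a single
// invocation such as `define_read!(read32, try_read32, readl -> u32)` generates inside
// `impl Io<SIZE>`:
//
//     #[inline]
//     pub fn read32(&self, offset: usize) -> u32 {
//         let addr = self.io_addr_assert::<u32>(offset);
//         // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
//         unsafe { bindings::readl(addr as *const c_void) }
//     }
//
//     pub fn try_read32(&self, offset: usize) -> Result<u32> {
//         let addr = self.io_addr::<u32>(offset)?;
//         // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
//         Ok(unsafe { bindings::readl(addr as *const c_void) })
//     }
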
macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
            Ok(())
        }
    };
}

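// Similarly for illustration: `define_write!(write32, try_write32, writel <- u32)` generates the
// mirrored writer pair; the fallible variant looks roughly like:
//
//     pub fn try_write32(&self, value: u32, offset: usize) -> Result {
//         let addr = self.io_addr::<u32>(offset)?;
//         // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
//         unsafe { bindings::writel(value, addr as *mut c_void) }
//         Ok(())
//     }
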
impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `raw.addr()` is the start of a valid I/O mapped memory region of
    /// size `raw.maxsize()`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

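    // `offset_valid` checks that an access of `size_of::<U>()` bytes at `offset` is naturally
    // aligned and ends within `size`. Illustrative values (not derived from any real mapping):
    //
    //     offset_valid::<u32>(0, 8) == true   // in bounds and aligned
    //     offset_valid::<u32>(2, 8) == false  // misaligned for a 4-byte access
    //     offset_valid::<u32>(8, 8) == false  // would end past the mapping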
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::from_raw` guarantee
        // that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}
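
// For illustration only: given some `io: &Io<16>` obtained from a bus abstraction (for example by
// dereferencing a `pci::Bar`), the generated accessors are used as follows. The offsets and the
// register layout are made up for this sketch.
//
//     let id = io.read32(0x0);          // offset known at compile time, checked at build time
//     let status = io.try_read32(0x8)?; // offset checked at runtime, may fail with `EINVAL`
//     io.write32_relaxed(0x1, 0xc);     // relaxed variant, no implied memory barrier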