xref: /linux/rust/kernel/io.rs (revision 121d87b28e1d9061d3aaa156c43a627d3cb5e620)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Memory-mapped IO.
4 //!
5 //! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
6 
7 use crate::{
8     bindings,
9     prelude::*, //
10 };
11 
12 pub mod mem;
13 pub mod poll;
14 pub mod resource;
15 
16 pub use resource::Resource;
17 
/// Physical address type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type PhysAddr = bindings::phys_addr_t;
23 
/// Resource Size type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type ResourceSize = bindings::resource_size_t;
29 
/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region does exist or is properly mapped.
///
/// Instead, the bus specific MMIO implementation must convert this raw representation into an
/// `Mmio` instance providing the actual memory accessors. Only by converting into an `Mmio`
/// structure are any guarantees given.
pub struct MmioRaw<const SIZE: usize = 0> {
    // Start address of the MMIO region.
    addr: usize,
    // Runtime size of the region; `Self::new` guarantees `maxsize >= SIZE`.
    maxsize: usize,
}
42 
43 impl<const SIZE: usize> MmioRaw<SIZE> {
44     /// Returns a new `MmioRaw` instance on success, an error otherwise.
45     pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
46         if maxsize < SIZE {
47             return Err(EINVAL);
48         }
49 
50         Ok(Self { addr, maxsize })
51     }
52 
53     /// Returns the base address of the MMIO region.
54     #[inline]
55     pub fn addr(&self) -> usize {
56         self.addr
57     }
58 
59     /// Returns the maximum size of the MMIO region.
60     #[inline]
61     pub fn maxsize(&self) -> usize {
62         self.maxsize
63     }
64 }
65 
/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request etc.
///
/// # Invariant
///
/// `addr` is the start address of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// use kernel::{
///     bindings,
///     ffi::c_void,
///     io::{
///         Io,
///         IoKnownSize,
///         Mmio,
///         MmioRaw,
///         PhysAddr,
///     },
/// };
/// use core::ops::Deref;
///
/// // See also `pci::Bar` for a real example.
/// struct IoMem<const SIZE: usize>(MmioRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(MmioRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Mmio<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Mmio::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Mmio<const SIZE: usize = 0>(MmioRaw<SIZE>);
139 
macro_rules! define_read {
    // `infallible`: the bounds check happens at compile time via `io_addr_assert`
    // (`build_assert!`); an invalid offset fails the build instead of returning an error.
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        $vis fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }
    };

    // `fallible`: the offset is validated at runtime via `io_addr`; out-of-bounds or
    // misaligned accesses return `Err(EINVAL)`.
    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}
// Make the macro usable from sibling modules in this crate; `#[expect(unused)]` silences the
// lint until an in-crate user exists.
#[expect(unused)]
pub(crate) use define_read;
172 
macro_rules! define_write {
    // `infallible`: the bounds check happens at compile time via `io_addr_assert`
    // (`build_assert!`); an invalid offset fails the build instead of returning an error.
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        $vis fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }
    };

    // `fallible`: the offset is validated at runtime via `io_addr`; out-of-bounds or
    // misaligned accesses return `Err(EINVAL)`.
    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) };
            Ok(())
        }
    };
}
// Make the macro usable from sibling modules in this crate; `#[expect(unused)]` silences the
// lint until an in-crate user exists.
#[expect(unused)]
pub(crate) use define_write;
206 
/// Checks whether an access of type `U` at the given `offset`
/// is valid within a region of `size` bytes.
///
/// An access is valid when it lies entirely within the region and the offset is naturally
/// aligned to the width of `U`.
#[inline]
const fn offset_valid<U>(offset: usize, size: usize) -> bool {
    let width = core::mem::size_of::<U>();

    // `checked_add` rejects accesses whose end address would wrap around.
    match offset.checked_add(width) {
        Some(end) => end <= size && offset % width == 0,
        None => false,
    }
}
218 
/// Marker trait indicating that an I/O backend supports operations of a certain type.
///
/// Different I/O backends can implement this trait to expose only the operations they support.
///
/// For example, a PCI configuration space may implement `IoCapable<u8>`, `IoCapable<u16>`,
/// and `IoCapable<u32>`, but not `IoCapable<u64>`, while an MMIO region on a 64-bit
/// system might implement all four.
pub trait IoCapable<T> {}
227 
/// Types implementing this trait (e.g. MMIO BARs or PCI config regions)
/// can perform I/O operations on regions of memory.
///
/// This is an abstract representation to be implemented by arbitrary I/O
/// backends (e.g. MMIO, PCI config space, etc.).
///
/// The [`Io`] trait provides:
/// - Base address and size information
/// - Helper methods for offset validation and address calculation
/// - Fallible (runtime checked) accessors for different data widths
///
/// Which I/O methods are available depends on which [`IoCapable<T>`] traits
/// are implemented for the type.
///
/// Note that every default accessor body below is a [`build_error!`]: a backend that declares a
/// capability via [`IoCapable<T>`] is expected to override the corresponding methods, otherwise
/// calling them fails the build.
///
/// # Examples
///
/// For MMIO regions, all widths (u8, u16, u32, and u64 on 64-bit systems) are typically
/// supported. For PCI configuration space, u8, u16, and u32 are supported but u64 is not.
pub trait Io {
    /// Minimum usable size of this region.
    const MIN_SIZE: usize;

    /// Returns the base address of this mapping.
    fn addr(&self) -> usize;

    /// Returns the maximum size of this mapping.
    fn maxsize(&self) -> usize;

    /// Returns the absolute I/O address for a given `offset`,
    /// performing runtime bound checks.
    ///
    /// # Errors
    ///
    /// Returns [`EINVAL`] if the access would be out of bounds or misaligned for `U`.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::new` guarantee that
        // this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    /// Returns the absolute I/O address for a given `offset`,
    /// performing compile-time bound checks.
    ///
    /// The build fails unless `offset` is a compile-time constant that is valid for an access
    /// of type `U` within the backend's `MIN_SIZE`.
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(offset_valid::<U>(offset, Self::MIN_SIZE));

        self.addr() + offset
    }

    /// Fallible 8-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read8(&self, _offset: usize) -> Result<u8>
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit read")
    }

    /// Fallible 16-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read16(&self, _offset: usize) -> Result<u16>
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit read")
    }

    /// Fallible 32-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read32(&self, _offset: usize) -> Result<u32>
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit read")
    }

    /// Fallible 64-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read64(&self, _offset: usize) -> Result<u64>
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit read")
    }

    /// Fallible 8-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write8(&self, _value: u8, _offset: usize) -> Result
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit write")
    }

    /// Fallible 16-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write16(&self, _value: u16, _offset: usize) -> Result
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit write")
    }

    /// Fallible 32-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write32(&self, _value: u32, _offset: usize) -> Result
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit write")
    }

    /// Fallible 64-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write64(&self, _value: u64, _offset: usize) -> Result
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit write")
    }

    /// Infallible 8-bit read with compile-time bounds check.
    #[inline(always)]
    fn read8(&self, _offset: usize) -> u8
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit read")
    }

    /// Infallible 16-bit read with compile-time bounds check.
    #[inline(always)]
    fn read16(&self, _offset: usize) -> u16
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit read")
    }

    /// Infallible 32-bit read with compile-time bounds check.
    #[inline(always)]
    fn read32(&self, _offset: usize) -> u32
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit read")
    }

    /// Infallible 64-bit read with compile-time bounds check.
    #[inline(always)]
    fn read64(&self, _offset: usize) -> u64
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit read")
    }

    /// Infallible 8-bit write with compile-time bounds check.
    #[inline(always)]
    fn write8(&self, _value: u8, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit write")
    }

    /// Infallible 16-bit write with compile-time bounds check.
    #[inline(always)]
    fn write16(&self, _value: u16, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit write")
    }

    /// Infallible 32-bit write with compile-time bounds check.
    #[inline(always)]
    fn write32(&self, _value: u32, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit write")
    }

    /// Infallible 64-bit write with compile-time bounds check.
    #[inline(always)]
    fn write64(&self, _value: u64, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit write")
    }
}
422 
/// Marker trait for types with a known size at compile time.
///
/// This trait is implemented by I/O backends that have a compile-time known size,
/// enabling the use of infallible I/O accessors with compile-time bounds checking.
///
/// Types implementing this trait can use the infallible methods in [`Io`] trait
/// (e.g., `read8`, `write32`), which require `Self: IoKnownSize` bound.
pub trait IoKnownSize: Io {}
431 
// MMIO regions support 8, 16, and 32-bit accesses.
impl<const SIZE: usize> IoCapable<u8> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u16> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u32> for Mmio<SIZE> {}

// MMIO regions on 64-bit systems also support 64-bit accesses; on 32-bit kernels the
// corresponding `readq`/`writeq` helpers do not exist, so the capability is not declared.
#[cfg(CONFIG_64BIT)]
impl<const SIZE: usize> IoCapable<u64> for Mmio<SIZE> {}
440 
impl<const SIZE: usize> Io for Mmio<SIZE> {
    // The const generic doubles as the compile-time lower bound used by `io_addr_assert`.
    const MIN_SIZE: usize = SIZE;

    /// Returns the base address of this mapping.
    #[inline]
    fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    // Fallible (runtime-checked) reads, backed by the C `readb`/`readw`/`readl`/`readq` helpers.
    define_read!(fallible, try_read8, readb -> u8);
    define_read!(fallible, try_read16, readw -> u16);
    define_read!(fallible, try_read32, readl -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_read64,
        readq -> u64
    );

    // Fallible (runtime-checked) writes.
    define_write!(fallible, try_write8, writeb <- u8);
    define_write!(fallible, try_write16, writew <- u16);
    define_write!(fallible, try_write32, writel <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_write64,
        writeq <- u64
    );

    // Infallible (compile-time-checked) reads.
    define_read!(infallible, read8, readb -> u8);
    define_read!(infallible, read16, readw -> u16);
    define_read!(infallible, read32, readl -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        read64,
        readq -> u64
    );

    // Infallible (compile-time-checked) writes.
    define_write!(infallible, write8, writeb <- u8);
    define_write!(infallible, write16, writew <- u16);
    define_write!(infallible, write32, writel <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        write64,
        writeq <- u64
    );
}
496 
// `Mmio`'s size is known at compile time via the `SIZE` const generic, which enables the
// infallible, compile-time bounds-checked accessors of the [`Io`] trait.
impl<const SIZE: usize> IoKnownSize for Mmio<SIZE> {}
498 
impl<const SIZE: usize> Mmio<SIZE> {
    /// Converts an `MmioRaw` into an `Mmio` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `raw`'s `addr` is the start of a valid I/O mapped memory region
    /// of size `maxsize`.
    pub unsafe fn from_raw(raw: &MmioRaw<SIZE>) -> &Self {
        // SAFETY: `Mmio` is a transparent wrapper around `MmioRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    // Relaxed variants map to the C `*_relaxed` accessors, which skip the memory barriers of the
    // ordered accessors. These are inherent methods (not part of the `Io` trait).

    // Infallible (compile-time-checked) relaxed reads.
    define_read!(infallible, pub read8_relaxed, readb_relaxed -> u8);
    define_read!(infallible, pub read16_relaxed, readw_relaxed -> u16);
    define_read!(infallible, pub read32_relaxed, readl_relaxed -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub read64_relaxed,
        readq_relaxed -> u64
    );

    // Fallible (runtime-checked) relaxed reads.
    define_read!(fallible, pub try_read8_relaxed, readb_relaxed -> u8);
    define_read!(fallible, pub try_read16_relaxed, readw_relaxed -> u16);
    define_read!(fallible, pub try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_read64_relaxed,
        readq_relaxed -> u64
    );

    // Infallible (compile-time-checked) relaxed writes.
    define_write!(infallible, pub write8_relaxed, writeb_relaxed <- u8);
    define_write!(infallible, pub write16_relaxed, writew_relaxed <- u16);
    define_write!(infallible, pub write32_relaxed, writel_relaxed <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub write64_relaxed,
        writeq_relaxed <- u64
    );

    // Fallible (runtime-checked) relaxed writes.
    define_write!(fallible, pub try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(fallible, pub try_write16_relaxed, writew_relaxed <- u16);
    define_write!(fallible, pub try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_write64_relaxed,
        writeq_relaxed <- u64
    );
}
551