// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::{
    bindings,
    ffi::c_void,
    prelude::*, //
};

pub mod mem;
pub mod poll;
pub mod resource;

pub use resource::Resource;

/// Physical address type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type PhysAddr = bindings::phys_addr_t;

/// Resource size type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type ResourceSize = bindings::resource_size_t;

/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantee that
/// the represented MMIO region exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// [`Mmio`] instance, which provides the actual memory accessors. Only this conversion
/// establishes any guarantees.
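///
/// # Examples
///
/// A minimal sketch of constructing the raw representation. The address below is an arbitrary
/// example value; no mapping is created or validated at this point.
///
/// ```
/// use kernel::io::MmioRaw;
///
/// // A raw descriptor only stores the values; it gives no mapping guarantees.
/// let raw = MmioRaw::<4>::new(0xBAAAAAAD, 4)?;
/// assert_eq!(raw.addr(), 0xBAAAAAAD);
/// assert_eq!(raw.maxsize(), 4);
///
/// // A `maxsize` smaller than `SIZE` is rejected.
/// assert!(MmioRaw::<8>::new(0xBAAAAAAD, 4).is_err());
/// # Ok::<(), Error>(())
/// ```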
pub struct MmioRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> MmioRaw<SIZE> {
    /// Returns a new `MmioRaw` instance on success, an error otherwise.
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// use kernel::{
///     bindings,
///     ffi::c_void,
///     io::{
///         Io,
///         IoKnownSize,
///         Mmio,
///         MmioRaw,
///         PhysAddr,
///     },
/// };
/// use core::ops::Deref;
///
/// // See also `pci::Bar` for a real example.
/// struct IoMem<const SIZE: usize>(MmioRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(MmioRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Mmio<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Mmio::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Mmio<const SIZE: usize = 0>(MmioRaw<SIZE>);

/// Internal helper macro used to invoke C MMIO read functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (e.g. `define_read`)
/// and provides a unified expansion for infallible vs. fallible read semantics. It emits a direct
/// call into the corresponding C helper and performs the required cast to the Rust return type.
///
/// # Parameters
///
/// * `$c_fn` – The C function performing the MMIO read.
/// * `$self` – The I/O backend object.
/// * `$type` – The type of the value to be read.
/// * `$addr` – The MMIO address to read from.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
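///
/// # Examples
///
/// A rough sketch (assumed, not literal compiler output) of what an infallible 32-bit read
/// expands to:
///
/// ```text
/// // call_mmio_read!(infallible, readl, self, u32, addr) roughly becomes:
/// unsafe { bindings::readl(addr as *const c_void) as u32 }
/// ```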
macro_rules! call_mmio_read {
    (infallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($addr as *const c_void) as $type }
    };

    (fallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {{
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        Ok(unsafe { bindings::$c_fn($addr as *const c_void) as $type })
    }};
}

/// Internal helper macro used to invoke C MMIO write functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (e.g. `define_write`)
/// and provides a unified expansion for infallible vs. fallible write semantics. It emits a
/// direct call into the corresponding C helper.
///
/// # Parameters
///
/// * `$c_fn` – The C function performing the MMIO write.
/// * `$self` – The I/O backend object.
/// * `$ty` – The type of the written value.
/// * `$addr` – The MMIO address to write to.
/// * `$value` – The value to write.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
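///
/// # Examples
///
/// A rough sketch (assumed, not literal compiler output) of what a fallible 32-bit write
/// expands to:
///
/// ```text
/// // call_mmio_write!(fallible, writel, self, u32, addr, value) roughly becomes:
/// unsafe { bindings::writel(value, addr as *mut c_void) };
/// Ok(())
/// ```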
macro_rules! call_mmio_write {
    (infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($value, $addr as *mut c_void) }
    };

    (fallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {{
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($value, $addr as *mut c_void) };
        Ok(())
    }};
}

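/// Defines a read accessor method for a single data width.
///
/// The `infallible` flavor performs a compile-time bounds check via `io_addr_assert`, the
/// `fallible` flavor a runtime bounds check via `io_addr`. See the [`Io`] implementation for
/// [`Mmio`] below for concrete invocations.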
macro_rules! define_read {
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) ->
     $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        $vis fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for IO operations.
            $call_macro!(infallible, $c_fn, self, $type_name, addr)
        }
    };

    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) ->
     $type_name:ty) => {
        /// Read IO data from a given offset.
        ///
        /// Bound checks are performed at runtime; the call fails if the offset (plus the type
        /// size) is out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for IO operations.
            $call_macro!(fallible, $c_fn, self, $type_name, addr)
        }
    };
}
pub(crate) use define_read;

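/// Defines a write accessor method for a single data width; the counterpart of `define_read`.
///
/// The `infallible` flavor performs a compile-time bounds check via `io_addr_assert`, the
/// `fallible` flavor a runtime bounds check via `io_addr`. See the [`Io`] implementation for
/// [`Mmio`] below for concrete invocations.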
macro_rules! define_write {
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) <-
     $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        $vis fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            $call_macro!(infallible, $c_fn, self, $type_name, addr, value);
        }
    };

    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) <-
     $type_name:ty) => {
        /// Write IO data to a given offset.
        ///
        /// Bound checks are performed at runtime; the call fails if the offset (plus the type
        /// size) is out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            $call_macro!(fallible, $c_fn, self, $type_name, addr, value)
        }
    };
}
pub(crate) use define_write;

/// Checks whether an access of type `U` at the given `offset` is valid within a region of the
/// given `size`, i.e. whether the access is in bounds and properly aligned.
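///
/// A few illustrative cases for a `u32` access (`size_of::<u32>() == 4`):
///
/// ```text
/// offset_valid::<u32>(0, 8) == true   // in bounds and 4-byte aligned
/// offset_valid::<u32>(2, 8) == false  // unaligned
/// offset_valid::<u32>(6, 8) == false  // 6 + 4 > 8, out of bounds
/// ```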
#[inline]
const fn offset_valid<U>(offset: usize, size: usize) -> bool {
    let type_size = core::mem::size_of::<U>();
    if let Some(end) = offset.checked_add(type_size) {
        end <= size && offset % type_size == 0
    } else {
        false
    }
}

/// Marker trait indicating that an I/O backend supports operations of a certain type.
///
/// Different I/O backends can implement this trait to expose only the operations they support.
///
/// For example, a PCI configuration space may implement `IoCapable<u8>`, `IoCapable<u16>`,
/// and `IoCapable<u32>`, but not `IoCapable<u64>`, while an MMIO region on a 64-bit
/// system might implement all four.
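///
/// # Examples
///
/// A sketch of a hypothetical backend that only supports 8-bit and 16-bit accesses (`FooIo` is
/// illustrative and does not exist in the kernel):
///
/// ```
/// struct FooIo;
///
/// impl kernel::io::IoCapable<u8> for FooIo {}
/// impl kernel::io::IoCapable<u16> for FooIo {}
/// ```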
pub trait IoCapable<T> {}

/// Types implementing this trait (e.g. MMIO BARs or PCI config regions)
/// can perform I/O operations on regions of memory.
///
/// This is an abstract representation to be implemented by arbitrary I/O
/// backends (e.g. MMIO, PCI config space, etc.).
///
/// The [`Io`] trait provides:
/// - Base address and size information
/// - Helper methods for offset validation and address calculation
/// - Fallible (runtime checked) accessors for different data widths
///
/// Which I/O methods are available depends on which [`IoCapable<T>`] traits
/// are implemented for the type.
///
/// # Examples
///
/// For MMIO regions, all widths (`u8`, `u16`, `u32`, and `u64` on 64-bit systems) are typically
/// supported. For PCI configuration space, `u8`, `u16`, and `u32` are supported but `u64` is not.
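///
/// A sketch of generic code over any backend that supports 32-bit accesses (the helper name
/// `read_status` and the register offset are illustrative only):
///
/// ```
/// use kernel::io::{Io, IoCapable};
///
/// // Reads a hypothetical 32-bit status register at offset 0x0 from any capable backend.
/// fn read_status<T: Io + IoCapable<u32>>(io: &T) -> Result<u32> {
///     io.try_read32(0x0)
/// }
/// ```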
pub trait Io {
    /// Minimum usable size of this region.
    const MIN_SIZE: usize;

    /// Returns the base address of this mapping.
    fn addr(&self) -> usize;

    /// Returns the maximum size of this mapping.
    fn maxsize(&self) -> usize;

    /// Returns the absolute I/O address for a given `offset`,
    /// performing runtime bound checks.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the invariants of the implementing type guarantee
        // that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    /// Returns the absolute I/O address for a given `offset`,
    /// performing compile-time bound checks.
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(offset_valid::<U>(offset, Self::MIN_SIZE));

        self.addr() + offset
    }

    /// Fallible 8-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read8(&self, _offset: usize) -> Result<u8>
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit read")
    }

    /// Fallible 16-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read16(&self, _offset: usize) -> Result<u16>
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit read")
    }

    /// Fallible 32-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read32(&self, _offset: usize) -> Result<u32>
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit read")
    }

    /// Fallible 64-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read64(&self, _offset: usize) -> Result<u64>
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit read")
    }

    /// Fallible 8-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write8(&self, _value: u8, _offset: usize) -> Result
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit write")
    }

    /// Fallible 16-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write16(&self, _value: u16, _offset: usize) -> Result
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit write")
    }

    /// Fallible 32-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write32(&self, _value: u32, _offset: usize) -> Result
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit write")
    }

    /// Fallible 64-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write64(&self, _value: u64, _offset: usize) -> Result
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit write")
    }

    /// Infallible 8-bit read with compile-time bounds check.
    #[inline(always)]
    fn read8(&self, _offset: usize) -> u8
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit read")
    }

    /// Infallible 16-bit read with compile-time bounds check.
    #[inline(always)]
    fn read16(&self, _offset: usize) -> u16
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit read")
    }

    /// Infallible 32-bit read with compile-time bounds check.
    #[inline(always)]
    fn read32(&self, _offset: usize) -> u32
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit read")
    }

    /// Infallible 64-bit read with compile-time bounds check.
    #[inline(always)]
    fn read64(&self, _offset: usize) -> u64
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit read")
    }

    /// Infallible 8-bit write with compile-time bounds check.
    #[inline(always)]
    fn write8(&self, _value: u8, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit write")
    }

    /// Infallible 16-bit write with compile-time bounds check.
    #[inline(always)]
    fn write16(&self, _value: u16, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit write")
    }

    /// Infallible 32-bit write with compile-time bounds check.
    #[inline(always)]
    fn write32(&self, _value: u32, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit write")
    }

    /// Infallible 64-bit write with compile-time bounds check.
    #[inline(always)]
    fn write64(&self, _value: u64, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit write")
    }
}

/// Marker trait for types with a known size at compile time.
///
/// This trait is implemented by I/O backends that have a compile-time known size,
/// enabling the use of infallible I/O accessors with compile-time bounds checking.
///
/// Types implementing this trait can use the infallible methods of the [`Io`] trait
/// (e.g. [`Io::read8`], [`Io::write32`]), which require a `Self: IoKnownSize` bound.
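///
/// For instance, [`Mmio`] implements this trait for any `SIZE`; see the example on [`Mmio`] for
/// how a compile-time `SIZE` enables `read32`/`write32` with compile-time bounds checks.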
pub trait IoKnownSize: Io {}

// MMIO regions support 8-, 16-, and 32-bit accesses.
impl<const SIZE: usize> IoCapable<u8> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u16> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u32> for Mmio<SIZE> {}

// MMIO regions on 64-bit systems also support 64-bit accesses.
#[cfg(CONFIG_64BIT)]
impl<const SIZE: usize> IoCapable<u64> for Mmio<SIZE> {}

impl<const SIZE: usize> Io for Mmio<SIZE> {
    const MIN_SIZE: usize = SIZE;

    /// Returns the base address of this mapping.
    #[inline]
    fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    define_read!(fallible, try_read8, call_mmio_read(readb) -> u8);
    define_read!(fallible, try_read16, call_mmio_read(readw) -> u16);
    define_read!(fallible, try_read32, call_mmio_read(readl) -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_read64,
        call_mmio_read(readq) -> u64
    );

    define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8);
    define_write!(fallible, try_write16, call_mmio_write(writew) <- u16);
    define_write!(fallible, try_write32, call_mmio_write(writel) <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_write64,
        call_mmio_write(writeq) <- u64
    );

    define_read!(infallible, read8, call_mmio_read(readb) -> u8);
    define_read!(infallible, read16, call_mmio_read(readw) -> u16);
    define_read!(infallible, read32, call_mmio_read(readl) -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        read64,
        call_mmio_read(readq) -> u64
    );

    define_write!(infallible, write8, call_mmio_write(writeb) <- u8);
    define_write!(infallible, write16, call_mmio_write(writew) <- u16);
    define_write!(infallible, write32, call_mmio_write(writel) <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        write64,
        call_mmio_write(writeq) <- u64
    );
}

impl<const SIZE: usize> IoKnownSize for Mmio<SIZE> {}

impl<const SIZE: usize> Mmio<SIZE> {
    /// Converts an [`MmioRaw`] into an [`Mmio`] instance, providing the accessors to the MMIO
    /// mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `raw.addr()` is the start of a valid I/O mapped memory region of
    /// size `raw.maxsize()`.
    pub unsafe fn from_raw(raw: &MmioRaw<SIZE>) -> &Self {
        // SAFETY: `Mmio` is a transparent wrapper around `MmioRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
    define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
    define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub read64_relaxed,
        call_mmio_read(readq_relaxed) -> u64
    );

    define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
    define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
    define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_read64_relaxed,
        call_mmio_read(readq_relaxed) -> u64
    );

    define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
    define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
    define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub write64_relaxed,
        call_mmio_write(writeq_relaxed) <- u64
    );

    define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
    define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
    define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_write64_relaxed,
        call_mmio_write(writeq_relaxed) <- u64
    );
}