xref: /linux/rust/kernel/io.rs (revision eb3dad518e4da48ab6c6df16aa8895b8b0bd6ecf)
// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::{
    bindings,
    prelude::*, //
};

pub mod mem;
pub mod poll;
pub mod resource;

pub use resource::Resource;

/// Physical address type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type PhysAddr = bindings::phys_addr_t;

/// Resource Size type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
pub type ResourceSize = bindings::resource_size_t;

/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// `Mmio` instance, which provides the actual memory accessors. Only the conversion into an
/// `Mmio` instance gives any guarantees.
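///
/// # Examples
///
/// A minimal sketch of the size check performed by [`MmioRaw::new`]; the address below is
/// illustrative only and does not refer to a real mapping:
///
/// ```
/// use kernel::io::MmioRaw;
///
/// // A 16-byte backing region satisfies a compile-time minimum size of 4 bytes.
/// assert!(MmioRaw::<4>::new(0xBAAA_A000, 16).is_ok());
///
/// // A backing region smaller than the compile-time minimum size is rejected.
/// assert!(MmioRaw::<32>::new(0xBAAA_A000, 16).is_err());
/// ```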
pub struct MmioRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> MmioRaw<SIZE> {
    /// Returns a new `MmioRaw` instance on success, or `EINVAL` if `maxsize` is smaller than
    /// `SIZE`.
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// use kernel::{
///     bindings,
///     ffi::c_void,
///     io::{
///         Io,
///         IoKnownSize,
///         Mmio,
///         MmioRaw,
///         PhysAddr,
///     },
/// };
/// use core::ops::Deref;
///
/// // See also `pci::Bar` for a real example.
/// struct IoMem<const SIZE: usize>(MmioRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function, [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(MmioRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Mmio<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Mmio::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Mmio<const SIZE: usize = 0>(MmioRaw<SIZE>);

/// Internal helper macro used to invoke C MMIO read functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (e.g. `define_read!`) and
/// provides a unified expansion for infallible vs. fallible read semantics. It emits a direct call
/// into the corresponding C helper and performs the required cast to the Rust return type.
///
/// # Parameters
///
/// * `$c_fn` – The C function performing the MMIO read.
/// * `$self` – The I/O backend object.
/// * `$type` – The type of the value to be read.
/// * `$addr` – The MMIO address to read from.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
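///
/// # Examples
///
/// For illustration only (a sketch, not generated code), an infallible `u32` read expands
/// roughly to:
///
/// ```ignore
/// // SAFETY: upheld by the higher-level abstraction invoking the macro.
/// unsafe { bindings::readl(addr as *const c_void) as u32 }
/// ```
///
/// while the fallible variant wraps the same call in `Ok(..)`.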
macro_rules! call_mmio_read {
    (infallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($addr as *const c_void) as $type }
    };

    (fallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {{
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        Ok(unsafe { bindings::$c_fn($addr as *const c_void) as $type })
    }};
}

/// Internal helper macro used to invoke C MMIO write functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (e.g. `define_write!`) and
/// provides a unified expansion for infallible vs. fallible write semantics. It emits a direct
/// call into the corresponding C helper.
///
/// # Parameters
///
/// * `$c_fn` – The C function performing the MMIO write.
/// * `$self` – The I/O backend object.
/// * `$ty` – The type of the value to be written.
/// * `$addr` – The MMIO address to write to.
/// * `$value` – The value to write.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
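///
/// For illustration only, an infallible `u32` write expands roughly to
/// `unsafe { bindings::writel(value, addr as *mut c_void) }`; the fallible variant additionally
/// returns `Ok(())`.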
macro_rules! call_mmio_write {
    (infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($value, $addr as *mut c_void) }
    };

    (fallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {{
        // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
        unsafe { bindings::$c_fn($value, $addr as *mut c_void) };
        Ok(())
    }};
}

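/// Internal helper macro defining read accessor methods on an I/O backend.
///
/// The `infallible` form generates a method whose bounds check happens at compile time via
/// `io_addr_assert`, while the `fallible` form generates a `try_*` method checked at runtime via
/// `io_addr`. For illustration, this file invokes it as, e.g.:
///
/// ```ignore
/// define_read!(fallible, try_read8, call_mmio_read(readb) -> u8);
/// ```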
macro_rules! define_read {
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) ->
     $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        // Always inline to optimize out error path of `io_addr_assert`.
        #[inline(always)]
        $vis fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for IO operations.
            $call_macro!(infallible, $c_fn, self, $type_name, addr)
        }
    };

    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) ->
     $type_name:ty) => {
        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for IO operations.
            $call_macro!(fallible, $c_fn, self, $type_name, addr)
        }
    };
}
pub(crate) use define_read;

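/// Internal helper macro defining write accessor methods on an I/O backend.
///
/// Analogous to `define_read!`: the `infallible` form is bounds-checked at compile time via
/// `io_addr_assert`, the `fallible` form at runtime via `io_addr`. For illustration, this file
/// invokes it as, e.g.:
///
/// ```ignore
/// define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8);
/// ```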
macro_rules! define_write {
    (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) <-
     $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        // Always inline to optimize out error path of `io_addr_assert`.
        #[inline(always)]
        $vis fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            $call_macro!(infallible, $c_fn, self, $type_name, addr, value);
        }
    };

    (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) <-
     $type_name:ty) => {
        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        $vis fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            $call_macro!(fallible, $c_fn, self, $type_name, addr, value)
        }
    };
}
pub(crate) use define_write;

/// Checks whether an access of type `U` at the given `offset` is valid within a region of the
/// given `size`.
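///
/// The access must not overflow, must end within `size`, and `offset` must be aligned to
/// `size_of::<U>()`. For illustration, within a 4-byte region: `offset_valid::<u32>(0, 4)` is
/// `true`, while `offset_valid::<u16>(1, 4)` (unaligned) and `offset_valid::<u32>(4, 4)`
/// (ends past the region) are `false`.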
#[inline]
const fn offset_valid<U>(offset: usize, size: usize) -> bool {
    let type_size = core::mem::size_of::<U>();
    if let Some(end) = offset.checked_add(type_size) {
        end <= size && offset % type_size == 0
    } else {
        false
    }
}

/// Marker trait indicating that an I/O backend supports operations of a certain type.
///
/// Different I/O backends can implement this trait to expose only the operations they support.
///
/// For example, a PCI configuration space may implement `IoCapable<u8>`, `IoCapable<u16>`,
/// and `IoCapable<u32>`, but not `IoCapable<u64>`, while an MMIO region on a 64-bit
/// system might implement all four.
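///
/// For illustration only, a hypothetical backend limited to 8- and 32-bit accesses would declare
/// (the type `MyConfigSpace` is not part of this module):
///
/// ```ignore
/// impl IoCapable<u8> for MyConfigSpace {}
/// impl IoCapable<u32> for MyConfigSpace {}
/// ```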
pub trait IoCapable<T> {}

/// Types implementing this trait (e.g. MMIO BARs or PCI config regions)
/// can perform I/O operations on regions of memory.
///
/// This is an abstract representation to be implemented by arbitrary I/O
/// backends (e.g. MMIO, PCI config space, etc.).
///
/// The [`Io`] trait provides:
/// - Base address and size information
/// - Helper methods for offset validation and address calculation
/// - Fallible (runtime-checked) accessors for different data widths
///
/// Which I/O methods are available depends on which [`IoCapable<T>`] traits
/// are implemented for the type.
///
/// # Examples
///
/// For MMIO regions, all widths (`u8`, `u16`, `u32`, and `u64` on 64-bit systems) are typically
/// supported. For PCI configuration space, `u8`, `u16`, and `u32` are supported but `u64` is not.
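///
/// A minimal sketch of a helper that is generic over any backend supporting 32-bit reads (the
/// register offset below is illustrative only):
///
/// ```no_run
/// use kernel::io::{Io, IoCapable};
///
/// // Reads a hypothetical 32-bit ID register at offset `0x0`.
/// fn read_id<T: Io + IoCapable<u32>>(io: &T) -> Result<u32> {
///     io.try_read32(0x0)
/// }
/// # let _ = read_id::<kernel::io::Mmio<4>>;
/// ```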
pub trait Io {
    /// Minimum usable size of this region.
    const MIN_SIZE: usize;

    /// Returns the base address of this mapping.
    fn addr(&self) -> usize;

    /// Returns the maximum size of this mapping.
    fn maxsize(&self) -> usize;

    /// Returns the absolute I/O address for a given `offset`, performing runtime bounds checks.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of the backend's constructor
        // guarantee that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    /// Returns the absolute I/O address for a given `offset`, performing compile-time bounds
    /// checks.
    // Always inline to optimize out error path of `build_assert`.
    #[inline(always)]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(offset_valid::<U>(offset, Self::MIN_SIZE));

        self.addr() + offset
    }

    /// Fallible 8-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read8(&self, _offset: usize) -> Result<u8>
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit read")
    }

    /// Fallible 16-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read16(&self, _offset: usize) -> Result<u16>
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit read")
    }

    /// Fallible 32-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read32(&self, _offset: usize) -> Result<u32>
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit read")
    }

    /// Fallible 64-bit read with runtime bounds check.
    #[inline(always)]
    fn try_read64(&self, _offset: usize) -> Result<u64>
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit read")
    }

    /// Fallible 8-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write8(&self, _value: u8, _offset: usize) -> Result
    where
        Self: IoCapable<u8>,
    {
        build_error!("Backend does not support fallible 8-bit write")
    }

    /// Fallible 16-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write16(&self, _value: u16, _offset: usize) -> Result
    where
        Self: IoCapable<u16>,
    {
        build_error!("Backend does not support fallible 16-bit write")
    }

    /// Fallible 32-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write32(&self, _value: u32, _offset: usize) -> Result
    where
        Self: IoCapable<u32>,
    {
        build_error!("Backend does not support fallible 32-bit write")
    }

    /// Fallible 64-bit write with runtime bounds check.
    #[inline(always)]
    fn try_write64(&self, _value: u64, _offset: usize) -> Result
    where
        Self: IoCapable<u64>,
    {
        build_error!("Backend does not support fallible 64-bit write")
    }

    /// Infallible 8-bit read with compile-time bounds check.
    #[inline(always)]
    fn read8(&self, _offset: usize) -> u8
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit read")
    }

    /// Infallible 16-bit read with compile-time bounds check.
    #[inline(always)]
    fn read16(&self, _offset: usize) -> u16
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit read")
    }

    /// Infallible 32-bit read with compile-time bounds check.
    #[inline(always)]
    fn read32(&self, _offset: usize) -> u32
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit read")
    }

    /// Infallible 64-bit read with compile-time bounds check.
    #[inline(always)]
    fn read64(&self, _offset: usize) -> u64
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit read")
    }

    /// Infallible 8-bit write with compile-time bounds check.
    #[inline(always)]
    fn write8(&self, _value: u8, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u8>,
    {
        build_error!("Backend does not support infallible 8-bit write")
    }

    /// Infallible 16-bit write with compile-time bounds check.
    #[inline(always)]
    fn write16(&self, _value: u16, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u16>,
    {
        build_error!("Backend does not support infallible 16-bit write")
    }

    /// Infallible 32-bit write with compile-time bounds check.
    #[inline(always)]
    fn write32(&self, _value: u32, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u32>,
    {
        build_error!("Backend does not support infallible 32-bit write")
    }

    /// Infallible 64-bit write with compile-time bounds check.
    #[inline(always)]
    fn write64(&self, _value: u64, _offset: usize)
    where
        Self: IoKnownSize + IoCapable<u64>,
    {
        build_error!("Backend does not support infallible 64-bit write")
    }
}

/// Marker trait for types with a known size at compile time.
///
/// This trait is implemented by I/O backends that have a compile-time known size,
/// enabling the use of infallible I/O accessors with compile-time bounds checking.
///
/// Types implementing this trait can use the infallible methods of the [`Io`] trait
/// (e.g., `read8`, `write32`), which require the `Self: IoKnownSize` bound.
pub trait IoKnownSize: Io {}

// MMIO regions support 8-, 16-, and 32-bit accesses.
impl<const SIZE: usize> IoCapable<u8> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u16> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u32> for Mmio<SIZE> {}

// MMIO regions on 64-bit systems also support 64-bit accesses.
#[cfg(CONFIG_64BIT)]
impl<const SIZE: usize> IoCapable<u64> for Mmio<SIZE> {}

impl<const SIZE: usize> Io for Mmio<SIZE> {
    const MIN_SIZE: usize = SIZE;

    /// Returns the base address of this mapping.
    #[inline]
    fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    define_read!(fallible, try_read8, call_mmio_read(readb) -> u8);
    define_read!(fallible, try_read16, call_mmio_read(readw) -> u16);
    define_read!(fallible, try_read32, call_mmio_read(readl) -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_read64,
        call_mmio_read(readq) -> u64
    );

    define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8);
    define_write!(fallible, try_write16, call_mmio_write(writew) <- u16);
    define_write!(fallible, try_write32, call_mmio_write(writel) <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        try_write64,
        call_mmio_write(writeq) <- u64
    );

    define_read!(infallible, read8, call_mmio_read(readb) -> u8);
    define_read!(infallible, read16, call_mmio_read(readw) -> u16);
    define_read!(infallible, read32, call_mmio_read(readl) -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        read64,
        call_mmio_read(readq) -> u64
    );

    define_write!(infallible, write8, call_mmio_write(writeb) <- u8);
    define_write!(infallible, write16, call_mmio_write(writew) <- u16);
    define_write!(infallible, write32, call_mmio_write(writel) <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        write64,
        call_mmio_write(writeq) <- u64
    );
}

impl<const SIZE: usize> IoKnownSize for Mmio<SIZE> {}

impl<const SIZE: usize> Mmio<SIZE> {
    /// Converts an `MmioRaw` into an `Mmio` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `raw.addr()` is the start of a valid I/O mapped memory region of
    /// size `raw.maxsize()`.
    pub unsafe fn from_raw(raw: &MmioRaw<SIZE>) -> &Self {
        // SAFETY: `Mmio` is a transparent wrapper around `MmioRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

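    // The `*_relaxed` accessors map to the corresponding C `read*_relaxed()`/`write*_relaxed()`
    // helpers, which are not ordered against normal memory accesses (e.g. DMA buffers); see
    // `include/asm-generic/io.h`.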
    define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
    define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
    define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
    define_read!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub read64_relaxed,
        call_mmio_read(readq_relaxed) -> u64
    );

    define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
    define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
    define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
    define_read!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_read64_relaxed,
        call_mmio_read(readq_relaxed) -> u64
    );

    define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
    define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
    define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
    define_write!(
        infallible,
        #[cfg(CONFIG_64BIT)]
        pub write64_relaxed,
        call_mmio_write(writeq_relaxed) <- u64
    );

    define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
    define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
    define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
    define_write!(
        fallible,
        #[cfg(CONFIG_64BIT)]
        pub try_write64_relaxed,
        call_mmio_write(writeq_relaxed) <- u64
    );
}