// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert};
/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region exists or is properly mapped.
///
/// Instead, the bus specific MMIO implementation must convert this raw representation into an `Io`
/// instance providing the actual memory accessors. Only the conversion into an `Io` structure
/// provides any guarantees.
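///
/// # Examples
///
/// A minimal sketch: `0x1000` below is an arbitrary placeholder address; `IoRaw` only stores the
/// values, nothing is dereferenced.
///
/// ```
/// # use kernel::io::IoRaw;
/// // A 16-byte region is large enough to back an `IoRaw` requiring at least 4 bytes.
/// assert!(IoRaw::<4>::new(0x1000, 16).is_ok());
/// // A region smaller than `SIZE` is rejected with `EINVAL`.
/// assert!(IoRaw::<4>::new(0x1000, 2).is_err());
/// ```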
pub struct IoRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, an error otherwise.
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory, starting at the base address `addr` and spanning `maxsize` bytes.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as _) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.writel(0x42, 0x0);
/// assert!(iomem.try_writel(0x42, 0x0).is_ok());
/// assert!(iomem.try_writel(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$name(addr as _) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$name(addr as _) })
        }
    };
}
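
// For instance, `define_read!(readl, try_readl, u32)` below generates `Io::readl()`, which
// bounds-checks the offset at build time, and `Io::try_readl()`, which checks it at runtime and
// returns a `Result`; both forward to `bindings::readl()`.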

macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$name(value, addr as _) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$name(value, addr as _) }
            Ok(())
        }
    };
}
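
// Likewise, `define_write!(writel, try_writel, u32)` below generates the build-time checked
// `Io::writel()` and the runtime-checked `Io::try_writel()`, both wrapping `bindings::writel()`.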

impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
    /// `maxsize`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

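    /// Returns `true` if an access of type `U` at `offset` is properly aligned and fits within
    /// `size` bytes.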
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

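    /// Returns the absolute I/O address for `offset`, or `EINVAL` if an access of type `U` at
    /// that offset would be misaligned or out of bounds.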
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::from_raw` guarantee
        // that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

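    /// Returns the absolute I/O address for `offset`, asserting at build time that an access of
    /// type `U` at that offset is properly aligned and stays within `SIZE` bytes.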
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(readb, try_readb, u8);
    define_read!(readw, try_readw, u16);
    define_read!(readl, try_readl, u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        readq,
        try_readq,
        u64
    );

    define_read!(readb_relaxed, try_readb_relaxed, u8);
    define_read!(readw_relaxed, try_readw_relaxed, u16);
    define_read!(readl_relaxed, try_readl_relaxed, u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        readq_relaxed,
        try_readq_relaxed,
        u64
    );

    define_write!(writeb, try_writeb, u8);
    define_write!(writew, try_writew, u16);
    define_write!(writel, try_writel, u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        writeq,
        try_writeq,
        u64
    );

    define_write!(writeb_relaxed, try_writeb_relaxed, u8);
    define_write!(writew_relaxed, try_writew_relaxed, u16);
    define_write!(writel_relaxed, try_writel_relaxed, u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        writeq_relaxed,
        try_writeq_relaxed,
        u64
    );
}