// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::fs::File;
use std::intrinsics::copy_nonoverlapping;
use std::io;
use std::mem::size_of;
use std::ptr::read_unaligned;
use std::ptr::read_volatile;
use std::ptr::write_unaligned;
use std::ptr::write_volatile;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;
use std::sync::OnceLock;

use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::descriptor::AsRawDescriptor;
use crate::descriptor::SafeDescriptor;
use crate::platform::MemoryMapping as PlatformMmap;
use crate::SharedMemory;
use crate::VolatileMemory;
use crate::VolatileMemoryError;
use crate::VolatileMemoryResult;
use crate::VolatileSlice;

static CACHELINE_SIZE: OnceLock<usize> = OnceLock::new();

#[allow(unused_assignments)]
fn get_cacheline_size_once() -> usize {
    let mut assume_reason: &str = "unknown";
    cfg_if::cfg_if! {
        if #[cfg(all(any(target_os = "android", target_os = "linux"), not(target_env = "musl")))] {
            // TODO: Remove once available in libc bindings
            #[cfg(target_os = "android")]
            const _SC_LEVEL1_DCACHE_LINESIZE: i32 = 0x0094;
            #[cfg(target_os = "linux")]
            use libc::_SC_LEVEL1_DCACHE_LINESIZE;

            // SAFETY:
            // Safe because we check the return value for errors or unsupported requests
            let linesize = unsafe { libc::sysconf(_SC_LEVEL1_DCACHE_LINESIZE) };
            if linesize > 0 {
                return linesize as usize;
            } else {
                assume_reason = "sysconf cacheline size query failed";
            }
        } else {
            assume_reason = "cacheline size query not implemented for platform/arch";
        }
    }

    let assumed_size = 64;
    log::debug!(
        "assuming cacheline_size={}; reason: {}.",
        assumed_size,
        assume_reason
    );
    assumed_size
}

/// Returns the system's effective cacheline size (e.g. the granularity at which arch-specific
/// cacheline management, such as with the clflush instruction, is expected to occur).
#[inline(always)]
fn get_cacheline_size() -> usize {
    let size = *CACHELINE_SIZE.get_or_init(get_cacheline_size_once);
    assert!(size > 0);
    size
}

#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("`add_fd_mapping` is unsupported")]
    AddFdMappingIsUnsupported,
    #[error("requested memory out of range")]
    InvalidAddress,
    #[error("requested alignment is incompatible")]
    InvalidAlignment,
    #[error("invalid argument provided when creating mapping")]
    InvalidArgument,
    #[error("requested offset is out of range of off_t")]
    InvalidOffset,
    #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
    InvalidRange(usize, usize, usize),
    #[error("operation is not implemented on platform/architecture: {0}")]
    NotImplemented(&'static str),
    #[error("requested memory is not page aligned")]
    NotPageAligned,
    #[error("failed to read from file to memory: {0}")]
    ReadToMemory(#[source] io::Error),
    #[error("`remove_mapping` is unsupported")]
    RemoveMappingIsUnsupported,
    #[error("system call failed while creating the mapping: {0}")]
    StdSyscallFailed(io::Error),
    #[error("mmap related system call failed: {0}")]
    SystemCallFailed(#[source] crate::Error),
    #[error("failed to write from memory to file: {0}")]
    WriteFromMemory(#[source] io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

/// Memory access type for anonymous shared memory mapping.
#[derive(Copy, Clone, Default, Eq, PartialEq, Serialize, Deserialize, Debug)]
pub struct Protection {
    pub(crate) read: bool,
    pub(crate) write: bool,
}

impl Protection {
    /// Returns Protection allowing read/write access.
    #[inline(always)]
    pub fn read_write() -> Protection {
        Protection {
            read: true,
            write: true,
        }
    }

    /// Returns Protection allowing read access.
    #[inline(always)]
    pub fn read() -> Protection {
        Protection {
            read: true,
            ..Default::default()
        }
    }

    /// Returns Protection allowing write access.
    #[inline(always)]
    pub fn write() -> Protection {
        Protection {
            write: true,
            ..Default::default()
        }
    }

    /// Set read access.
    #[inline(always)]
    pub fn set_read(self) -> Protection {
        Protection { read: true, ..self }
    }

    /// Set write access.
    #[inline(always)]
    pub fn set_write(self) -> Protection {
        Protection {
            write: true,
            ..self
        }
    }

    /// Returns true if all access allowed by |other| is also allowed by |self|.
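    ///
    /// A minimal doctest sketch (assuming `Protection` is re-exported at the crate root, like
    /// the other `base` types used in this file's examples):
    ///
    /// ```
    /// use base::Protection;
    /// assert!(Protection::read_write().allows(&Protection::read()));
    /// assert!(!Protection::read().allows(&Protection::write()));
    /// ```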
    #[inline(always)]
    pub fn allows(&self, other: &Protection) -> bool {
        self.read >= other.read && self.write >= other.write
    }
}

/// See [MemoryMapping](crate::platform::MemoryMapping) for struct- and method-level
/// documentation.
#[derive(Debug)]
pub struct MemoryMapping {
    pub(crate) mapping: PlatformMmap,

    // File backed mappings on Windows need to keep the underlying file open while the mapping is
    // open.
    // This will be `None` in the non-Windows case. The variable is never read, hence the
    // leading underscore in its name.
    //
    // TODO(b:230902713) There was a concern about relying on the kernel's refcounting to keep the
    // file object's locks (e.g. exclusive read/write) in place. We need to revisit/validate that
    // concern.
    pub(crate) _file_descriptor: Option<SafeDescriptor>,
}

#[inline(always)]
unsafe fn flush_one(_addr: *const u8) -> Result<()> {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            // As per table 11-7 of the SDM, processors are not required to
            // snoop UC mappings, so flush the target to memory.
            // SAFETY: assumes that the caller has supplied a valid address.
            unsafe { core::arch::x86_64::_mm_clflush(_addr) };
            Ok(())
        } else if #[cfg(target_arch = "aarch64")] {
            // Data cache clean by VA to PoC.
            std::arch::asm!("DC CVAC, {x}", x = in(reg) _addr);
            Ok(())
        } else if #[cfg(target_arch = "arm")] {
            Err(Error::NotImplemented("Userspace cannot flush to PoC"))
        } else {
            Err(Error::NotImplemented("Cache flush not implemented"))
        }
    }
}

impl MemoryMapping {
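    /// Writes `buf` into the mapping starting at `offset`, truncating the copy at the end of
    /// the mapping, and returns the number of bytes actually written. Returns an error if
    /// `offset` is greater than the size of the mapping.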
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        match self.mapping.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

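    /// Reads from the mapping starting at `offset` into `buf`, truncating the copy at the end
    /// of the mapping, and returns the number of bytes actually read. Returns an error if
    /// `offset` is greater than the size of the mapping.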
    pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
        match self.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(self.as_ptr().add(offset), buf.as_mut_ptr(), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// This method is for writing to regular memory. If writing to a mapped
    /// I/O region, use [`MemoryMapping::write_obj_volatile`].
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj(55u64, 16);
    ///     assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_unaligned(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// This method is for reading from regular memory. If reading from a
    /// mapped I/O region, use [`MemoryMapping::read_obj_volatile`].
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    ///     let res = mem_map.write_obj(55u64, 32);
    ///     assert!(res.is_ok());
    ///     let num: u64 = mem_map.read_obj(32).unwrap();
    ///     assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because by definition Copy types can have their bits set arbitrarily and
        // still be valid.
        unsafe {
            Ok(read_unaligned(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`MemoryMapping::write_obj`].
    ///
    /// # Examples
    /// * Write a u32 at offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj_volatile(0xf00u32, 16);
    ///     assert!(res.is_ok());
    /// ```
    pub fn write_obj_volatile<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // Make sure writes to memory have been committed before performing I/O that could
        // potentially depend on them.
        fence(Ordering::SeqCst);
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_volatile(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`MemoryMapping::read_obj`].
    ///
    /// # Examples
    /// * Read a u32 written to offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj(0xf00u32, 16);
    ///     assert!(res.is_ok());
    ///     let num: u32 = mem_map.read_obj_volatile(16).unwrap();
    ///     assert_eq!(0xf00, num);
    /// ```
    pub fn read_obj_volatile<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because by definition Copy types can have their bits set arbitrarily and
        // still be valid.
        unsafe {
            Ok(read_volatile(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

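    /// Flushes modified pages of this mapping back to the underlying file or device, if any
    /// (a thin wrapper around the platform `msync` implementation).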
    pub fn msync(&self) -> Result<()> {
        self.mapping.msync()
    }

    /// Flush a region of the MemoryMapping from the system's caching hierarchy.
    /// There are several uses for flushing:
    ///
    /// * Cached memory which the guest may be reading through an uncached mapping:
    ///
    ///     Guest reads via an uncached mapping can bypass the cache and directly access main
    ///     memory. This is outside the memory model of Rust, which means that even with proper
    ///     synchronization, guest reads via an uncached mapping might not see updates from the
    ///     host. As such, it is necessary to perform architectural cache maintenance to flush the
    ///     host writes to main memory.
    ///
    ///     Note that this does not support writable uncached guest mappings, as doing so
    ///     requires invalidating the cache, not flushing the cache.
    ///
    /// * Uncached memory which the guest may be writing through a cached mapping:
    ///
    ///     Guest writes via a cached mapping of a host's uncached memory may never make it to
    ///     system/device memory prior to being read. In such cases, explicit flushing of the cached
    ///     writes is necessary, since other managers of the host's uncached mapping (e.g. DRM) see
    ///     no need to flush, as they believe all writes would explicitly bypass the caches.
    ///
    /// Currently only supported on x86_64 and aarch64. Cannot be supported on 32-bit arm.
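    ///
    /// # Examples
    /// * Flush the first couple of cachelines of a mapping. A minimal sketch: on architectures
    ///   without userspace cache maintenance (e.g. 32-bit arm) this returns
    ///   `Error::NotImplemented`, so the result is deliberately not unwrapped.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    ///     let _ = mem_map.flush_region(0, 128);
    /// ```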
    pub fn flush_region(&self, offset: usize, len: usize) -> Result<()> {
        let addr: *const u8 = self.as_ptr();
        let size = self.size();

        // disallow overflow/wrapping ranges and subregion extending beyond mapped range
        if usize::MAX - size < addr as usize || offset >= size || size - offset < len {
            return Err(Error::InvalidRange(offset, len, size));
        }

        // SAFETY:
        // Safe because already validated that `next` will be an address in the mapping:
        //     * mapped region is non-wrapping
        //     * subregion is bounded within the mapped region
        let mut next: *const u8 = unsafe { addr.add(offset) };

        let cacheline_size = get_cacheline_size();
        let cacheline_count = len.div_ceil(cacheline_size);

        for _ in 0..cacheline_count {
            // SAFETY:
            // Safe because `next` is guaranteed to be within the mapped region (see earlier
            // validations), and flushing the cache doesn't affect any rust safety properties.
            unsafe { flush_one(next)? };

            // SAFETY:
            // Safe because we never use next if it goes out of the mapped region or overflows its
            // storage type (based on earlier validations and the loop bounds).
            next = unsafe { next.add(cacheline_size) };
        }
        Ok(())
    }

    /// Flush all backing memory for a mapping in an arch-specific manner (see `flush_region()`).
    pub fn flush_all(&self) -> Result<()> {
        self.flush_region(0, self.size())
    }
}

pub struct MemoryMappingBuilder<'a> {
    pub(crate) descriptor: Option<&'a dyn AsRawDescriptor>,
    pub(crate) is_file_descriptor: bool,
    #[cfg_attr(target_os = "macos", allow(unused))]
    pub(crate) size: usize,
    pub(crate) offset: Option<u64>,
    pub(crate) align: Option<u64>,
    pub(crate) protection: Option<Protection>,
    #[cfg_attr(target_os = "macos", allow(unused))]
    #[cfg_attr(windows, allow(unused))]
    pub(crate) populate: bool,
}

/// Builds a MemoryMapping object from the specified arguments.
impl<'a> MemoryMappingBuilder<'a> {
    /// Creates a new builder specifying size of the memory region in bytes.
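    ///
    /// # Examples
    /// * Build an anonymous 1024-byte read/write mapping. A minimal sketch using only the
    ///   builder methods defined in this file, and assuming `Protection` is exported at the
    ///   crate root like the other types in these examples.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::Protection;
    ///     let mem_map = MemoryMappingBuilder::new(1024)
    ///         .protection(Protection::read_write())
    ///         .build()
    ///         .unwrap();
    /// ```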
    pub fn new(size: usize) -> MemoryMappingBuilder<'a> {
        MemoryMappingBuilder {
            descriptor: None,
            size,
            is_file_descriptor: false,
            offset: None,
            align: None,
            protection: None,
            populate: false,
        }
    }

    /// Build the memory mapping backed by the specified `File`.
    ///
    /// Default: Create a new memory mapping.
    ///
    /// Note: this is a forward-looking interface to accommodate platforms that
    /// require special handling for file backed mappings.
    #[allow(clippy::wrong_self_convention, unused_mut)]
    pub fn from_file(mut self, file: &'a File) -> MemoryMappingBuilder {
        // On Windows, files require special handling (next day shipping if possible).
        self.is_file_descriptor = true;

        self.descriptor = Some(file as &dyn AsRawDescriptor);
        self
    }

    /// Build the memory mapping backed by the specified `SharedMemory`.
    ///
    /// Default: Create a new memory mapping.
    pub fn from_shared_memory(mut self, shm: &'a SharedMemory) -> MemoryMappingBuilder {
        self.descriptor = Some(shm as &dyn AsRawDescriptor);
        self
    }

    /// Offset in bytes from the beginning of the mapping to start the mmap.
    ///
    /// Default: No offset
    pub fn offset(mut self, offset: u64) -> MemoryMappingBuilder<'a> {
        self.offset = Some(offset);
        self
    }

    /// Protection (e.g. readable/writable) of the memory region.
    ///
    /// Default: Read/write
    pub fn protection(mut self, protection: Protection) -> MemoryMappingBuilder<'a> {
        self.protection = Some(protection);
        self
    }

    /// Alignment of the memory region mapping in bytes.
    ///
    /// Default: No alignment
    pub fn align(mut self, alignment: u64) -> MemoryMappingBuilder<'a> {
        self.align = Some(alignment);
        self
    }
}

impl VolatileMemory for MemoryMapping {
    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;

        if mem_end > self.size() {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        let new_addr =
            (self.as_ptr() as usize)
                .checked_add(offset)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.as_ptr() as usize,
                    offset,
                })?;

        // SAFETY:
        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }
}

/// A range of memory that can be msynced, for abstracting over different types of memory mappings.
///
/// # Safety
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
pub unsafe trait MappedRegion: Send + Sync {
    // SAFETY:
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8;

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the region with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        _offset: usize,
        _size: usize,
        _fd: &dyn AsRawDescriptor,
        _fd_offset: u64,
        _prot: Protection,
    ) -> Result<()> {
        Err(Error::AddFdMappingIsUnsupported)
    }

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
        Err(Error::RemoveMappingIsUnsupported)
    }
}

// SAFETY:
// Safe because it exclusively forwards calls to a safe implementation.
unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.mapping.as_ptr()
    }

    fn size(&self) -> usize {
        self.mapping.size()
    }
}

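/// A memory region mapped by code external to this crate, described by its raw pointer and size
/// so that it can be used as a `MappedRegion`.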
#[derive(Debug, PartialEq, Eq)]
pub struct ExternalMapping {
    pub ptr: u64,
    pub size: usize,
}

// SAFETY:
// `ptr`..`ptr+size` is an mmaped region and is owned by this object. Caller
// needs to ensure that the region is not unmapped during the `MappedRegion`'s
// lifetime.
unsafe impl MappedRegion for ExternalMapping {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize {
        self.size
    }
}