1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 //
3 // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 //
5 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
6 // Use of this source code is governed by a BSD-style license that can be
7 // found in the LICENSE-BSD-3-Clause file.
8 //
9 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
10
11 //! The default implementation for the [`GuestMemory`](trait.GuestMemory.html) trait.
12 //!
13 //! This implementation is mmap-ing the memory of the guest into the current process.
14
15 use std::borrow::Borrow;
16 use std::io::{Read, Write};
17 #[cfg(unix)]
18 use std::io::{Seek, SeekFrom};
19 use std::ops::Deref;
20 use std::result;
21 use std::sync::atomic::Ordering;
22 use std::sync::Arc;
23
24 use crate::address::Address;
25 use crate::bitmap::{Bitmap, BS};
26 use crate::guest_memory::{
27 self, FileOffset, GuestAddress, GuestMemory, GuestMemoryIterator, GuestMemoryRegion,
28 GuestUsize, MemoryRegionAddress,
29 };
30 use crate::volatile_memory::{VolatileMemory, VolatileSlice};
31 use crate::{AtomicAccess, Bytes};
32
33 #[cfg(all(not(feature = "xen"), unix))]
34 pub use crate::mmap_unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder};
35
36 #[cfg(all(feature = "xen", unix))]
37 pub use crate::mmap_xen::{Error as MmapRegionError, MmapRange, MmapRegion, MmapXenFlags};
38
39 #[cfg(windows)]
40 pub use crate::mmap_windows::MmapRegion;
41 #[cfg(windows)]
42 pub use std::io::Error as MmapRegionError;
43
/// A `Bitmap` that can be created starting from an initial size.
///
/// Used by constructors in this module that must allocate a dirty-page tracking
/// bitmap sized to match a freshly created memory region.
pub trait NewBitmap: Bitmap + Default {
    /// Create a new object based on the specified length in bytes.
    fn with_len(len: usize) -> Self;
}
49
// The unit type acts as a no-op bitmap, so dirty tracking can be disabled at zero cost.
impl NewBitmap for () {
    fn with_len(_len: usize) -> Self {}
}
53
/// Errors that can occur when creating a memory map.
// Note: the `#[error]` strings double as the `Display` output for each variant.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Adding the guest base address to the length of the underlying mapping resulted
    /// in an overflow.
    #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")]
    InvalidGuestRegion,
    /// Error creating a `MmapRegion` object.
    #[error("{0}")]
    MmapRegion(MmapRegionError),
    /// No memory region found.
    #[error("No memory region found")]
    NoMemoryRegion,
    /// Some of the memory regions intersect with each other.
    #[error("Some of the memory regions intersect with each other")]
    MemoryRegionOverlap,
    /// The provided memory regions haven't been sorted.
    #[error("The provided memory regions haven't been sorted")]
    UnsortedMemoryRegions,
}
74
75 // TODO: use this for Windows as well after we redefine the Error type there.
76 #[cfg(unix)]
77 /// Checks if a mapping of `size` bytes fits at the provided `file_offset`.
78 ///
79 /// For a borrowed `FileOffset` and size, this function checks whether the mapping does not
80 /// extend past EOF, and that adding the size to the file offset does not lead to overflow.
check_file_offset( file_offset: &FileOffset, size: usize, ) -> result::Result<(), MmapRegionError>81 pub fn check_file_offset(
82 file_offset: &FileOffset,
83 size: usize,
84 ) -> result::Result<(), MmapRegionError> {
85 let mut file = file_offset.file();
86 let start = file_offset.start();
87
88 if let Some(end) = start.checked_add(size as u64) {
89 let filesize = file
90 .seek(SeekFrom::End(0))
91 .map_err(MmapRegionError::SeekEnd)?;
92 file.rewind().map_err(MmapRegionError::SeekStart)?;
93 if filesize < end {
94 return Err(MmapRegionError::MappingPastEof);
95 }
96 } else {
97 return Err(MmapRegionError::InvalidOffsetLength);
98 }
99
100 Ok(())
101 }
102
/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's
/// memory region in the current process.
///
/// Represents a continuous region of the guest's physical memory that is backed by a mapping
/// in the virtual address space of the calling process.
#[derive(Debug)]
pub struct GuestRegionMmap<B = ()> {
    // The backing mapping in the host process' virtual address space.
    mapping: MmapRegion<B>,
    // Guest physical address where this region starts.
    guest_base: GuestAddress,
}
113
// Allow transparent access to the underlying `MmapRegion`'s methods.
impl<B> Deref for GuestRegionMmap<B> {
    type Target = MmapRegion<B>;

    fn deref(&self) -> &MmapRegion<B> {
        &self.mapping
    }
}
121
122 impl<B: Bitmap> GuestRegionMmap<B> {
123 /// Create a new memory-mapped memory region for the guest's physical memory.
new(mapping: MmapRegion<B>, guest_base: GuestAddress) -> result::Result<Self, Error>124 pub fn new(mapping: MmapRegion<B>, guest_base: GuestAddress) -> result::Result<Self, Error> {
125 if guest_base.0.checked_add(mapping.size() as u64).is_none() {
126 return Err(Error::InvalidGuestRegion);
127 }
128
129 Ok(GuestRegionMmap {
130 mapping,
131 guest_base,
132 })
133 }
134 }
135
136 #[cfg(not(feature = "xen"))]
137 impl<B: NewBitmap> GuestRegionMmap<B> {
138 /// Create a new memory-mapped memory region from guest's physical memory, size and file.
from_range( addr: GuestAddress, size: usize, file: Option<FileOffset>, ) -> result::Result<Self, Error>139 pub fn from_range(
140 addr: GuestAddress,
141 size: usize,
142 file: Option<FileOffset>,
143 ) -> result::Result<Self, Error> {
144 let region = if let Some(ref f_off) = file {
145 MmapRegion::from_file(f_off.clone(), size)
146 } else {
147 MmapRegion::new(size)
148 }
149 .map_err(Error::MmapRegion)?;
150
151 Self::new(region, addr)
152 }
153 }
154
#[cfg(feature = "xen")]
impl<B: NewBitmap> GuestRegionMmap<B> {
    /// Create a new Unix memory-mapped memory region from guest's physical memory, size and file.
    /// This must only be used for tests, doctests, benches and is not designed for end consumers.
    pub fn from_range(
        addr: GuestAddress,
        size: usize,
        file: Option<FileOffset>,
    ) -> result::Result<Self, Error> {
        // Build a plain unix-style range; Xen-specific mapping flavors must go through
        // `MmapRegion::from_range` with an explicitly constructed `MmapRange`.
        let range = MmapRange::new_unix(size, file, addr);

        let region = MmapRegion::from_range(range).map_err(Error::MmapRegion)?;
        Self::new(region, addr)
    }
}
170
171 impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionMmap<B> {
172 type E = guest_memory::Error;
173
174 /// # Examples
175 /// * Write a slice at guest address 0x1200.
176 ///
177 /// ```
178 /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
179 /// #
180 /// # let start_addr = GuestAddress(0x1000);
181 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
182 /// # .expect("Could not create guest memory");
183 /// #
184 /// let res = gm
185 /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
186 /// .expect("Could not write to guest memory");
187 /// assert_eq!(5, res);
188 /// ```
write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize>189 fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
190 let maddr = addr.raw_value() as usize;
191 self.as_volatile_slice()
192 .unwrap()
193 .write(buf, maddr)
194 .map_err(Into::into)
195 }
196
197 /// # Examples
198 /// * Read a slice of length 16 at guestaddress 0x1200.
199 ///
200 /// ```
201 /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
202 /// #
203 /// # let start_addr = GuestAddress(0x1000);
204 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
205 /// # .expect("Could not create guest memory");
206 /// #
207 /// let buf = &mut [0u8; 16];
208 /// let res = gm
209 /// .read(buf, GuestAddress(0x1200))
210 /// .expect("Could not read from guest memory");
211 /// assert_eq!(16, res);
212 /// ```
read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize>213 fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
214 let maddr = addr.raw_value() as usize;
215 self.as_volatile_slice()
216 .unwrap()
217 .read(buf, maddr)
218 .map_err(Into::into)
219 }
220
write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()>221 fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
222 let maddr = addr.raw_value() as usize;
223 self.as_volatile_slice()
224 .unwrap()
225 .write_slice(buf, maddr)
226 .map_err(Into::into)
227 }
228
read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()>229 fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
230 let maddr = addr.raw_value() as usize;
231 self.as_volatile_slice()
232 .unwrap()
233 .read_slice(buf, maddr)
234 .map_err(Into::into)
235 }
236
237 /// # Examples
238 ///
239 /// * Read bytes from /dev/urandom
240 ///
241 /// ```
242 /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
243 /// # use std::fs::File;
244 /// # use std::path::Path;
245 /// #
246 /// # let start_addr = GuestAddress(0x1000);
247 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
248 /// # .expect("Could not create guest memory");
249 /// # let addr = GuestAddress(0x1010);
250 /// # let mut file = if cfg!(unix) {
251 /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
252 /// # file
253 /// # } else {
254 /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
255 /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
256 /// # };
257 ///
258 /// gm.read_from(addr, &mut file, 128)
259 /// .expect("Could not read from /dev/urandom into guest memory");
260 ///
261 /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
262 /// let rand_val: u32 = gm
263 /// .read_obj(read_addr)
264 /// .expect("Could not read u32 val from /dev/urandom");
265 /// ```
read_from<F>( &self, addr: MemoryRegionAddress, src: &mut F, count: usize, ) -> guest_memory::Result<usize> where F: Read,266 fn read_from<F>(
267 &self,
268 addr: MemoryRegionAddress,
269 src: &mut F,
270 count: usize,
271 ) -> guest_memory::Result<usize>
272 where
273 F: Read,
274 {
275 let maddr = addr.raw_value() as usize;
276 self.as_volatile_slice()
277 .unwrap()
278 .read_from::<F>(maddr, src, count)
279 .map_err(Into::into)
280 }
281
282 /// # Examples
283 ///
284 /// * Read bytes from /dev/urandom
285 ///
286 /// ```
287 /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
288 /// # use std::fs::File;
289 /// # use std::path::Path;
290 /// #
291 /// # let start_addr = GuestAddress(0x1000);
292 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
293 /// # .expect("Could not create guest memory");
294 /// # let addr = GuestAddress(0x1010);
295 /// # let mut file = if cfg!(unix) {
296 /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
297 /// # file
298 /// # } else {
299 /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
300 /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
301 /// # };
302 ///
303 /// gm.read_exact_from(addr, &mut file, 128)
304 /// .expect("Could not read from /dev/urandom into guest memory");
305 ///
306 /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
307 /// let rand_val: u32 = gm
308 /// .read_obj(read_addr)
309 /// .expect("Could not read u32 val from /dev/urandom");
310 /// ```
read_exact_from<F>( &self, addr: MemoryRegionAddress, src: &mut F, count: usize, ) -> guest_memory::Result<()> where F: Read,311 fn read_exact_from<F>(
312 &self,
313 addr: MemoryRegionAddress,
314 src: &mut F,
315 count: usize,
316 ) -> guest_memory::Result<()>
317 where
318 F: Read,
319 {
320 let maddr = addr.raw_value() as usize;
321 self.as_volatile_slice()
322 .unwrap()
323 .read_exact_from::<F>(maddr, src, count)
324 .map_err(Into::into)
325 }
326
327 /// Writes data from the region to a writable object.
328 ///
329 /// # Examples
330 ///
331 /// * Write 128 bytes to a /dev/null file
332 ///
333 /// ```
334 /// # #[cfg(not(unix))]
335 /// # extern crate vmm_sys_util;
336 /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
337 /// #
338 /// # let start_addr = GuestAddress(0x1000);
339 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
340 /// # .expect("Could not create guest memory");
341 /// # let mut file = if cfg!(unix) {
342 /// # use std::fs::OpenOptions;
343 /// let mut file = OpenOptions::new()
344 /// .write(true)
345 /// .open("/dev/null")
346 /// .expect("Could not open /dev/null");
347 /// # file
348 /// # } else {
349 /// # use vmm_sys_util::tempfile::TempFile;
350 /// # TempFile::new().unwrap().into_file()
351 /// # };
352 ///
353 /// gm.write_to(start_addr, &mut file, 128)
354 /// .expect("Could not write to file from guest memory");
355 /// ```
write_to<F>( &self, addr: MemoryRegionAddress, dst: &mut F, count: usize, ) -> guest_memory::Result<usize> where F: Write,356 fn write_to<F>(
357 &self,
358 addr: MemoryRegionAddress,
359 dst: &mut F,
360 count: usize,
361 ) -> guest_memory::Result<usize>
362 where
363 F: Write,
364 {
365 let maddr = addr.raw_value() as usize;
366 self.as_volatile_slice()
367 .unwrap()
368 .write_to::<F>(maddr, dst, count)
369 .map_err(Into::into)
370 }
371
372 /// Writes data from the region to a writable object.
373 ///
374 /// # Examples
375 ///
376 /// * Write 128 bytes to a /dev/null file
377 ///
378 /// ```
379 /// # #[cfg(not(unix))]
380 /// # extern crate vmm_sys_util;
381 /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
382 /// #
383 /// # let start_addr = GuestAddress(0x1000);
384 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
385 /// # .expect("Could not create guest memory");
386 /// # let mut file = if cfg!(unix) {
387 /// # use std::fs::OpenOptions;
388 /// let mut file = OpenOptions::new()
389 /// .write(true)
390 /// .open("/dev/null")
391 /// .expect("Could not open /dev/null");
392 /// # file
393 /// # } else {
394 /// # use vmm_sys_util::tempfile::TempFile;
395 /// # TempFile::new().unwrap().into_file()
396 /// # };
397 ///
398 /// gm.write_all_to(start_addr, &mut file, 128)
399 /// .expect("Could not write to file from guest memory");
400 /// ```
write_all_to<F>( &self, addr: MemoryRegionAddress, dst: &mut F, count: usize, ) -> guest_memory::Result<()> where F: Write,401 fn write_all_to<F>(
402 &self,
403 addr: MemoryRegionAddress,
404 dst: &mut F,
405 count: usize,
406 ) -> guest_memory::Result<()>
407 where
408 F: Write,
409 {
410 let maddr = addr.raw_value() as usize;
411 self.as_volatile_slice()
412 .unwrap()
413 .write_all_to::<F>(maddr, dst, count)
414 .map_err(Into::into)
415 }
416
store<T: AtomicAccess>( &self, val: T, addr: MemoryRegionAddress, order: Ordering, ) -> guest_memory::Result<()>417 fn store<T: AtomicAccess>(
418 &self,
419 val: T,
420 addr: MemoryRegionAddress,
421 order: Ordering,
422 ) -> guest_memory::Result<()> {
423 self.as_volatile_slice().and_then(|s| {
424 s.store(val, addr.raw_value() as usize, order)
425 .map_err(Into::into)
426 })
427 }
428
load<T: AtomicAccess>( &self, addr: MemoryRegionAddress, order: Ordering, ) -> guest_memory::Result<T>429 fn load<T: AtomicAccess>(
430 &self,
431 addr: MemoryRegionAddress,
432 order: Ordering,
433 ) -> guest_memory::Result<T> {
434 self.as_volatile_slice()
435 .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
436 }
437 }
438
439 impl<B: Bitmap> GuestMemoryRegion for GuestRegionMmap<B> {
440 type B = B;
441
len(&self) -> GuestUsize442 fn len(&self) -> GuestUsize {
443 self.mapping.size() as GuestUsize
444 }
445
start_addr(&self) -> GuestAddress446 fn start_addr(&self) -> GuestAddress {
447 self.guest_base
448 }
449
bitmap(&self) -> &Self::B450 fn bitmap(&self) -> &Self::B {
451 self.mapping.bitmap()
452 }
453
get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8>454 fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
455 // Not sure why wrapping_offset is not unsafe. Anyway this
456 // is safe because we've just range-checked addr using check_address.
457 self.check_address(addr)
458 .ok_or(guest_memory::Error::InvalidBackendAddress)
459 .map(|addr| {
460 self.mapping
461 .as_ptr()
462 .wrapping_offset(addr.raw_value() as isize)
463 })
464 }
465
file_offset(&self) -> Option<&FileOffset>466 fn file_offset(&self) -> Option<&FileOffset> {
467 self.mapping.file_offset()
468 }
469
get_slice( &self, offset: MemoryRegionAddress, count: usize, ) -> guest_memory::Result<VolatileSlice<BS<B>>>470 fn get_slice(
471 &self,
472 offset: MemoryRegionAddress,
473 count: usize,
474 ) -> guest_memory::Result<VolatileSlice<BS<B>>> {
475 let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?;
476 Ok(slice)
477 }
478
479 #[cfg(target_os = "linux")]
is_hugetlbfs(&self) -> Option<bool>480 fn is_hugetlbfs(&self) -> Option<bool> {
481 self.mapping.is_hugetlbfs()
482 }
483 }
484
/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory
/// in the current process.
///
/// Represents the entire physical memory of the guest by tracking all its memory regions.
/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the
/// virtual address space of the calling process.
#[derive(Clone, Debug, Default)]
pub struct GuestMemoryMmap<B = ()> {
    // Regions are kept sorted by start address and are non-overlapping; the constructors
    // enforce these invariants, and lookups rely on them (binary search).
    regions: Vec<Arc<GuestRegionMmap<B>>>,
}
495
496 impl<B: NewBitmap> GuestMemoryMmap<B> {
497 /// Creates an empty `GuestMemoryMmap` instance.
new() -> Self498 pub fn new() -> Self {
499 Self::default()
500 }
501
502 /// Creates a container and allocates anonymous memory for guest memory regions.
503 ///
504 /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result<Self, Error>505 pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result<Self, Error> {
506 Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None)))
507 }
508
509 /// Creates a container and allocates anonymous memory for guest memory regions.
510 ///
511 /// Valid memory regions are specified as a sequence of (Address, Size, Option<FileOffset>)
512 /// tuples sorted by Address.
from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, Error> where A: Borrow<(GuestAddress, usize, Option<FileOffset>)>, T: IntoIterator<Item = A>,513 pub fn from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, Error>
514 where
515 A: Borrow<(GuestAddress, usize, Option<FileOffset>)>,
516 T: IntoIterator<Item = A>,
517 {
518 Self::from_regions(
519 ranges
520 .into_iter()
521 .map(|x| {
522 GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone())
523 })
524 .collect::<result::Result<Vec<_>, Error>>()?,
525 )
526 }
527 }
528
529 impl<B: Bitmap> GuestMemoryMmap<B> {
530 /// Creates a new `GuestMemoryMmap` from a vector of regions.
531 ///
532 /// # Arguments
533 ///
534 /// * `regions` - The vector of regions.
535 /// The regions shouldn't overlap and they should be sorted
536 /// by the starting address.
from_regions(mut regions: Vec<GuestRegionMmap<B>>) -> result::Result<Self, Error>537 pub fn from_regions(mut regions: Vec<GuestRegionMmap<B>>) -> result::Result<Self, Error> {
538 Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
539 }
540
541 /// Creates a new `GuestMemoryMmap` from a vector of Arc regions.
542 ///
543 /// Similar to the constructor `from_regions()` as it returns a
544 /// `GuestMemoryMmap`. The need for this constructor is to provide a way for
545 /// consumer of this API to create a new `GuestMemoryMmap` based on existing
546 /// regions coming from an existing `GuestMemoryMmap` instance.
547 ///
548 /// # Arguments
549 ///
550 /// * `regions` - The vector of `Arc` regions.
551 /// The regions shouldn't overlap and they should be sorted
552 /// by the starting address.
from_arc_regions(regions: Vec<Arc<GuestRegionMmap<B>>>) -> result::Result<Self, Error>553 pub fn from_arc_regions(regions: Vec<Arc<GuestRegionMmap<B>>>) -> result::Result<Self, Error> {
554 if regions.is_empty() {
555 return Err(Error::NoMemoryRegion);
556 }
557
558 for window in regions.windows(2) {
559 let prev = &window[0];
560 let next = &window[1];
561
562 if prev.start_addr() > next.start_addr() {
563 return Err(Error::UnsortedMemoryRegions);
564 }
565
566 if prev.last_addr() >= next.start_addr() {
567 return Err(Error::MemoryRegionOverlap);
568 }
569 }
570
571 Ok(Self { regions })
572 }
573
574 /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`.
575 ///
576 /// # Arguments
577 /// * `region`: the memory region to insert into the guest memory object.
insert_region( &self, region: Arc<GuestRegionMmap<B>>, ) -> result::Result<GuestMemoryMmap<B>, Error>578 pub fn insert_region(
579 &self,
580 region: Arc<GuestRegionMmap<B>>,
581 ) -> result::Result<GuestMemoryMmap<B>, Error> {
582 let mut regions = self.regions.clone();
583 regions.push(region);
584 regions.sort_by_key(|x| x.start_addr());
585
586 Self::from_arc_regions(regions)
587 }
588
589 /// Remove a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`
590 /// on success, together with the removed region.
591 ///
592 /// # Arguments
593 /// * `base`: base address of the region to be removed
594 /// * `size`: size of the region to be removed
remove_region( &self, base: GuestAddress, size: GuestUsize, ) -> result::Result<(GuestMemoryMmap<B>, Arc<GuestRegionMmap<B>>), Error>595 pub fn remove_region(
596 &self,
597 base: GuestAddress,
598 size: GuestUsize,
599 ) -> result::Result<(GuestMemoryMmap<B>, Arc<GuestRegionMmap<B>>), Error> {
600 if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
601 if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size {
602 let mut regions = self.regions.clone();
603 let region = regions.remove(region_index);
604 return Ok((Self { regions }, region));
605 }
606 }
607
608 Err(Error::InvalidGuestRegion)
609 }
610 }
611
/// An iterator over the elements of `GuestMemoryMmap`.
///
/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionMmap<B>>>);

impl<'a, B> Iterator for Iter<'a, B> {
    type Item = &'a GuestRegionMmap<B>;
    // Yield plain region references, hiding the internal `Arc` wrapper from consumers.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(AsRef::as_ref)
    }
}

// Ties the `Iter` type above to `GuestMemoryMmap` as its region iterator type.
impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionMmap<B>> for GuestMemoryMmap<B> {
    type Iter = Iter<'a, B>;
}
627
628 impl<B: Bitmap + 'static> GuestMemory for GuestMemoryMmap<B> {
629 type R = GuestRegionMmap<B>;
630
631 type I = Self;
632
num_regions(&self) -> usize633 fn num_regions(&self) -> usize {
634 self.regions.len()
635 }
636
find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap<B>>637 fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap<B>> {
638 let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
639 Ok(x) => Some(x),
640 // Within the closest region with starting address < addr
641 Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
642 _ => None,
643 };
644 index.map(|x| self.regions[x].as_ref())
645 }
646
iter(&self) -> Iter<B>647 fn iter(&self) -> Iter<B> {
648 Iter(self.regions.iter())
649 }
650 }
651
652 #[cfg(test)]
653 mod tests {
654 #![allow(clippy::undocumented_unsafe_blocks)]
655 extern crate vmm_sys_util;
656
657 use super::*;
658
659 use crate::bitmap::tests::test_guest_memory_and_region;
660 use crate::bitmap::AtomicBitmap;
661 use crate::GuestAddressSpace;
662
663 use std::fs::File;
664 use std::mem;
665 use std::path::Path;
666 use vmm_sys_util::tempfile::TempFile;
667
668 type GuestMemoryMmap = super::GuestMemoryMmap<()>;
669 type GuestRegionMmap = super::GuestRegionMmap<()>;
670 type MmapRegion = super::MmapRegion<()>;
671
672 #[test]
basic_map()673 fn basic_map() {
674 let m = MmapRegion::new(1024).unwrap();
675 assert_eq!(1024, m.size());
676 }
677
check_guest_memory_mmap( maybe_guest_mem: Result<GuestMemoryMmap, Error>, expected_regions_summary: &[(GuestAddress, usize)], )678 fn check_guest_memory_mmap(
679 maybe_guest_mem: Result<GuestMemoryMmap, Error>,
680 expected_regions_summary: &[(GuestAddress, usize)],
681 ) {
682 assert!(maybe_guest_mem.is_ok());
683
684 let guest_mem = maybe_guest_mem.unwrap();
685 assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
686 let maybe_last_mem_reg = expected_regions_summary.last();
687 if let Some((region_addr, region_size)) = maybe_last_mem_reg {
688 let mut last_addr = region_addr.unchecked_add(*region_size as u64);
689 if last_addr.raw_value() != 0 {
690 last_addr = last_addr.unchecked_sub(1);
691 }
692 assert_eq!(guest_mem.last_addr(), last_addr);
693 }
694 for ((region_addr, region_size), mmap) in expected_regions_summary
695 .iter()
696 .zip(guest_mem.regions.iter())
697 {
698 assert_eq!(region_addr, &mmap.guest_base);
699 assert_eq!(region_size, &mmap.mapping.size());
700
701 assert!(guest_mem.find_region(*region_addr).is_some());
702 }
703 }
704
new_guest_memory_mmap( regions_summary: &[(GuestAddress, usize)], ) -> Result<GuestMemoryMmap, Error>705 fn new_guest_memory_mmap(
706 regions_summary: &[(GuestAddress, usize)],
707 ) -> Result<GuestMemoryMmap, Error> {
708 GuestMemoryMmap::from_ranges(regions_summary)
709 }
710
new_guest_memory_mmap_from_regions( regions_summary: &[(GuestAddress, usize)], ) -> Result<GuestMemoryMmap, Error>711 fn new_guest_memory_mmap_from_regions(
712 regions_summary: &[(GuestAddress, usize)],
713 ) -> Result<GuestMemoryMmap, Error> {
714 GuestMemoryMmap::from_regions(
715 regions_summary
716 .iter()
717 .map(|(region_addr, region_size)| {
718 GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()
719 })
720 .collect(),
721 )
722 }
723
new_guest_memory_mmap_from_arc_regions( regions_summary: &[(GuestAddress, usize)], ) -> Result<GuestMemoryMmap, Error>724 fn new_guest_memory_mmap_from_arc_regions(
725 regions_summary: &[(GuestAddress, usize)],
726 ) -> Result<GuestMemoryMmap, Error> {
727 GuestMemoryMmap::from_arc_regions(
728 regions_summary
729 .iter()
730 .map(|(region_addr, region_size)| {
731 Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap())
732 })
733 .collect(),
734 )
735 }
736
new_guest_memory_mmap_with_files( regions_summary: &[(GuestAddress, usize)], ) -> Result<GuestMemoryMmap, Error>737 fn new_guest_memory_mmap_with_files(
738 regions_summary: &[(GuestAddress, usize)],
739 ) -> Result<GuestMemoryMmap, Error> {
740 let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
741 .iter()
742 .map(|(region_addr, region_size)| {
743 let f = TempFile::new().unwrap().into_file();
744 f.set_len(*region_size as u64).unwrap();
745
746 (*region_addr, *region_size, Some(FileOffset::new(f, 0)))
747 })
748 .collect();
749
750 GuestMemoryMmap::from_ranges_with_files(®ions)
751 }
752
753 #[test]
test_no_memory_region()754 fn test_no_memory_region() {
755 let regions_summary = [];
756
757 assert_eq!(
758 format!(
759 "{:?}",
760 new_guest_memory_mmap(®ions_summary).err().unwrap()
761 ),
762 format!("{:?}", Error::NoMemoryRegion)
763 );
764
765 assert_eq!(
766 format!(
767 "{:?}",
768 new_guest_memory_mmap_with_files(®ions_summary)
769 .err()
770 .unwrap()
771 ),
772 format!("{:?}", Error::NoMemoryRegion)
773 );
774
775 assert_eq!(
776 format!(
777 "{:?}",
778 new_guest_memory_mmap_from_regions(®ions_summary)
779 .err()
780 .unwrap()
781 ),
782 format!("{:?}", Error::NoMemoryRegion)
783 );
784
785 assert_eq!(
786 format!(
787 "{:?}",
788 new_guest_memory_mmap_from_arc_regions(®ions_summary)
789 .err()
790 .unwrap()
791 ),
792 format!("{:?}", Error::NoMemoryRegion)
793 );
794 }
795
796 #[test]
test_overlapping_memory_regions()797 fn test_overlapping_memory_regions() {
798 let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)];
799
800 assert_eq!(
801 format!(
802 "{:?}",
803 new_guest_memory_mmap(®ions_summary).err().unwrap()
804 ),
805 format!("{:?}", Error::MemoryRegionOverlap)
806 );
807
808 assert_eq!(
809 format!(
810 "{:?}",
811 new_guest_memory_mmap_with_files(®ions_summary)
812 .err()
813 .unwrap()
814 ),
815 format!("{:?}", Error::MemoryRegionOverlap)
816 );
817
818 assert_eq!(
819 format!(
820 "{:?}",
821 new_guest_memory_mmap_from_regions(®ions_summary)
822 .err()
823 .unwrap()
824 ),
825 format!("{:?}", Error::MemoryRegionOverlap)
826 );
827
828 assert_eq!(
829 format!(
830 "{:?}",
831 new_guest_memory_mmap_from_arc_regions(®ions_summary)
832 .err()
833 .unwrap()
834 ),
835 format!("{:?}", Error::MemoryRegionOverlap)
836 );
837 }
838
839 #[test]
test_unsorted_memory_regions()840 fn test_unsorted_memory_regions() {
841 let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)];
842
843 assert_eq!(
844 format!(
845 "{:?}",
846 new_guest_memory_mmap(®ions_summary).err().unwrap()
847 ),
848 format!("{:?}", Error::UnsortedMemoryRegions)
849 );
850
851 assert_eq!(
852 format!(
853 "{:?}",
854 new_guest_memory_mmap_with_files(®ions_summary)
855 .err()
856 .unwrap()
857 ),
858 format!("{:?}", Error::UnsortedMemoryRegions)
859 );
860
861 assert_eq!(
862 format!(
863 "{:?}",
864 new_guest_memory_mmap_from_regions(®ions_summary)
865 .err()
866 .unwrap()
867 ),
868 format!("{:?}", Error::UnsortedMemoryRegions)
869 );
870
871 assert_eq!(
872 format!(
873 "{:?}",
874 new_guest_memory_mmap_from_arc_regions(®ions_summary)
875 .err()
876 .unwrap()
877 ),
878 format!("{:?}", Error::UnsortedMemoryRegions)
879 );
880 }
881
    #[test]
    fn test_valid_memory_regions() {
        // Two adjacent but non-overlapping regions must be accepted by every constructor.
        let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)];

        // A freshly created container starts with no regions.
        let guest_mem = GuestMemoryMmap::new();
        assert_eq!(guest_mem.regions.len(), 0);

        check_guest_memory_mmap(new_guest_memory_mmap(&regions_summary), &regions_summary);

        check_guest_memory_mmap(
            new_guest_memory_mmap_with_files(&regions_summary),
            &regions_summary,
        );

        check_guest_memory_mmap(
            new_guest_memory_mmap_from_regions(&regions_summary),
            &regions_summary,
        );

        check_guest_memory_mmap(
            new_guest_memory_mmap_from_arc_regions(&regions_summary),
            &regions_summary,
        );
    }
906
    #[test]
    fn slice_addr() {
        let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap();
        let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
        let guard = s.ptr_guard();
        // The slice at region offset 2 must point exactly 2 bytes past the mapping start.
        assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) });
    }
914
    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn mapped_file_read() {
        // Write a known pattern into a temp file, map it as guest memory, and verify
        // the mapping exposes the file contents.
        let mut f = TempFile::new().unwrap().into_file();
        let sample_buf = &[1, 2, 3, 4, 5];
        assert!(f.write_all(sample_buf).is_ok());

        let file = Some(FileOffset::new(f, 0));
        let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap();
        let buf = &mut [0u8; 16];
        // The region is shorter than `buf`, so the read stops at the region boundary.
        assert_eq!(
            mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(),
            sample_buf.len()
        );
        assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
    }
931
    #[test]
    fn test_address_in_range() {
        // Two 0x400-byte regions at 0x0 and 0x800 leave a hole in [0x400, 0x800) and
        // everything past 0xc00 unmapped; check both anonymous and file-backed memory.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.address_in_range(GuestAddress(0x200)));
            assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
            assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
            assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
        }
    }
957
    #[test]
    fn test_check_address() {
        // check_address() must echo back addresses that fall inside a region
        // and return None for the hole between regions or past the end.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        // Layout: [0x0, 0x400) and [0x800, 0xc00), hole at [0x400, 0x800).
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        // Both the anonymous and the file-backed map must behave identically.
        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert_eq!(
                guest_mem.check_address(GuestAddress(0x200)),
                Some(GuestAddress(0x200))
            );
            assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
            assert_eq!(
                guest_mem.check_address(GuestAddress(0xa00)),
                Some(GuestAddress(0xa00))
            );
            assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
        }
    }
989
    #[test]
    fn test_to_region_addr() {
        // to_region_addr() must map a guest address to (region, offset) for
        // in-range addresses and to None inside the hole between regions.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        // Layout: [0x0, 0x400) and [0x800, 0xc00).
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
            // 0x800 and 0xa00 both land in the second region...
            let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
            let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
            // ...so the resolved regions must be the same mapping,
            assert!(r0.as_ptr() == r1.as_ptr());
            // with offsets relative to that region's start (0x800).
            assert_eq!(addr0, MemoryRegionAddress(0));
            assert_eq!(addr1, MemoryRegionAddress(0x200));
        }
    }
1017
    #[test]
    fn test_get_host_address() {
        // get_host_address() must fail for unmapped guest addresses and
        // return pointers consistent with the backing region otherwise.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        // Layout: [0x0, 0x400) and [0x800, 0xc00).
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            // 0x600 is in the hole between the two regions.
            assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
            let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
            let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
            // The region start must translate to the region's base pointer.
            assert_eq!(
                ptr0,
                guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
            );
            // Host pointers must preserve intra-region offsets (0x200 apart).
            assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
        }
    }
1047
    #[test]
    fn test_deref() {
        // Data written through the GuestMemory API must be visible when the
        // same bytes are read back via the region's volatile slice.
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let start_addr = GuestAddress(0x0);
        let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            // Write via the high-level API...
            assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
            // ...then read via the low-level volatile slice of the region.
            let slice = guest_mem
                .find_region(GuestAddress(0))
                .unwrap()
                .as_volatile_slice()
                .unwrap();

            let buf = &mut [0, 0, 0, 0, 0];
            assert_eq!(slice.read(buf, 0).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }
1078
    #[test]
    fn test_read_u64() {
        // Object reads/writes: out-of-range addresses error, an 8-byte write
        // that only partially fits errors with PartialBuffer, and in-range
        // round trips preserve the value.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        // One byte past the last valid address (memory ends at 0x2000).
        let bad_addr = GuestAddress(0x2001);
        // Only 4 bytes remain before the end: a u64 write must be partial.
        let bad_addr2 = GuestAddress(0x1ffc);
        let max_addr = GuestAddress(0x2000);

        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let val1: u64 = 0xaa55_aa55_aa55_aa55;
            let val2: u64 = 0x55aa_55aa_55aa_55aa;
            // Writing past the end of memory is an InvalidGuestAddress error.
            assert_eq!(
                format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
                format!("InvalidGuestAddress({:?})", bad_addr,)
            );
            // Writing where only 4 of 8 bytes fit reports a PartialBuffer
            // error carrying expected (8) and completed (4) byte counts.
            assert_eq!(
                format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
                format!(
                    "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
                    mem::size_of::<u64>(),
                    max_addr.checked_offset_from(bad_addr2).unwrap()
                )
            );

            // In-range round trips: one address per region.
            gm.write_obj(val1, GuestAddress(0x500)).unwrap();
            gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
            let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
            let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
            assert_eq!(val1, num1);
            assert_eq!(val2, num2);
        }
    }
1125
    #[test]
    fn write_and_read() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        // `start_addr` is mutated inside the loop and reset at the end so the
        // second (file-backed) iteration starts from the same address.
        let mut start_addr = GuestAddress(0x1000);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            // Fully in-range write/read moves all 5 bytes.
            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);

            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
            assert_eq!(buf, sample_buf);

            // At the last byte of the region only 1 byte can be transferred;
            // write/read report the short count rather than erroring.
            start_addr = GuestAddress(0x13ff);
            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
            assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
            assert_eq!(buf[0], sample_buf[0]);
            // Reset for the next gm in the list.
            start_addr = GuestAddress(0x1000);
        }
    }
1157
    #[test]
    fn read_to_and_write_from_mem() {
        // Stream bytes between guest memory and a File, using a platform
        // file with known contents: /dev/zero on unix (all zeroes), the NT
        // kernel image on Windows (starts with the "MZ" DOS header).
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            GuestAddress(0x1000),
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let addr = GuestAddress(0x1010);
            let mut file = if cfg!(unix) {
                File::open(Path::new("/dev/zero")).unwrap()
            } else {
                File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
            };
            // Pre-fill with all-ones so we can tell the read really happened.
            gm.write_obj(!0u32, addr).unwrap();
            gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
                .unwrap();
            let value: u32 = gm.read_obj(addr).unwrap();
            if cfg!(unix) {
                // /dev/zero overwrote the sentinel with zeroes.
                assert_eq!(value, 0);
            } else {
                // Little-endian u32 of the bytes 4d 5a 90 00 ("MZ\x90\0").
                assert_eq!(value, 0x0090_5a4d);
            }

            // Now stream the same 4 bytes out of guest memory into a Vec.
            let mut sink = Vec::new();
            gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
                .unwrap();
            if cfg!(unix) {
                assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
            } else {
                assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
            };
        }
    }
1199
1200 #[test]
create_vec_with_regions()1201 fn create_vec_with_regions() {
1202 let region_size = 0x400;
1203 let regions = vec![
1204 (GuestAddress(0x0), region_size),
1205 (GuestAddress(0x1000), region_size),
1206 ];
1207 let mut iterated_regions = Vec::new();
1208 let gm = GuestMemoryMmap::from_ranges(®ions).unwrap();
1209
1210 for region in gm.iter() {
1211 assert_eq!(region.len(), region_size as GuestUsize);
1212 }
1213
1214 for region in gm.iter() {
1215 iterated_regions.push((region.start_addr(), region.len() as usize));
1216 }
1217 assert_eq!(regions, iterated_regions);
1218
1219 assert!(regions
1220 .iter()
1221 .map(|x| (x.0, x.1))
1222 .eq(iterated_regions.iter().copied()));
1223
1224 assert_eq!(gm.regions[0].guest_base, regions[0].0);
1225 assert_eq!(gm.regions[1].guest_base, regions[1].0);
1226 }
1227
1228 #[test]
test_memory()1229 fn test_memory() {
1230 let region_size = 0x400;
1231 let regions = vec![
1232 (GuestAddress(0x0), region_size),
1233 (GuestAddress(0x1000), region_size),
1234 ];
1235 let mut iterated_regions = Vec::new();
1236 let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap());
1237 let mem = gm.memory();
1238
1239 for region in mem.iter() {
1240 assert_eq!(region.len(), region_size as GuestUsize);
1241 }
1242
1243 for region in mem.iter() {
1244 iterated_regions.push((region.start_addr(), region.len() as usize));
1245 }
1246 assert_eq!(regions, iterated_regions);
1247
1248 assert!(regions
1249 .iter()
1250 .map(|x| (x.0, x.1))
1251 .eq(iterated_regions.iter().copied()));
1252
1253 assert_eq!(gm.regions[0].guest_base, regions[0].0);
1254 assert_eq!(gm.regions[1].guest_base, regions[1].0);
1255 }
1256
    #[test]
    fn test_access_cross_boundary() {
        // A write/read starting near the end of one region must transparently
        // continue into the adjacent region when the regions are contiguous.
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        // Two contiguous regions: [0x0, 0x1000) and [0x1000, 0x2000).
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];
            // 0xffc + 5 bytes spans the 0x1000 boundary; all 5 must transfer.
            assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }
1283
    #[test]
    fn test_retrieve_fd_backing_memory_region() {
        // A region's file_offset() must be None for anonymous mappings and
        // Some(_) for file-backed mappings.
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        // Anonymous mapping: no backing file to report.
        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        // File-backed mapping: the backing file must be retrievable.
        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
    }
1305
1306 // Windows needs a dedicated test where it will retrieve the allocation
1307 // granularity to determine a proper offset (other than 0) that can be
1308 // used for the backing file. Refer to Microsoft docs here:
1309 // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile
    #[test]
    #[cfg(unix)]
    fn test_retrieve_offset_from_fd_backing_memory_region() {
        // Like test_retrieve_fd_backing_memory_region, but with a non-zero
        // file offset which must be reported back by file_offset().start().
        let f = TempFile::new().unwrap().into_file();
        // Large enough to map 0x400 bytes starting at offset 0x1000.
        f.set_len(0x1400).unwrap();
        // Needs to be aligned on 4k, otherwise mmap will fail.
        let offset = 0x1000;

        // Anonymous mapping: no file offset to report.
        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        // File-backed mapping at `offset`: the same offset must come back.
        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, offset)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
        assert_eq!(region.file_offset().unwrap().start(), offset);
    }
1335
    #[test]
    fn test_mmap_insert_region() {
        // insert_region() returns a NEW map with the region added in sorted
        // order; the original map is unchanged, and overlapping inserts fail.
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

        // Insert three regions out of address order...
        let mmap =
            Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap());
        let gm = gm.insert_region(mmap).unwrap();
        let mmap =
            Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap());
        let gm = gm.insert_region(mmap).unwrap();
        let mmap =
            Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
        let gm = gm.insert_region(mmap).unwrap();
        // ...and verify a duplicate (overlapping) region is rejected.
        let mmap =
            Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
        gm.insert_region(mmap).unwrap_err();

        // The original snapshot is untouched; the new map has all 5 regions.
        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 5);

        // Regions must be kept sorted by start address.
        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
        assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
        assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
        assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
    }
1369
    #[test]
    fn test_mmap_remove_region() {
        // remove_region() requires an exact (address, size) match, returns a
        // NEW map plus the removed region, and leaves the original unchanged.
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

        // Wrong size for an existing region, and a non-existent address.
        gm.remove_region(GuestAddress(0), 128).unwrap_err();
        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
        // Exact match succeeds.
        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();

        // Original snapshot still has both regions; new map has one.
        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 1);

        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
    }
1391
1392 #[test]
test_guest_memory_mmap_get_slice()1393 fn test_guest_memory_mmap_get_slice() {
1394 let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap();
1395
1396 // Normal case.
1397 let slice_addr = MemoryRegionAddress(0x100);
1398 let slice_size = 0x200;
1399 let slice = region.get_slice(slice_addr, slice_size).unwrap();
1400 assert_eq!(slice.len(), slice_size);
1401
1402 // Empty slice.
1403 let slice_addr = MemoryRegionAddress(0x200);
1404 let slice_size = 0x0;
1405 let slice = region.get_slice(slice_addr, slice_size).unwrap();
1406 assert!(slice.is_empty());
1407
1408 // Error case when slice_size is beyond the boundary.
1409 let slice_addr = MemoryRegionAddress(0x300);
1410 let slice_size = 0x200;
1411 assert!(region.get_slice(slice_addr, slice_size).is_err());
1412 }
1413
    #[test]
    fn test_guest_memory_mmap_as_volatile_slice() {
        // as_volatile_slice() must cover the whole region and support typed
        // volatile access through get_ref().
        let region_size = 0x400;
        let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap();

        // Test slice length.
        let slice = region.as_volatile_slice().unwrap();
        assert_eq!(slice.len(), region_size);

        // Test slice data: a u32 store at offset 0x200 must be readable back.
        let v = 0x1234_5678u32;
        let r = slice.get_ref::<u32>(0x200).unwrap();
        r.store(v);
        assert_eq!(r.load(), v);
    }
1429
    #[test]
    fn test_guest_memory_get_slice() {
        // get_slice() on the whole memory map: slices must stay within a
        // single region; requests crossing a hole or past the end fail.
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();

        // Normal cases.
        let slice_size = 0x200;
        let slice = guest_mem
            .get_slice(GuestAddress(0x100), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        // An entire region can be sliced at once.
        let slice_size = 0x400;
        let slice = guest_mem
            .get_slice(GuestAddress(0x800), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        // Empty slice.
        assert!(guest_mem
            .get_slice(GuestAddress(0x900), 0)
            .unwrap()
            .is_empty());

        // Error cases, wrong size or base address: crossing into the hole,
        // starting inside the hole, and starting past the end.
        assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
    }
1461
1462 #[test]
test_checked_offset()1463 fn test_checked_offset() {
1464 let start_addr1 = GuestAddress(0);
1465 let start_addr2 = GuestAddress(0x800);
1466 let start_addr3 = GuestAddress(0xc00);
1467 let guest_mem = GuestMemoryMmap::from_ranges(&[
1468 (start_addr1, 0x400),
1469 (start_addr2, 0x400),
1470 (start_addr3, 0x400),
1471 ])
1472 .unwrap();
1473
1474 assert_eq!(
1475 guest_mem.checked_offset(start_addr1, 0x200),
1476 Some(GuestAddress(0x200))
1477 );
1478 assert_eq!(
1479 guest_mem.checked_offset(start_addr1, 0xa00),
1480 Some(GuestAddress(0xa00))
1481 );
1482 assert_eq!(
1483 guest_mem.checked_offset(start_addr2, 0x7ff),
1484 Some(GuestAddress(0xfff))
1485 );
1486 assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
1487 assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None);
1488
1489 assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
1490 assert_eq!(
1491 guest_mem.checked_offset(start_addr1, 0x400 - 1),
1492 Some(GuestAddress(0x400 - 1))
1493 );
1494 }
1495
1496 #[test]
test_check_range()1497 fn test_check_range() {
1498 let start_addr1 = GuestAddress(0);
1499 let start_addr2 = GuestAddress(0x800);
1500 let start_addr3 = GuestAddress(0xc00);
1501 let guest_mem = GuestMemoryMmap::from_ranges(&[
1502 (start_addr1, 0x400),
1503 (start_addr2, 0x400),
1504 (start_addr3, 0x400),
1505 ])
1506 .unwrap();
1507
1508 assert!(guest_mem.check_range(start_addr1, 0x0));
1509 assert!(guest_mem.check_range(start_addr1, 0x200));
1510 assert!(guest_mem.check_range(start_addr1, 0x400));
1511 assert!(!guest_mem.check_range(start_addr1, 0xa00));
1512 assert!(guest_mem.check_range(start_addr2, 0x7ff));
1513 assert!(guest_mem.check_range(start_addr2, 0x800));
1514 assert!(!guest_mem.check_range(start_addr2, 0x801));
1515 assert!(!guest_mem.check_range(start_addr2, 0xc00));
1516 assert!(!guest_mem.check_range(start_addr1, std::usize::MAX));
1517 }
1518
    #[test]
    fn test_atomic_accesses() {
        // Run the shared atomic-access test suite against a region: accesses
        // at the in-bounds address succeed, those at the end address fail.
        let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap();

        crate::bytes::tests::check_atomic_accesses(
            region,
            MemoryRegionAddress(0),
            MemoryRegionAddress(0x1000),
        );
    }
1529
    #[test]
    fn test_dirty_tracking() {
        // Run the shared dirty-bitmap test suite against a GuestMemoryMmap
        // instantiated with an AtomicBitmap tracking backend.
        test_guest_memory_and_region(|| {
            crate::GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0), 0x1_0000)])
                .unwrap()
        });
    }
1537 }
1538